# Hugging Face Spaces status banner captured during extraction (not part of the program):
# Spaces: Sleeping
import gradio as gr | |
from groq import Groq | |
import json | |
import time | |
from typing import Dict, List, Tuple, Optional | |
import threading | |
from datetime import datetime | |
import html | |
import re | |
class ReasoningOrchestra:
    """Coordinate several Groq-hosted LLM "personalities" on one problem.

    Three specialist models analyze the same problem independently and a
    fourth call synthesizes their answers into a unified solution.

    NOTE(review): the model IDs used below (deepseek-coder-v2-lite-instruct,
    llama3-70b-8192, mixtral-8x7b-32768) may have been deprecated on Groq —
    verify against the current model list at console.groq.com.
    """

    def __init__(self):
        # Groq client is created lazily once a key is supplied via set_api_key().
        self.client = None
        self.is_api_key_set = False

    def set_api_key(self, api_key: str) -> str:
        """Set the Groq API key, test it with a cheap request, and report status.

        Returns a human-readable status string intended for the UI textbox.
        """
        if not api_key.strip():
            return "β Please enter a valid API key"
        try:
            self.client = Groq(api_key=api_key.strip())
            # Cheap round-trip with a small model purely to validate the key.
            self.client.chat.completions.create(
                model="llama3-8b-8192",  # Using a smaller model for a quick validation
                messages=[{"role": "user", "content": "Hello"}],
                max_tokens=10
            )
            self.is_api_key_set = True
            return "β API key validated successfully! You can now use the Reasoning Orchestra."
        except Exception as e:
            self.is_api_key_set = False
            return f"β API key validation failed: {str(e)}"

    @staticmethod
    def _token_usage(completion) -> str:
        """Best-effort read of total tokens from a completion; 'N/A' when absent."""
        usage = getattr(completion, 'usage', None)
        if not usage:
            return "N/A"
        return getattr(usage, 'total_tokens', 'N/A')

    def format_text_to_html(self, text: str) -> str:
        """Convert markdown-ish model output to HTML.

        The input is HTML-escaped first (injection safety), then lightweight
        markdown conversion is applied: headers, bold/italic, code, and
        bullet / numbered lists.
        """
        if not text or text.strip() == "" or text == "No response generated":
            return "<p style='color: #666; font-style: italic;'>No content was generated. This might be due to API limitations or model availability issues.</p>"
        # Escape HTML characters first to prevent injection.
        text = html.escape(text)
        # Headers — longest prefix first so '###' is not consumed by '#'.
        text = re.sub(r'^### (.*$)', r'<h3>\1</h3>', text, flags=re.MULTILINE)
        text = re.sub(r'^## (.*$)', r'<h2>\1</h2>', text, flags=re.MULTILINE)
        text = re.sub(r'^# (.*$)', r'<h1>\1</h1>', text, flags=re.MULTILINE)
        # Bold before italic so '**' is not eaten by the single-'*' rule.
        text = re.sub(r'\*\*(.*?)\*\*', r'<strong>\1</strong>', text)
        text = re.sub(r'\*(.*?)\*', r'<em>\1</em>', text)
        # Code blocks, then inline code.
        # NOTE(review): multi-line <pre> content is later wrapped line-by-line
        # in <p> tags by the loop below — confirm that rendering is acceptable.
        text = re.sub(r'```(.*?)```', r'<pre><code>\1</code></pre>', text, flags=re.DOTALL)
        text = re.sub(r'`(.*?)`', r'<code>\1</code>', text)
        # Lists: walk line by line, opening/closing <ul>/<ol> as runs start/stop.
        lines = text.split('\n')
        in_ul = False
        in_ol = False
        html_lines = []
        for line in lines:
            stripped = line.strip()
            is_ul_item = stripped.startswith('- ') or stripped.startswith('* ')
            is_ol_item = re.match(r'^\d+\.\s', stripped) is not None
            # Close lists if the pattern changes.
            if in_ul and not is_ul_item:
                html_lines.append('</ul>')
                in_ul = False
            if in_ol and not is_ol_item:
                html_lines.append('</ol>')
                in_ol = False
            # Process list items.
            if is_ul_item:
                if not in_ul:
                    html_lines.append('<ul>')
                    in_ul = True
                html_lines.append(f'<li>{stripped[2:]}</li>')
            elif is_ol_item:
                if not in_ol:
                    html_lines.append('<ol>')
                    in_ol = True
                # BUG FIX: the original pattern r"^\d+\.\\s" contained a doubled
                # backslash, so the "N. " marker was never stripped and numbered
                # items rendered as "<li>1. item</li>". (The backslash inside an
                # f-string expression was also a SyntaxError before Python 3.12.)
                item_text = re.sub(r'^\d+\.\s+', '', stripped)
                html_lines.append(f'<li>{item_text}</li>')
            else:
                # Regular paragraph or empty line.
                if stripped:
                    html_lines.append(f'<p>{line}</p>')
                else:
                    html_lines.append('<br>')
        # Close any open lists at the end.
        if in_ul:
            html_lines.append('</ul>')
        if in_ol:
            html_lines.append('</ol>')
        return '\n'.join(html_lines)

    def deep_thinker_analyze(self, problem: str, context: str = "") -> Dict:
        """DeepSeek Coder V2 — the Deep Thinker: thorough step-by-step analysis.

        Returns a result dict with model/role/reasoning/timestamp/tokens_used,
        or {"error": ...} on failure.
        """
        if not self.is_api_key_set:
            return {"error": "API key not set"}
        prompt = f"""You are the Deep Thinker in a collaborative reasoning system. Your role is to provide thorough, methodical analysis with extensive step-by-step reasoning.
Problem: {problem}
{f"Additional Context: {context}" if context else ""}
Please provide a comprehensive analysis with deep reasoning. Think through all implications, consider multiple angles, and provide detailed step-by-step logic. Be thorough and methodical in your approach."""
        try:
            completion = self.client.chat.completions.create(
                model="deepseek-coder-v2-lite-instruct",
                messages=[{"role": "user", "content": prompt}],
                temperature=0.6,
                max_tokens=8192,
                top_p=0.95,
            )
            response_content = completion.choices[0].message.content
            if not response_content or response_content.strip() == "":
                response_content = "The model did not generate a response. This could be due to content filtering, model limitations, or API issues."
            return {
                "model": "DeepSeek Coder V2 (Deep Thinker)",
                "role": "π The Philosopher & Deep Analyzer",
                "reasoning": response_content,
                "timestamp": datetime.now().strftime("%H:%M:%S"),
                "tokens_used": self._token_usage(completion)
            }
        except Exception as e:
            return {"error": f"Deep Thinker error: {str(e)}"}

    def quick_strategist_analyze(self, problem: str, context: str = "") -> Dict:
        """Llama3 70B — the Quick Strategist: fast, actionable strategic plans.

        Returns the same result-dict shape as deep_thinker_analyze().
        """
        if not self.is_api_key_set:
            return {"error": "API key not set"}
        prompt = f"""You are the Quick Strategist in a collaborative reasoning system. Your role is to provide fast, efficient strategic analysis with clear action plans.
Problem: {problem}
{f"Additional Context: {context}" if context else ""}
Please provide a strategic analysis with:
1. Key insights and patterns
2. Practical solutions
3. Implementation priorities
4. Risk assessment
5. Clear next steps
Be decisive and solution-focused. Provide concrete, actionable recommendations."""
        try:
            completion = self.client.chat.completions.create(
                model="llama3-70b-8192",
                messages=[{"role": "user", "content": prompt}],
                temperature=0.6,
                top_p=0.95,
                max_tokens=8192
            )
            response_content = completion.choices[0].message.content
            if not response_content or response_content.strip() == "":
                response_content = "The model did not generate a response. This could be due to content filtering, model limitations, or API issues."
            return {
                "model": "Llama3 70B (Quick Strategist)",
                "role": "π The Strategic Decision Maker",
                "reasoning": response_content,
                "timestamp": datetime.now().strftime("%H:%M:%S"),
                "tokens_used": self._token_usage(completion)
            }
        except Exception as e:
            return {"error": f"Quick Strategist error: {str(e)}"}

    def detail_detective_analyze(self, problem: str, context: str = "") -> Dict:
        """Mixtral 8x7B — the Detail Detective: meticulous investigation.

        Returns the same result-dict shape as deep_thinker_analyze().
        """
        if not self.is_api_key_set:
            return {"error": "API key not set"}
        prompt = f"""You are the Detail Detective in a collaborative reasoning system. Your role is to provide meticulous investigation and comprehensive fact-checking.
Problem: {problem}
{f"Additional Context: {context}" if context else ""}
Please conduct a thorough investigation including:
1. Detailed analysis of all aspects
2. Potential edge cases and considerations
3. Verification of assumptions
4. Historical context or precedents
5. Comprehensive pros and cons
6. Hidden connections or implications
Be extremely thorough and leave no stone unturned. Provide detailed evidence and reasoning for your conclusions."""
        try:
            completion = self.client.chat.completions.create(
                model="mixtral-8x7b-32768",
                messages=[{"role": "user", "content": prompt}],
                temperature=0.7,
                top_p=0.9,
                max_tokens=8192
            )
            response_content = completion.choices[0].message.content
            if not response_content or response_content.strip() == "":
                response_content = "The model did not generate a response. This might be due to content filtering or other API issues."
            return {
                "model": "Mixtral 8x7B (Detail Detective)",
                "role": "π The Meticulous Investigator",
                "reasoning": response_content,
                "timestamp": datetime.now().strftime("%H:%M:%S"),
                "tokens_used": self._token_usage(completion)
            }
        except Exception as e:
            return {"error": f"Detail Detective error: {str(e)}"}

    def synthesize_orchestra(self, deep_result: Dict, strategic_result: Dict, detective_result: Dict, original_problem: str) -> str:
        """Synthesize all three perspectives into a final solution via Llama3 70B.

        Missing or errored analyses are represented by explanatory placeholders
        so the conductor model can still work with whatever is available.
        Returns the synthesis text, or an error string on failure.
        """
        if not self.is_api_key_set:
            return "API key not set"

        def extract_reasoning(result: Dict, model_name: str) -> str:
            # Turn an error/empty result into a note the conductor can reason about.
            if result.get('error'):
                return f"**{model_name} encountered an issue:** {result['error']}"
            reasoning = result.get('reasoning', '')
            if not reasoning or reasoning.strip() == "" or reasoning == "No response generated":
                return f"**{model_name}** did not provide analysis (this may be due to model limitations or API issues)."
            return reasoning

        deep_reasoning = extract_reasoning(deep_result, "Deep Thinker")
        strategic_reasoning = extract_reasoning(strategic_result, "Quick Strategist")
        detective_reasoning = extract_reasoning(detective_result, "Detail Detective")
        synthesis_prompt = f"""You are the Orchestra Conductor using Llama3 70B. You have received analytical perspectives from three different AI reasoning specialists on the same problem. Your job is to synthesize these into a comprehensive, unified solution.
ORIGINAL PROBLEM: {original_problem}
DEEP THINKER ANALYSIS (π DeepSeek Coder V2):
{deep_reasoning}
STRATEGIC ANALYSIS (π Llama3 70B):
{strategic_reasoning}
DETECTIVE INVESTIGATION (π Mixtral 8x7B):
{detective_reasoning}
As the Orchestra Conductor, please create a unified synthesis that:
1. Combines the best insights from all available analyses.
2. Addresses any gaps where models didn't provide input.
3. Resolves any contradictions between the analyses.
4. Provides a comprehensive final recommendation.
5. Highlights where the different reasoning styles complement each other.
6. Gives a clear, actionable conclusion.
If some models didn't provide analysis, work with what's available and note any limitations.
Format your response as a well-structured final solution that leverages all available reasoning approaches. Use clear sections and bullet points where appropriate for maximum clarity."""
        try:
            completion = self.client.chat.completions.create(
                model="llama3-70b-8192",
                messages=[{"role": "user", "content": synthesis_prompt}],
                temperature=0.7,
                max_tokens=8192,
                top_p=0.9
            )
            synthesis_content = completion.choices[0].message.content
            if not synthesis_content or synthesis_content.strip() == "":
                return "The synthesis could not be generated. This may be due to API limitations or the complexity of combining the different analyses."
            return synthesis_content
        except Exception as e:
            return f"Synthesis error: {str(e)}"
# Initialize the orchestra
# Single module-level instance shared by every Gradio event handler below.
orchestra = ReasoningOrchestra()
def validate_api_key(api_key: str) -> str:
    """Forward the key to the shared orchestra and return its status message."""
    status_message = orchestra.set_api_key(api_key)
    return status_message
def run_single_model(problem: str, model_choice: str, context: str = "") -> str:
    """Analyze a problem with one selected model and return an HTML result panel.

    Emits styled error panels when the API key is missing, the problem is
    empty, the model choice is unknown, or the model call itself fails.
    """
    # Guard clauses: key and problem must be present before any model call.
    if not orchestra.is_api_key_set:
        return """<div style="color: red; padding: 20px; border: 2px solid red; border-radius: 10px; background-color: #ffe6e6;">
        <h3>β API Key Required</h3>
        <p>Please set your Groq API key first in the API Configuration section above.</p>
        </div>"""
    if not problem.strip():
        return """<div style="color: orange; padding: 20px; border: 2px solid orange; border-radius: 10px; background-color: #fff3e6;">
        <h3>β οΈ Problem Required</h3>
        <p>Please enter a problem to analyze.</p>
        </div>"""
    # Dropdown label -> analyzer method dispatch table.
    analyzers = {
        "Deep Thinker (DeepSeek Coder V2)": orchestra.deep_thinker_analyze,
        "Quick Strategist (Llama3 70B)": orchestra.quick_strategist_analyze,
        "Detail Detective (Mixtral 8x7B)": orchestra.detail_detective_analyze,
    }
    analyzer = analyzers.get(model_choice)
    if analyzer is None:
        return """<div style="color: red; padding: 20px; border: 2px solid red; border-radius: 10px; background-color: #ffe6e6;">
        <h3>β Invalid Model Selection</h3>
        <p>Please select a valid model from the dropdown.</p>
        </div>"""
    started = time.time()
    result = analyzer(problem, context)
    elapsed_time = time.time() - started
    if "error" in result:
        return f"""<div style="color: red; padding: 20px; border: 2px solid red; border-radius: 10px; background-color: #ffe6e6;">
        <h3>β Error</h3>
        <p>{result['error']}</p>
        </div>"""
    reasoning_html = orchestra.format_text_to_html(result['reasoning'])
    # Success: one card with a metadata strip and the formatted reasoning.
    return f"""
    <div style="border: 2px solid #28a745; border-radius: 15px; padding: 25px; margin: 15px 0; background: linear-gradient(135deg, #f8f9fa 0%, #e9ecef 100%);">
        <div style="display: flex; align-items: center; margin-bottom: 20px; padding-bottom: 15px; border-bottom: 2px solid #28a745;">
            <h2 style="margin: 0; color: #28a745;">{result['role']}</h2>
        </div>
        <div style="background-color: white; padding: 15px; border-radius: 10px; margin-bottom: 20px;">
            <div style="display: flex; flex-wrap: wrap; gap: 20px; font-size: 14px; color: #666;">
                <span><strong>Model:</strong> {result['model']}</span>
                <span><strong>Analysis Time:</strong> {elapsed_time:.2f} seconds</span>
                <span><strong>Timestamp:</strong> {result['timestamp']}</span>
                <span><strong>Tokens:</strong> {result['tokens_used']}</span>
            </div>
        </div>
        <div style="background-color: white; padding: 20px; border-radius: 10px; line-height: 1.6; font-size: 16px;">
            {reasoning_html}
        </div>
    </div>
    """
def run_full_orchestra(problem: str, context: str = "") -> Tuple[str, str, str, str]:
    """Run the full collaborative reasoning orchestra.

    Runs all three specialist models on the problem, then the conductor
    synthesis. Returns four HTML panels in the order:
    (deep thinker, strategist, detective, synthesis). On missing key or
    empty problem, the same error panel is returned in all four slots.
    """
    if not orchestra.is_api_key_set:
        error_msg = """<div style="color: red; padding: 20px; border: 2px solid red; border-radius: 10px; background-color: #ffe6e6;">
        <h3>β API Key Required</h3>
        <p>Please set your Groq API key first in the API Configuration section above.</p>
        </div>"""
        return error_msg, error_msg, error_msg, error_msg
    if not problem.strip():
        error_msg = """<div style="color: orange; padding: 20px; border: 2px solid orange; border-radius: 10px; background-color: #fff3e6;">
        <h3>β οΈ Problem Required</h3>
        <p>Please enter a problem to analyze.</p>
        </div>"""
        return error_msg, error_msg, error_msg, error_msg
    # NOTE: These calls are synchronous. For a more advanced version,
    # you could use threading to run them concurrently.
    deep_result = orchestra.deep_thinker_analyze(problem, context)
    strategic_result = orchestra.quick_strategist_analyze(problem, context)
    detective_result = orchestra.detail_detective_analyze(problem, context)
    # The conductor receives the raw result dicts and handles errors itself.
    synthesis = orchestra.synthesize_orchestra(deep_result, strategic_result, detective_result, problem)

    def format_result_html(result: Dict, color: str, icon: str) -> str:
        # Render one model's result as a color-themed card; errors get a red panel.
        if "error" in result:
            return f"""<div style="color: red; padding: 20px; border: 2px solid red; border-radius: 10px; background-color: #ffe6e6;">
            <h3>β Model Error</h3>
            <p>{result['error']}</p>
            <p style="font-size: 12px; color: #666; margin-top: 10px;"><em>This model may have restrictions or temporary availability issues. The other models can still provide analysis.</em></p>
            </div>"""
        reasoning_html = orchestra.format_text_to_html(result['reasoning'])
        return f"""
        <div style="border: 2px solid {color}; border-radius: 15px; padding: 25px; margin: 15px 0; background: linear-gradient(135deg, #f8f9fa 0%, #e9ecef 100%);">
            <div style="display: flex; align-items: center; margin-bottom: 20px; padding-bottom: 15px; border-bottom: 2px solid {color};">
                <span style="font-size: 24px; margin-right: 10px;">{icon}</span>
                <h2 style="margin: 0; color: {color};">{result['model']}</h2>
            </div>
            <div style="background-color: white; padding: 15px; border-radius: 10px; margin-bottom: 20px;">
                <div style="display: flex; flex-wrap: wrap; gap: 20px; font-size: 14px; color: #666;">
                    <span><strong>Timestamp:</strong> {result['timestamp']}</span>
                    <span><strong>Tokens:</strong> {result['tokens_used']}</span>
                </div>
            </div>
            <div style="background-color: white; padding: 20px; border-radius: 10px; line-height: 1.6; font-size: 16px;">
                {reasoning_html}
            </div>
        </div>
        """

    # Per-model theme colors match the tab icons in the UI below.
    deep_output = format_result_html(deep_result, "#6f42c1", "π")
    strategic_output = format_result_html(strategic_result, "#fd7e14", "π")
    detective_output = format_result_html(detective_result, "#20c997", "π")
    synthesis_html = orchestra.format_text_to_html(synthesis)
    synthesis_output = f"""
    <div style="border: 2px solid #dc3545; border-radius: 15px; padding: 25px; margin: 15px 0; background: linear-gradient(135deg, #fff5f5 0%, #fee);">
        <div style="display: flex; align-items: center; margin-bottom: 20px; padding-bottom: 15px; border-bottom: 2px solid #dc3545;">
            <span style="font-size: 24px; margin-right: 10px;">πΌ</span>
            <h2 style="margin: 0; color: #dc3545;">Orchestra Conductor - Final Synthesis (Llama3 70B)</h2>
        </div>
        <div style="background-color: white; padding: 20px; border-radius: 10px; line-height: 1.6; font-size: 16px;">
            {synthesis_html}
        </div>
    </div>
    """
    return deep_output, strategic_output, detective_output, synthesis_output
# Custom CSS for better styling
# Applied app-wide via gr.Blocks(css=...); the .html-content rules constrain
# the tall model-output panels to a scrollable box and style their markup.
custom_css = """
body { font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; }
.gradio-container {
max-width: 1400px !important;
margin: 0 auto !important;
}
.api-key-section {
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
padding: 20px;
border-radius: 10px;
margin-bottom: 20px;
}
.orchestra-header {
text-align: center;
background: linear-gradient(45deg, #f093fb 0%, #f5576c 100%);
padding: 20px;
border-radius: 15px;
margin-bottom: 20px;
color: white;
}
/* Custom styling for HTML output containers */
.html-content {
max-height: 600px; /* Set a max height for scrollability */
overflow-y: auto; /* Enable vertical scrolling */
border: 1px solid #ddd;
border-radius: 8px;
padding: 15px;
background-color: #fdfdfd;
}
.html-content h1, .html-content h2, .html-content h3 { color: #333; }
.html-content p { margin-bottom: 1em; }
.html-content ul, .html-content ol { padding-left: 25px; }
.html-content li { margin-bottom: 0.5em; }
.html-content pre { background-color: #f4f4f4; padding: 10px; border-radius: 5px; white-space: pre-wrap; word-wrap: break-word; }
.html-content code { font-family: 'Courier New', Courier, monospace; background-color: #eee; padding: 2px 4px; border-radius: 3px;}
"""
# Build the Gradio interface.
# Layout: header banner, API-key accordion, three tabs (single model / full
# orchestra / examples), footer, then the event-handler wiring at the bottom.
with gr.Blocks(css=custom_css, title="Reasoning Orchestra") as app:
    # Header
    gr.HTML("""
    <div class="orchestra-header">
        <h1>πΌ The Collaborative Reasoning Orchestra</h1>
        <p><em>Where AI models collaborate like musicians to solve complex problems</em></p>
        <p><strong>Powered by Groq's LPUβ’ Inference Engine for real-time results</strong></p>
    </div>
    """)
    # API Key Section
    with gr.Accordion("π API Configuration", open=True):
        with gr.Group():
            gr.HTML('<div class="api-key-section"><h3 style="color: white; margin-top: 0;">Enter Your Groq API Key</h3></div>')
            with gr.Row():
                api_key_input = gr.Textbox(
                    label="Groq API Key",
                    type="password",
                    placeholder="gsk_...",
                    info="Get your free API key from https://console.groq.com/keys"
                )
                # Read-only status box updated by the validate button handler.
                api_status = gr.Textbox(
                    label="API Status",
                    interactive=False,
                    placeholder="Enter API key and click validate..."
                )
            validate_btn = gr.Button("π Validate API Key", variant="primary")
    # Main Interface Tabs
    with gr.Tabs() as tabs:
        # Single Model Tab
        with gr.TabItem("π― Single Model Analysis"):
            gr.Markdown("### Test individual reasoning models with formatted HTML output.")
            # NOTE(review): gr.Row(gap=...) requires a Gradio version that
            # supports the `gap` parameter — confirm against the pinned version.
            with gr.Row(gap=16):
                with gr.Column(scale=1):
                    single_problem = gr.Textbox(
                        label="Problem Statement",
                        placeholder="Enter the problem you want to analyze...",
                        lines=5
                    )
                    single_context = gr.Textbox(
                        label="Additional Context (Optional)",
                        placeholder="Any additional context or constraints...",
                        lines=3
                    )
                    # Choices must match the dispatch labels in run_single_model.
                    model_choice = gr.Dropdown(
                        label="Choose Model",
                        choices=[
                            "Deep Thinker (DeepSeek Coder V2)",
                            "Quick Strategist (Llama3 70B)",
                            "Detail Detective (Mixtral 8x7B)"
                        ],
                        value="Quick Strategist (Llama3 70B)"
                    )
                    single_analyze_btn = gr.Button("π Analyze Problem", variant="primary")
                with gr.Column(scale=2):
                    single_output = gr.HTML(label="Analysis Result", elem_classes=["html-content"])
        # Full Orchestra Tab
        with gr.TabItem("πΌ Full Orchestra Collaboration"):
            gr.Markdown("### Run all models collaboratively with a Llama3 70B conductor to synthesize a final solution.")
            with gr.Row(gap=16):
                with gr.Column(scale=1):
                    orchestra_problem = gr.Textbox(
                        label="Complex Problem Statement",
                        placeholder="Enter a problem that benefits from multiple reasoning perspectives...",
                        lines=8
                    )
                    orchestra_context = gr.Textbox(
                        label="Additional Context (Optional)",
                        placeholder="Background information, constraints, etc...",
                        lines=4
                    )
                    orchestra_analyze_btn = gr.Button("πΌ Start Orchestra Analysis", variant="primary")
                with gr.Column(scale=2):
                    # One sub-tab per panel returned by run_full_orchestra.
                    with gr.Tabs():
                        with gr.TabItem("πΌ Final Synthesis"):
                            synthesis_output = gr.HTML(label="Final Orchestrated Solution (Llama3 70B)", elem_classes=["html-content"])
                        with gr.TabItem("π Deep Thinker"):
                            deep_output = gr.HTML(label="Deep Thinker Analysis", elem_classes=["html-content"])
                        with gr.TabItem("π Quick Strategist"):
                            strategic_output = gr.HTML(label="Quick Strategist Analysis", elem_classes=["html-content"])
                        with gr.TabItem("π Detail Detective"):
                            detective_output = gr.HTML(label="Detail Detective Analysis", elem_classes=["html-content"])
        # Examples Tab
        with gr.TabItem("π‘ Example Problems"):
            gr.Markdown("""
            ### Try these example problems to see the Orchestra in action:
            **π’ Business Strategy:**
            "Our tech startup has limited funding and needs to decide between focusing on product development or marketing. We have a working MVP but low user adoption. Budget is $50K for the next 6 months."
            **π€ Ethical AI:** "Should autonomous vehicles prioritize passenger safety over pedestrian safety in unavoidable accident scenarios? Consider the ethical, legal, and practical implications for mass adoption."
            **π Environmental Policy:**
            "Design a policy framework to reduce carbon emissions in urban areas by 40% within 10 years while maintaining economic growth and social equity."
            **π Educational Innovation:**
            "How can we redesign traditional university education to better prepare students for the rapidly changing job market of the 2030s, considering AI, remote work, and emerging technologies?"
            **π Urban Planning:**
            "A city of 500K people wants to build 10,000 affordable housing units but faces opposition from current residents, environmental concerns, and a $2B budget constraint. Develop a comprehensive solution."
            """)
    # Footer
    gr.HTML("""
    <div style="text-align: center; margin-top: 30px; padding: 20px; background: #f8f9fa; border-radius: 15px; border: 1px solid #dee2e6;">
        <h3>πΌ How the Orchestra Works</h3>
        <div style="display: grid; grid-template-columns: repeat(auto-fit, minmax(250px, 1fr)); gap: 20px; margin: 20px 0; text-align: left;">
            <div style="background: #fff; padding: 15px; border-radius: 10px; border: 1px solid #eee;">
                <h4>π Deep Thinker</h4>
                <p>Provides thorough philosophical and theoretical analysis with comprehensive reasoning chains.</p>
            </div>
            <div style="background: #fff; padding: 15px; border-radius: 10px; border: 1px solid #eee;">
                <h4>π Quick Strategist</h4>
                <p>Delivers practical strategies, action plans, and rapid decision-making frameworks.</p>
            </div>
            <div style="background: #fff; padding: 15px; border-radius: 10px; border: 1px solid #eee;">
                <h4>π Detail Detective</h4>
                <p>Conducts comprehensive investigation, fact-checking, and finds hidden connections.</p>
            </div>
            <div style="background: #fff; padding: 15px; border-radius: 10px; border: 1px solid #eee;">
                <h4>πΌ Orchestra Conductor</h4>
                <p>Synthesizes all perspectives into a unified, comprehensive final solution.</p>
            </div>
        </div>
        <p style="margin-top: 20px; color: #6c757d;"><em>Built with β€οΈ using Groq, Gradio, and Python</em></p>
    </div>
    """)
    # --- Event Handlers ---
    validate_btn.click(
        fn=validate_api_key,
        inputs=[api_key_input],
        outputs=[api_status]
    )
    single_analyze_btn.click(
        fn=run_single_model,
        inputs=[single_problem, model_choice, single_context],
        outputs=[single_output]
    )
    # Output order must match run_full_orchestra's 4-tuple return.
    orchestra_analyze_btn.click(
        fn=run_full_orchestra,
        inputs=[orchestra_problem, orchestra_context],
        outputs=[deep_output, strategic_output, detective_output, synthesis_output]
    )
# Launch the app only when run as a script (not when imported).
if __name__ == "__main__":
    app.launch(share=False)  # Set share=True to create a temporary public link