import gradio as gr
from groq import Groq
import json
import time
from typing import Dict, List, Tuple, Optional
import threading
from datetime import datetime
import html
import re
import base64
import io


class ReasoningOrchestra:
    """Coordinates several Groq-hosted reasoning models ("musicians") plus a
    synthesis model ("conductor") to analyze one problem from multiple angles.

    NOTE(review): this file was recovered from a mangled extraction. The rich
    HTML template strings originally embedded in several methods were lost and
    are reconstructed below with minimal placeholder markup — confirm against
    the deployed UI before shipping.
    """

    def __init__(self):
        # The Groq client is created lazily in set_api_key(); until then the
        # handlers must check is_api_key_set before using self.client.
        self.client = None
        self.is_api_key_set = False

    def set_api_key(self, api_key: str) -> str:
        """Set the Groq API key and test connection.

        Returns a user-facing status string; never raises (all failures are
        folded into the returned message).
        """
        if not api_key.strip():
            return "❌ Please enter a valid API key"
        try:
            self.client = Groq(api_key=api_key.strip())
            # Cheap round-trip request to prove the key works before accepting it.
            test_completion = self.client.chat.completions.create(
                model="qwen/qwen3-32b",
                messages=[{"role": "user", "content": "Hello"}],
                max_completion_tokens=10,
            )
            self.is_api_key_set = True
            return "✅ API key validated successfully! You can now use the Reasoning Orchestra."
        except Exception as e:
            self.is_api_key_set = False
            return f"❌ API key validation failed: {str(e)}"

    def format_text_to_html(self, text: str) -> str:
        """Convert model output text to HTML with enhanced formatting for clarity.

        NOTE(review): the original HTML/CSS wrappers were destroyed in
        extraction. The surviving fragments (``re.sub`` with ``\\1`` and
        ``flags=re.DOTALL``, the backtick regex, the ``in_list`` loop over
        ``text.split('\\n')``) fix the structure below, but the exact tags and
        styling are reconstructed — confirm against the original app.
        """
        if not text or text.strip() == "" or text == "No response generated":
            return "<p><em>No content was generated.</em></p>"

        # Escape first so model output cannot inject markup into the page.
        text = html.escape(text)

        # Fenced code blocks (DOTALL so they may span lines), then inline code.
        text = re.sub(r'```(.*?)```', r'<pre><code>\1</code></pre>', text, flags=re.DOTALL)
        text = re.sub(r'`(.*?)`', r'<code>\1</code>', text)

        # Lists with better spacing
        lines = text.split('\n')
        in_list = False
        formatted_lines = []
        for line in lines:
            stripped = line.strip()
            if stripped.startswith('- ') or stripped.startswith('* '):
                if not in_list:
                    formatted_lines.append('<ul>')
                    in_list = True
                formatted_lines.append(f'<li>{stripped[2:]}</li>')
            else:
                if in_list:
                    formatted_lines.append('</ul>')
                    in_list = False
                if stripped:
                    formatted_lines.append(f'<p>{stripped}</p>')
        if in_list:
            formatted_lines.append('</ul>')
        return '\n'.join(formatted_lines)

    def deep_thinker_analyze(self, problem: str, context: str = "") -> Dict:
        """DeepSeek R1 - The Deep Thinker.

        NOTE(review): this method's body was lost in extraction. It is
        reconstructed from the pattern of the sibling analyze methods and from
        synthesize_orchestra(), which names "🎭 DeepSeek R1" as the Deep
        Thinker. Confirm the exact Groq model id and prompt wording against
        the original app.
        """
        if not self.is_api_key_set:
            return {"error": "API key not set"}

        context_part = f"Additional Context: {context}" if context else ""
        prompt = f"""You are the Deep Thinker in a collaborative reasoning system. Your role is to provide thorough philosophical and theoretical analysis with comprehensive reasoning chains.

Problem: {problem}

{context_part}

Please provide a deep, comprehensive analysis with full reasoning chains, exploring underlying principles, assumptions, and long-term implications."""
        try:
            completion = self.client.chat.completions.create(
                model="deepseek-r1-distill-llama-70b",  # TODO(review): confirm exact Groq model id
                messages=[{"role": "user", "content": prompt}],
                temperature=0.6,
                max_completion_tokens=8192,
            )
            response_content = completion.choices[0].message.content
            if not response_content or response_content.strip() == "":
                response_content = "The model did not generate a response. This could be due to content filtering, model limitations, or API issues."
            return {
                "model": "DeepSeek R1 (Deep Thinker)",
                "role": "🎭 The Deep Thinker",
                "reasoning": response_content,
                "timestamp": datetime.now().strftime("%H:%M:%S"),
                "tokens_used": getattr(completion.usage, 'total_tokens', 'N/A') if hasattr(completion, 'usage') and completion.usage else "N/A",
            }
        except Exception as e:
            return {"error": f"Deep Thinker error: {str(e)}"}
Dict: """Qwen3 32B - The Quick Strategist""" if not self.is_api_key_set: return {"error": "API key not set"} prompt = f"""You are the Quick Strategist in a collaborative reasoning system. Your role is to provide fast, efficient strategic analysis with clear action plans. Problem: {problem} {f"Additional Context: {context}" if context else"Additional Context: {context}" if context else ""} Please provide a strategic analysis with: 1. Key insights and patterns 2. Practical solutions 3. Implementation priorities 4. Risk assessment 5. Clear next steps Be decisive and solution-focused. Provide concrete, actionable recommendations.""" try: completion = self.client.chat.completions.create( model="qwen/qwen3-32b", messages=[{"role": "user", "content": prompt}], temperature=0.6, top_p=0.95, max_completion_tokens=8192 ) response_content = completion.choices[0].message.content if not response_content or response_content.strip() == "": response_content = "The model did not generate a response. This could be due to content filtering, model limitations, or API issues." return { "model": "Qwen3 32B (Quick Strategist)", "role": "🚀 The Strategic Decision Maker", "reasoning": response_content, "timestamp": datetime.now().strftime("%H:%M:%S"), "tokens_used": getattr(completion.usage, 'total_tokens', 'N/A') if hasattr(completion, 'usage') and completion.usage else "N/A" } except Exception as e: return {"error": f"Quick Strategist error: {str(e)}"} def detail_detective_analyze(self, problem: str, context: str = "") -> Dict: """QwQ 32B - The Detail Detective""" if not self.is_api_key_set: return {"error": "API key not set"} prompt = f"""You are the Detail Detective in a collaborative reasoning system. Your role is to provide meticulous investigation and comprehensive fact-checking. Problem: {problem} {f"Additional Context: {context}" if context else ""} Please conduct a thorough investigation including: 1. Detailed analysis of all aspects 2. Potential edge cases and considerations 3. 
Verification of assumptions 4. Historical context or precedents 5. Comprehensive pros and cons 6. Hidden connections or implications Be extremely thorough and leave no stone unturned. Provide detailed evidence and reasoning for your conclusions.""" try: # Try with different parameters for QwQ model completion = self.client.chat.completions.create( model="qwen-qwq-32b", messages=[{"role": "user", "content": prompt}], temperature=0.7, top_p=0.9, max_completion_tokens=8192 ) response_content = completion.choices[0].message.content if not response_content or response_content.strip() == "": # Fallback: try with a simpler prompt fallback_prompt = f"Analyze this problem in detail: {problem}" fallback_completion = self.client.chat.completions.create( model="qwen-qwq-32b", messages=[{"role": "user", "content": fallback_prompt}], temperature=0.5, max_completion_tokens=8192 ) response_content = fallback_completion.choices[0].message.content if not response_content or response_content.strip() == "": response_content = "The QwQ model encountered an issue generating content. This could be due to the complexity of the prompt, content filtering, or temporary model availability issues. The model may work better with simpler, more direct questions." return { "model": "QwQ 32B (Detail Detective)", "role": "🔍 The Meticulous Investigator", "reasoning": response_content, "timestamp": datetime.now().strftime("%H:%M:%S"), "tokens_used": getattr(completion.usage, 'total_tokens', 'N/A') if hasattr(completion, 'usage') and completion.usage else "N/A" } except Exception as e: # If QwQ fails, provide a helpful error message error_msg = f"Detail Detective error: {str(e)}" if "model" in str(e).lower() or "not found" in str(e).lower(): error_msg += "\n\nNote: The QwQ model may not be available in your region or may have usage restrictions. You can still use the other models in the orchestra." 
return {"error": error_msg} def synthesize_orchestra(self, deep_result: Dict, strategic_result: Dict, detective_result: Dict, original_problem: str) -> str: """Synthesize all three perspectives into a final orchestrated solution using Llama 3.3 70B""" if not self.is_api_key_set: return "API key not set" # Extract reasoning content safely with better error handling def extract_reasoning(result: Dict, model_name: str) -> str: if result.get('error'): return f"**{model_name} encountered an issue:** {result['error']}" reasoning = result.get('reasoning', '') if not reasoning or reasoning.strip() == "" or reasoning == "No response generated": return f"**{model_name}** did not provide analysis (this may be due to model limitations or API issues)." return reasoning deep_reasoning = extract_reasoning(deep_result, "Deep Thinker") strategic_reasoning = extract_reasoning(strategic_result, "Quick Strategist") detective_reasoning = extract_reasoning(detective_result, "Detail Detective") synthesis_prompt = f"""You are the Orchestra Conductor using Llama 3.3 70B Versatile model. You have received analytical perspectives from three different AI reasoning specialists on the same problem. Your job is to synthesize these into a comprehensive, unified solution. ORIGINAL PROBLEM: {original_problem} DEEP THINKER ANALYSIS (🎭 DeepSeek R1): {deep_reasoning} STRATEGIC ANALYSIS (🚀 Qwen3 32B): {strategic_reasoning} DETECTIVE INVESTIGATION (🔍 QwQ 32B): {detective_reasoning} As the Orchestra Conductor, please create a unified synthesis that: 1. Combines the best insights from all available analyses 2. Addresses any gaps where models didn't provide input 3. Resolves any contradictions between the analyses 4. Provides a comprehensive final recommendation 5. Highlights where the different reasoning styles complement each other 6. Gives a clear, actionable conclusion If some models didn't provide analysis, work with what's available and note any limitations. 
Format your response as a well-structured final solution that leverages all available reasoning approaches. Use clear sects and bullet points where appropriate for maximum clarity.""" try: completion = self.client.chat.completions.create( model="llama-3.3-70b-versatile", messages=[{"role": "user", "content": synthesis_prompt}], temperature=0.7, max_completion_tokens=8192, top_p=0.9 ) synthesis_content = completion.choices[0].message.content if not synthesis_content or synthesis_content.strip() == "": return "The synthesis could not be generated. This may be due to API limitations or the complexity of combining the different analyses." return synthesis_content except Exception as e: return f"Synthesis error: {str(e)}" # Initialize the orchestra orchestra = ReasoningOrchestra() def validate_api_key(api_key: str) -> str: """Validate the API key and return status""" return orchestra.set_api_key(api_key) def run_single_model(problem: str, model_choice: str, context: str = "") -> str: """Run a single model analysis""" if not orchestra.is_api_key_set: return """
Please set your Groq API key first in the API Configuration section above.
Please enter a problem to analyze.
Please select a valid model from the dropdown.
{result['error']}
Please set your Groq API key first in the API Configuration section above.
Please enter a problem to analyze.
{result['error']}
This model may have restrictions or temporary availability issues. The other models can still provide analysis.
Where AI models collaborate like musicians in an orchestra to solve complex problems
Now with Llama 3.3 70B Versatile as Orchestra Conductor & Enhanced HTML-Formatted Responses!
Provides thorough philosophical and theoretical analysis with comprehensive reasoning chains
Delivers practical strategies, action plans, and rapid decision-making frameworks
Conducts comprehensive investigation, fact-checking, and finds hidden connections
Synthesizes all perspectives into unified, comprehensive solutions
Built with ❤️ using Groq's lightning-fast inference, Gradio, and beautiful HTML formatting