Update app.py
app.py CHANGED
@@ -102,7 +102,7 @@ Please provide a comprehensive analysis with deep reasoning. Think through all i
             model="deepseek-r1-distill-llama-70b",
             messages=[{"role": "user", "content": prompt}],
             temperature=0.6,
-            max_completion_tokens=
+            max_completion_tokens=8192,
             top_p=0.95,
             reasoning_format="raw"
         )
@@ -143,7 +143,7 @@ Be decisive and solution-focused. Provide concrete, actionable recommendations."
             messages=[{"role": "user", "content": prompt}],
             temperature=0.6,
             top_p=0.95,
-            max_completion_tokens=
+            max_completion_tokens=8192
         )
 
         response_content = completion.choices[0].message.content
@@ -184,7 +184,7 @@ Be extremely thorough and leave no stone unturned. Provide detailed evidence and
             messages=[{"role": "user", "content": prompt}],
             temperature=0.7,
             top_p=0.9,
-            max_completion_tokens=
+            max_completion_tokens=8192
         )
 
         response_content = completion.choices[0].message.content
@@ -195,7 +195,7 @@ Be extremely thorough and leave no stone unturned. Provide detailed evidence and
             model="qwen-qwq-32b",
             messages=[{"role": "user", "content": fallback_prompt}],
             temperature=0.5,
-            max_completion_tokens=
+            max_completion_tokens=8192
         )
         response_content = fallback_completion.choices[0].message.content
 
@@ -235,18 +235,13 @@ Be extremely thorough and leave no stone unturned. Provide detailed evidence and
     detective_reasoning = extract_reasoning(detective_result, "Detail Detective")
 
     synthesis_prompt = f"""You are the Orchestra Conductor using Llama 3.3 70B Versatile model. You have received analytical perspectives from three different AI reasoning specialists on the same problem. Your job is to synthesize these into a comprehensive, unified solution.
-
 ORIGINAL PROBLEM: {original_problem}
-
 DEEP THINKER ANALYSIS (DeepSeek R1):
 {deep_reasoning}
-
 STRATEGIC ANALYSIS (Qwen3 32B):
 {strategic_reasoning}
-
 DETECTIVE INVESTIGATION (QwQ 32B):
 {detective_reasoning}
-
 As the Orchestra Conductor, please create a unified synthesis that:
 1. Combines the best insights from all available analyses
 2. Addresses any gaps where models didn't provide input
@@ -254,9 +249,7 @@ As the Orchestra Conductor, please create a unified synthesis that:
 4. Provides a comprehensive final recommendation
 5. Highlights where the different reasoning styles complement each other
 6. Gives a clear, actionable conclusion
-
 If some models didn't provide analysis, work with what's available and note any limitations.
-
 Format your response as a well-structured final solution that leverages all available reasoning approaches. Use clear sections and bullet points where appropriate for maximum clarity."""
 
     try:
@@ -264,7 +257,7 @@ Format your response as a well-structured final solution that leverages all avai
             model="llama-3.3-70b-versatile",
             messages=[{"role": "user", "content": synthesis_prompt}],
             temperature=0.7,
-            max_completion_tokens=
+            max_completion_tokens=8192,
             top_p=0.9
         )
 
@@ -626,4 +619,4 @@ with gr.Blocks(css=custom_css, title="Reasoning Orchestra") as app:
 
 # Launch the app
 if __name__ == "__main__":
-    app.launch()
+    app.launch(share=False)  # Set share=False for local running; use share=True for public sharing
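The five API-call hunks make the same one-line change: every chat-completion call now pins max_completion_tokens to 8192, giving each specialist model the same output budget. Below is a minimal sketch of the resulting call pattern, assuming the app uses the Groq Python SDK (suggested by the model names and the reasoning_format parameter); the client setup and the example prompt are illustrative, not copied from app.py.

    import os

    from groq import Groq

    # Hypothetical client setup; app.py's actual initialization is not shown in this diff.
    client = Groq(api_key=os.environ["GROQ_API_KEY"])

    completion = client.chat.completions.create(
        model="deepseek-r1-distill-llama-70b",
        messages=[{"role": "user", "content": "Walk through this problem step by step."}],
        temperature=0.6,
        max_completion_tokens=8192,  # the cap this commit sets on every call
        top_p=0.95,
        reasoning_format="raw",      # keep the reasoning trace inline in message.content
    )
    print(completion.choices[0].message.content)

With reasoning_format="raw", the model's thinking trace arrives inline in the response content, which is presumably what the extract_reasoning helper referenced in the later hunks parses out.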
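The final hunk changes a Gradio launch flag rather than a model call: share=False (Gradio's default) serves the app only on the local interface, while share=True requests a temporary public gradio.live tunnel. On Hugging Face Spaces the Space itself already exposes the app, so the flag mainly matters when running app.py locally. A minimal sketch, assuming nothing about the real Blocks layout:

    import gradio as gr

    # Placeholder UI; the real layout lives in app.py and is not part of this hunk.
    with gr.Blocks(title="Reasoning Orchestra") as app:
        gr.Markdown("Reasoning Orchestra placeholder")

    if __name__ == "__main__":
        # share=False: local only. share=True would create a temporary public gradio.live URL.
        app.launch(share=False)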