luminoussg committed · verified
Commit 640fedc · 1 Parent(s): 4766698

Update app.py

Files changed (1):
  1. app.py +15 -20
app.py CHANGED
@@ -72,31 +72,26 @@ def query_model(model_name: str, messages: List[Dict[str, str]]) -> str:
         return f"{model_name} error: {str(e)}"
 
 def respond(message: str, history: List[List[str]]) -> str:
-    """Handle sequential model responses with collaboration"""
+    """Handle sequential model responses with progressive context"""
     messages = [{"role": "user", "content": message}]
     responses = []
 
-    # Define processing order
-    processing_order = [
-        "Qwen2.5-Coder-32B-Instruct",
-        "Qwen2.5-72B-Instruct",
-        "Llama3.3-70B-Instruct"
-    ]
+    # First model: Initial response
+    reply1 = query_model("Qwen2.5-Coder-32B-Instruct", messages)
+    responses.append(f"**Qwen2.5-Coder-32B-Instruct**:\n{reply1}")
+    messages.append({"role": "assistant", "content": reply1})
 
-    # Process models in sequence with accumulating context
-    for model_name in processing_order:
-        # Get current model's response
-        response = query_model(model_name, messages)
-        responses.append(f"**{model_name}**:\n{response}")
-
-        # Add model's response to message history for next model
-        messages.append({
-            "role": "assistant",
-            "content": f"{model_name} response: {response}"
-        })
+    # Second model: Contextualize with first response
+    reply2 = query_model("Qwen2.5-72B-Instruct", messages)
+    responses.append(f"\n\n**Qwen2.5-72B-Instruct**:\n{reply2}")
+    messages.append({"role": "assistant", "content": reply2})
 
-    # Format output with collaboration timeline
-    return "\n\n→→→ Next Model Builds Upon This →→→\n\n".join(responses)
+    # Third model: Final synthesis
+    reply3 = query_model("Llama3.3-70B-Instruct", messages)
+    responses.append(f"\n\n**Llama3.3-70B-Instruct**:\n{reply3}")
+
+    # Return progressive responses with clear separation
+    return "\n\n".join(responses)
 
 # Create the Gradio interface
 chat_interface = gr.ChatInterface(
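
For reference, the three unrolled blocks in the new respond() are behaviorally close to the loop they replace: each model is queried in turn, and every reply is appended to the shared message list as an assistant turn so the next model can build on it. The sketch below is a minimal, self-contained illustration of that pattern, not the app's actual code: query_model is stubbed (in app.py it presumably wraps an inference API call; only its signature is visible in this diff), and the MODELS list is a hypothetical name mirroring the three models from the diff.

from typing import Dict, List

# Hypothetical list mirroring the three models named in the diff.
MODELS = [
    "Qwen2.5-Coder-32B-Instruct",
    "Qwen2.5-72B-Instruct",
    "Llama3.3-70B-Instruct",
]

def query_model(model_name: str, messages: List[Dict[str, str]]) -> str:
    # Stub for illustration only; the real function in app.py presumably
    # forwards `messages` to an inference endpoint and returns the reply.
    return f"(reply from {model_name} to {len(messages)} message(s))"

def respond(message: str, history: List[List[str]]) -> str:
    """Query each model in sequence, feeding earlier replies forward."""
    messages = [{"role": "user", "content": message}]
    responses = []
    for model_name in MODELS:
        reply = query_model(model_name, messages)
        responses.append(f"**{model_name}**:\n{reply}")
        # Later models see earlier replies as assistant turns.
        messages.append({"role": "assistant", "content": reply})
    return "\n\n".join(responses)

print(respond("Explain Python decorators.", []))

Two small behavioral differences from the old loop are visible in the diff: the "→→→ Next Model Builds Upon This →→→" separator is dropped in favor of plain blank-line joins, and the raw reply is appended to the context rather than a string prefixed with the model's name.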