luminoussg committed · verified
Commit f9c29be · 1 Parent(s): 640fedc

Update app.py

Files changed (1): app.py (+21 -15)
app.py CHANGED
@@ -72,26 +72,32 @@ def query_model(model_name: str, messages: List[Dict[str, str]]) -> str:
         return f"{model_name} error: {str(e)}"
 
 def respond(message: str, history: List[List[str]]) -> str:
-    """Handle sequential model responses with progressive context"""
+    """Handle sequential model responses with individual updates"""
     messages = [{"role": "user", "content": message}]
-    responses = []
 
-    # First model: Initial response
-    reply1 = query_model("Qwen2.5-Coder-32B-Instruct", messages)
-    responses.append(f"**Qwen2.5-Coder-32B-Instruct**:\n{reply1}")
-    messages.append({"role": "assistant", "content": reply1})
+    # Get first model's response
+    response1 = query_model("Qwen2.5-Coder-32B-Instruct", messages)
+    yield f"**Qwen2.5-Coder-32B-Instruct**:\n{response1}"
 
-    # Second model: Contextualize with first response
-    reply2 = query_model("Qwen2.5-72B-Instruct", messages)
-    responses.append(f"\n\n**Qwen2.5-72B-Instruct**:\n{reply2}")
-    messages.append({"role": "assistant", "content": reply2})
+    # Add first response to context
+    messages.append({
+        "role": "assistant",
+        "content": f"Previous response: {response1}"
+    })
 
-    # Third model: Final synthesis
-    reply3 = query_model("Llama3.3-70B-Instruct", messages)
-    responses.append(f"\n\n**Llama3.3-70B-Instruct**:\n{reply3}")
+    # Get second model's response
+    response2 = query_model("Qwen2.5-72B-Instruct", messages)
+    yield f"**Qwen2.5-72B-Instruct**:\n{response2}"
 
-    # Return progressive responses with clear separation
-    return "\n\n".join(responses)
+    # Add second response to context
+    messages.append({
+        "role": "assistant",
+        "content": f"Previous responses: {response1}\n{response2}"
+    })
+
+    # Get final model's response
+    response3 = query_model("Llama3.3-70B-Instruct", messages)
+    yield f"**Llama3.3-70B-Instruct**:\n{response3}"
 
 # Create the Gradio interface
 chat_interface = gr.ChatInterface(
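
The change swaps the single return for yield statements, which matters because gr.ChatInterface treats a generator chat function as a streaming source: each yielded string is pushed to the UI as soon as it is produced, so the user sees each model's answer as it completes rather than waiting for all three. Below is a minimal self-contained sketch of that pattern, with the three-step sequence condensed into a loop. The model names and the respond()/query_model() signatures come from the diff above; the stub query_model body and the launch() call are stand-ins for the rest of app.py, not its real inference code.

from typing import Dict, List

import gradio as gr

def query_model(model_name: str, messages: List[Dict[str, str]]) -> str:
    # Stand-in for app.py's real Hugging Face inference call.
    return f"[{model_name} reply to: {messages[-1]['content']}]"

def respond(message: str, history: List[List[str]]):
    """Yield one progressive update per model, as in the commit above."""
    messages = [{"role": "user", "content": message}]
    for model in ("Qwen2.5-Coder-32B-Instruct",
                  "Qwen2.5-72B-Instruct",
                  "Llama3.3-70B-Instruct"):
        reply = query_model(model, messages)
        # Each yield updates the assistant message currently shown in the chat.
        yield f"**{model}**:\n{reply}"
        # Feed the reply back so the next model sees the running context.
        messages.append({"role": "assistant",
                         "content": f"Previous response: {reply}"})

# gr.ChatInterface accepts a generator fn and streams each yielded value.
chat_interface = gr.ChatInterface(respond)

if __name__ == "__main__":
    chat_interface.launch()

One caveat with this pattern: each successive yield replaces the displayed reply rather than appending to it, so the chat shows only the most recent model's output at any moment; to keep all three answers visible at once, each yield would need to carry the accumulated text so far.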