luminoussg committed
Commit f32ce56 · verified · 1 Parent(s): 327109c

Update app.py

Files changed (1):
  1. app.py +17 -14
app.py CHANGED
@@ -72,27 +72,30 @@ def query_model(model_name: str, messages: List[Dict[str, str]]) -> str:
         return f"{model_name} error: {str(e)}"
 
 def respond(message: str, history: List[List[str]]) -> str:
-    """Handle continuous contextual conversations"""
-    # Build full message history from previous interactions
-    messages = []
-    for user_msg, bot_msg in history:
-        messages.append({"role": "user", "content": user_msg})
-        messages.append({"role": "assistant", "content": bot_msg})
+    """Handle sequential model responses with individual updates"""
+    messages = [{"role": "user", "content": message}]
 
-    # Add new user message
-    messages.append({"role": "user", "content": message})
-
-    # First model sees current prompt + full history
+    # Get first model's response
     response1 = query_model("Qwen2.5-Coder-32B-Instruct", messages)
     yield f"**Qwen2.5-Coder-32B-Instruct**:\n{response1}"
 
-    # Second model sees current prompt + history + first response
-    messages.append({"role": "assistant", "content": response1})
+    # Add first response to context
+    messages.append({
+        "role": "assistant",
+        "content": f"Previous response: {response1}"
+    })
+
+    # Get second model's response
     response2 = query_model("Qwen2.5-72B-Instruct", messages)
     yield f"**Qwen2.5-72B-Instruct**:\n{response2}"
 
-    # Third model sees current prompt + history + both responses
-    messages.append({"role": "assistant", "content": response2})
+    # Add second response to context
+    messages.append({
+        "role": "assistant",
+        "content": f"Previous responses: {response1}\n{response2}"
+    })
+
+    # Get final model's response
     response3 = query_model("Llama3.3-70B-Instruct", messages)
     yield f"**Llama3.3-70B-Instruct**:\n{response3}"
 
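
Context for reviewers: the updated respond() no longer rebuilds the conversation from the history argument; it starts each call from the current message alone and feeds each model's answer to the next one as assistant context. Because it is a generator, a Gradio ChatInterface (not shown in this hunk, an assumption about how app.py wires it up) would re-render the bot message on every yield, so each model's answer replaces the previous one on screen. Below is a minimal, self-contained sketch of that flow under those assumptions; query_model is stubbed here, since its real body (presumably an Inference API call earlier in app.py) is not part of this diff.

    # Sketch only: exercising the new respond() generator standalone.
    # query_model is a stub, not the real implementation from app.py.
    from typing import Dict, List


    def query_model(model_name: str, messages: List[Dict[str, str]]) -> str:
        # Placeholder for the real query_model defined earlier in app.py.
        return f"[{model_name}] reply to: {messages[-1]['content']!r}"


    def respond(message: str, history: List[List[str]]):
        # Same flow as the updated function: one user message, then each
        # model's answer is appended as assistant context for the next model.
        messages = [{"role": "user", "content": message}]

        response1 = query_model("Qwen2.5-Coder-32B-Instruct", messages)
        yield f"**Qwen2.5-Coder-32B-Instruct**:\n{response1}"

        messages.append({"role": "assistant", "content": f"Previous response: {response1}"})
        response2 = query_model("Qwen2.5-72B-Instruct", messages)
        yield f"**Qwen2.5-72B-Instruct**:\n{response2}"

        messages.append({"role": "assistant", "content": f"Previous responses: {response1}\n{response2}"})
        response3 = query_model("Llama3.3-70B-Instruct", messages)
        yield f"**Llama3.3-70B-Instruct**:\n{response3}"


    if __name__ == "__main__":
        # In a Gradio ChatInterface each yield would replace the displayed
        # chatbot message; printing keeps all three stages visible here.
        for partial in respond("Explain Python generators", history=[]):
            print(partial)
            print("---")

One consequence of this design, visible in the sketch: with a streaming chat UI only the final Llama3.3-70B-Instruct answer remains displayed once the generator finishes, and the unused history parameter means earlier turns no longer reach the models.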