luminoussg committed
Commit 327109c · verified · 1 Parent(s): 73c4292

Update app.py

Files changed (1)
app.py +16 -33
app.py CHANGED
@@ -72,45 +72,28 @@ def query_model(model_name: str, messages: List[Dict[str, str]]) -> str:
         return f"{model_name} error: {str(e)}"
 
 def respond(message: str, history: List[List[str]]) -> str:
-    """Handle sequential model responses with continuous context"""
+    """Handle continuous contextual conversations"""
+    # Build full message history from previous interactions
+    messages = []
+    for user_msg, bot_msg in history:
+        messages.append({"role": "user", "content": user_msg})
+        messages.append({"role": "assistant", "content": bot_msg})
 
-    # Build full conversation history from previous interactions
-    conversation = []
-    if history:
-        for user_msg, assistant_msg in history:
-            conversation.append({"role": "user", "content": user_msg})
-            if assistant_msg:
-                # Split assistant message into individual model responses
-                responses = assistant_msg.split("\n\n")
-                for resp in responses:
-                    if resp:
-                        conversation.append({"role": "assistant", "content": resp})
+    # Add new user message
+    messages.append({"role": "user", "content": message})
 
-    # Add current message
-    conversation.append({"role": "user", "content": message})
-
-    # Get first model's response
-    response1 = query_model("Qwen2.5-Coder-32B-Instruct", conversation)
+    # First model sees current prompt + full history
+    response1 = query_model("Qwen2.5-Coder-32B-Instruct", messages)
     yield f"**Qwen2.5-Coder-32B-Instruct**:\n{response1}"
 
-    # Add first response to context
-    conversation.append({
-        "role": "assistant",
-        "content": f"**Qwen2.5-Coder-32B-Instruct**:\n{response1}"
-    })
-
-    # Get second model's response
-    response2 = query_model("Qwen2.5-72B-Instruct", conversation)
+    # Second model sees current prompt + history + first response
+    messages.append({"role": "assistant", "content": response1})
+    response2 = query_model("Qwen2.5-72B-Instruct", messages)
     yield f"**Qwen2.5-72B-Instruct**:\n{response2}"
 
-    # Add second response to context
-    conversation.append({
-        "role": "assistant",
-        "content": f"**Qwen2.5-72B-Instruct**:\n{response2}"
-    })
-
-    # Get final model's response
-    response3 = query_model("Llama3.3-70B-Instruct", conversation)
+    # Third model sees current prompt + history + both responses
+    messages.append({"role": "assistant", "content": response2})
+    response3 = query_model("Llama3.3-70B-Instruct", messages)
     yield f"**Llama3.3-70B-Instruct**:\n{response3}"
 
 # Create the Gradio interface
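
For context, the diff shows only the error path of query_model and stops at the interface comment. Below is a minimal runnable sketch of how the updated respond generator fits into the rest of the app, assuming query_model calls Hugging Face's serverless Inference API via huggingface_hub.InferenceClient and the app is served with gr.ChatInterface; the MODEL_IDS mapping, repo IDs, HF_TOKEN variable, and max_tokens value are assumptions for illustration and are not part of this commit.

import os
from typing import Dict, List

import gradio as gr
from huggingface_hub import InferenceClient

# Hypothetical display-name -> Hub repo-id mapping; the real app may differ.
MODEL_IDS = {
    "Qwen2.5-Coder-32B-Instruct": "Qwen/Qwen2.5-Coder-32B-Instruct",
    "Qwen2.5-72B-Instruct": "Qwen/Qwen2.5-72B-Instruct",
    "Llama3.3-70B-Instruct": "meta-llama/Llama-3.3-70B-Instruct",
}

def query_model(model_name: str, messages: List[Dict[str, str]]) -> str:
    """Send the running message list to one model and return its reply text."""
    try:
        client = InferenceClient(model=MODEL_IDS[model_name],
                                 token=os.environ.get("HF_TOKEN"))  # assumed auth
        out = client.chat_completion(messages=messages, max_tokens=1024)
        return out.choices[0].message.content
    except Exception as e:
        return f"{model_name} error: {str(e)}"  # error path visible in the diff

def respond(message: str, history: List[List[str]]):
    # Body as committed above: rebuild the message list from history, then
    # query the three models in sequence, appending each reply before the next.
    messages = []
    for user_msg, bot_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": bot_msg})
    messages.append({"role": "user", "content": message})

    response1 = query_model("Qwen2.5-Coder-32B-Instruct", messages)
    yield f"**Qwen2.5-Coder-32B-Instruct**:\n{response1}"
    messages.append({"role": "assistant", "content": response1})

    response2 = query_model("Qwen2.5-72B-Instruct", messages)
    yield f"**Qwen2.5-72B-Instruct**:\n{response2}"
    messages.append({"role": "assistant", "content": response2})

    response3 = query_model("Llama3.3-70B-Instruct", messages)
    yield f"**Llama3.3-70B-Instruct**:\n{response3}"

# Create the Gradio interface: with a generator fn, ChatInterface streams each
# yielded string as the in-progress reply for the current turn.
demo = gr.ChatInterface(fn=respond)
demo.launch()

If the app does use gr.ChatInterface, each successive yield replaces the displayed reply, so the stored bot turn ends up holding only the last model's text. That is consistent with the new code treating bot_msg as a single assistant message and with the removal of the old split("\n\n") reconstruction logic.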