luminoussg committed
Commit 73c4292 · verified · 1 Parent(s): cac494e

Update app.py

Files changed (1):
  app.py  +32 -39
app.py CHANGED
@@ -71,53 +71,46 @@ def query_model(model_name: str, messages: List[Dict[str, str]]) -> str:
     except Exception as e:
         return f"{model_name} error: {str(e)}"
 
-class ConversationState:
-    def __init__(self):
-        self.messages = []
-
-    def add_user_message(self, message: str):
-        self.messages.append({"role": "user", "content": message})
-
-    def add_assistant_message(self, model_name: str, message: str):
-        self.messages.append({
-            "role": "assistant",
-            "model": model_name,
-            "content": message
-        })
-
-    def get_context(self) -> List[Dict[str, str]]:
-        return [
-            {
-                "role": msg["role"],
-                "content": f"{msg.get('model', '')}: {msg['content']}" if msg["role"] == "assistant" else msg["content"]
-            }
-            for msg in self.messages
-        ]
-
-conversation_state = ConversationState()
-
 def respond(message: str, history: List[List[str]]) -> str:
-    """Handle sequential model responses with continuous conversation context"""
+    """Handle sequential model responses with continuous context"""
+
+    # Build full conversation history from previous interactions
+    conversation = []
+    if history:
+        for user_msg, assistant_msg in history:
+            conversation.append({"role": "user", "content": user_msg})
+            if assistant_msg:
+                # Split assistant message into individual model responses
+                responses = assistant_msg.split("\n\n")
+                for resp in responses:
+                    if resp:
+                        conversation.append({"role": "assistant", "content": resp})
 
-    # Add current message to conversation state
-    conversation_state.add_user_message(message)
-    current_context = conversation_state.get_context()
+    # Add current message
+    conversation.append({"role": "user", "content": message})
 
     # Get first model's response
-    response1 = query_model("Qwen2.5-Coder-32B-Instruct", current_context)
-    conversation_state.add_assistant_message("Qwen2.5-Coder-32B-Instruct", response1)
+    response1 = query_model("Qwen2.5-Coder-32B-Instruct", conversation)
     yield f"**Qwen2.5-Coder-32B-Instruct**:\n{response1}"
 
-    # Get second model's response with updated context
-    current_context = conversation_state.get_context()
-    response2 = query_model("Qwen2.5-72B-Instruct", current_context)
-    conversation_state.add_assistant_message("Qwen2.5-72B-Instruct", response2)
+    # Add first response to context
+    conversation.append({
+        "role": "assistant",
+        "content": f"**Qwen2.5-Coder-32B-Instruct**:\n{response1}"
+    })
+
+    # Get second model's response
+    response2 = query_model("Qwen2.5-72B-Instruct", conversation)
     yield f"**Qwen2.5-72B-Instruct**:\n{response2}"
 
-    # Get final model's response with complete context
-    current_context = conversation_state.get_context()
-    response3 = query_model("Llama3.3-70B-Instruct", current_context)
-    conversation_state.add_assistant_message("Llama3.3-70B-Instruct", response3)
+    # Add second response to context
+    conversation.append({
+        "role": "assistant",
+        "content": f"**Qwen2.5-72B-Instruct**:\n{response2}"
+    })
+
+    # Get final model's response
+    response3 = query_model("Llama3.3-70B-Instruct", conversation)
     yield f"**Llama3.3-70B-Instruct**:\n{response3}"
 
 # Create the Gradio interface
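For reference, the rewritten respond boils down to the stateless pattern below. This is a minimal, runnable sketch with query_model stubbed out so it needs no Hugging Face API access; the stub, the sample history, and the loop over model names are illustrative assumptions, not the commit's literal code:

```python
from typing import Dict, List

def query_model(model_name: str, messages: List[Dict[str, str]]) -> str:
    # Stub standing in for the real Hugging Face Inference API call (assumption)
    return f"[{model_name} saw {len(messages)} messages]"

def respond(message: str, history: List[List[str]]):
    """Rebuild the conversation from Gradio's history, then query each model in turn."""
    conversation = []
    for user_msg, assistant_msg in history or []:
        conversation.append({"role": "user", "content": user_msg})
        if assistant_msg:
            # One Gradio turn may hold several model replies separated by blank lines
            for resp in assistant_msg.split("\n\n"):
                if resp:
                    conversation.append({"role": "assistant", "content": resp})
    conversation.append({"role": "user", "content": message})

    for model in ("Qwen2.5-Coder-32B-Instruct",
                  "Qwen2.5-72B-Instruct",
                  "Llama3.3-70B-Instruct"):
        reply = query_model(model, conversation)
        # Each later model sees the earlier models' answers in its context
        conversation.append({"role": "assistant", "content": f"**{model}**:\n{reply}"})
        yield f"**{model}**:\n{reply}"

if __name__ == "__main__":
    # One prior exchange, shaped the way Gradio passes `history`
    past = [["hi", "**Qwen2.5-Coder-32B-Instruct**:\nhello"]]
    for chunk in respond("explain generators", past):
        print(chunk)
```

Because respond is a generator, Gradio streams each yielded value to the chat UI. Rebuilding the context from the history argument instead of the removed module-level ConversationState also keeps the handler stateless, which presumably is the point of the change: a global singleton would be shared across concurrent user sessions.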