luminoussg committed
Commit cac494e · verified · Parent: 4052330

Update app.py

Files changed (1): app.py (+42, -37)
app.py CHANGED
@@ -71,49 +71,54 @@ def query_model(model_name: str, messages: List[Dict[str, str]]) -> str:
     except Exception as e:
         return f"{model_name} error: {str(e)}"
 
+class ConversationState:
+    def __init__(self):
+        self.messages = []
+
+    def add_user_message(self, message: str):
+        self.messages.append({"role": "user", "content": message})
+
+    def add_assistant_message(self, model_name: str, message: str):
+        self.messages.append({
+            "role": "assistant",
+            "model": model_name,
+            "content": message
+        })
+
+    def get_context(self) -> List[Dict[str, str]]:
+        return [
+            {
+                "role": msg["role"],
+                "content": f"{msg.get('model', '')}: {msg['content']}" if msg["role"] == "assistant" else msg["content"]
+            }
+            for msg in self.messages
+        ]
+
+conversation_state = ConversationState()
+
 def respond(message: str, history: List[List[str]]) -> str:
-    """Handle sequential model responses with continuous context"""
-    # Build full conversation history
-    messages = []
-    for user_msg, assistant_msg in history:
-        messages.append({"role": "user", "content": user_msg})
-        if assistant_msg:
-            # Split the assistant message to get individual model responses
-            model_responses = assistant_msg.split("\n\n")
-            for response in model_responses:
-                if "**" in response:  # Only add valid model responses
-                    messages.append({"role": "assistant", "content": response})
+    """Handle sequential model responses with continuous conversation context"""
 
-    # Add current message
-    messages.append({"role": "user", "content": message})
-    current_output = ""
+    # Add current message to conversation state
+    conversation_state.add_user_message(message)
+    current_context = conversation_state.get_context()
 
     # Get first model's response
-    response1 = query_model("Qwen2.5-Coder-32B-Instruct", messages)
-    current_output = f"**Qwen2.5-Coder-32B-Instruct**:\n{response1}"
-    yield current_output
-
-    # Add first response to context
-    messages.append({
-        "role": "assistant",
-        "content": f"Previous response: {response1}"
-    })
-
-    # Get second model's response
-    response2 = query_model("Qwen2.5-72B-Instruct", messages)
-    current_output = f"{current_output}\n\n**Qwen2.5-72B-Instruct**:\n{response2}"
-    yield current_output
+    response1 = query_model("Qwen2.5-Coder-32B-Instruct", current_context)
+    conversation_state.add_assistant_message("Qwen2.5-Coder-32B-Instruct", response1)
+    yield f"**Qwen2.5-Coder-32B-Instruct**:\n{response1}"
 
-    # Add second response to context
-    messages.append({
-        "role": "assistant",
-        "content": f"Previous responses: {response1}\n{response2}"
-    })
+    # Get second model's response with updated context
+    current_context = conversation_state.get_context()
+    response2 = query_model("Qwen2.5-72B-Instruct", current_context)
+    conversation_state.add_assistant_message("Qwen2.5-72B-Instruct", response2)
+    yield f"**Qwen2.5-72B-Instruct**:\n{response2}"
 
-    # Get final model's response
-    response3 = query_model("Llama3.3-70B-Instruct", messages)
-    current_output = f"{current_output}\n\n**Llama3.3-70B-Instruct**:\n{response3}"
-    yield current_output
+    # Get final model's response with complete context
+    current_context = conversation_state.get_context()
+    response3 = query_model("Llama3.3-70B-Instruct", current_context)
+    conversation_state.add_assistant_message("Llama3.3-70B-Instruct", response3)
+    yield f"**Llama3.3-70B-Instruct**:\n{response3}"
 
 # Create the Gradio interface
 chat_interface = gr.ChatInterface(
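
For reference, a minimal, self-contained sketch of how the new ConversationState accumulates turns and serializes them for the next model in the chain. The sample prompt and reply below are invented for illustration; query_model is not needed to exercise the class itself.

from typing import Dict, List

class ConversationState:
    def __init__(self):
        self.messages = []

    def add_user_message(self, message: str):
        self.messages.append({"role": "user", "content": message})

    def add_assistant_message(self, model_name: str, message: str):
        # The producing model's name is stored alongside the content so it
        # can be surfaced to the other models later.
        self.messages.append({"role": "assistant", "model": model_name, "content": message})

    def get_context(self) -> List[Dict[str, str]]:
        # Assistant turns are re-serialized as "<model>: <content>" so each
        # downstream model sees which peer produced which answer.
        return [
            {
                "role": msg["role"],
                "content": f"{msg.get('model', '')}: {msg['content']}" if msg["role"] == "assistant" else msg["content"],
            }
            for msg in self.messages
        ]

state = ConversationState()
state.add_user_message("Summarize PEP 8 in one line.")
state.add_assistant_message("Qwen2.5-Coder-32B-Instruct", "Readable, consistent Python style.")
print(state.get_context())
# [{'role': 'user', 'content': 'Summarize PEP 8 in one line.'},
#  {'role': 'assistant', 'content': 'Qwen2.5-Coder-32B-Instruct: Readable, consistent Python style.'}]

One design consequence worth noting: conversation_state is instantiated once at module scope, so all chat sessions served by the same app process append to a single shared history.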