luminoussg committed
Commit dd7d400 · verified · 1 Parent(s): f9c29be

Update app.py

Files changed (1): app.py (+7, -3)
app.py CHANGED
@@ -74,10 +74,12 @@ def query_model(model_name: str, messages: List[Dict[str, str]]) -> str:
 def respond(message: str, history: List[List[str]]) -> str:
     """Handle sequential model responses with individual updates"""
     messages = [{"role": "user", "content": message}]
+    current_output = ""
 
     # Get first model's response
     response1 = query_model("Qwen2.5-Coder-32B-Instruct", messages)
-    yield f"**Qwen2.5-Coder-32B-Instruct**:\n{response1}"
+    current_output = f"**Qwen2.5-Coder-32B-Instruct**:\n{response1}"
+    yield current_output
 
     # Add first response to context
     messages.append({
@@ -87,7 +89,8 @@ def respond(message: str, history: List[List[str]]) -> str:
 
     # Get second model's response
     response2 = query_model("Qwen2.5-72B-Instruct", messages)
-    yield f"**Qwen2.5-72B-Instruct**:\n{response2}"
+    current_output = f"{current_output}\n\n**Qwen2.5-72B-Instruct**:\n{response2}"
+    yield current_output
 
     # Add second response to context
     messages.append({
@@ -97,7 +100,8 @@ def respond(message: str, history: List[List[str]]) -> str:
 
     # Get final model's response
     response3 = query_model("Llama3.3-70B-Instruct", messages)
-    yield f"**Llama3.3-70B-Instruct**:\n{response3}"
+    current_output = f"{current_output}\n\n**Llama3.3-70B-Instruct**:\n{response3}"
+    yield current_output
 
     # Create the Gradio interface
     chat_interface = gr.ChatInterface(
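For context on the change: `gr.ChatInterface` treats each value yielded by a generator as a replacement for the whole assistant message, not as an appended chunk, so the old code's per-model `yield` overwrote the earlier responses in the UI. Accumulating into `current_output` keeps all three responses visible. Below is a minimal, self-contained sketch of that pattern; the `fake_model` helper and the placeholder model names are illustrative stand-ins, not part of this Space's `query_model`.

```python
import gradio as gr

def fake_model(name: str, prompt: str) -> str:
    """Illustrative stand-in for the Space's query_model()."""
    return f"(reply from {name} to: {prompt})"

def respond(message, history):
    # Each yield must carry the FULL text rendered so far, because
    # gr.ChatInterface replaces the assistant message with every
    # yielded value rather than concatenating them.
    current_output = ""
    for model in ("model-a", "model-b", "model-c"):
        reply = fake_model(model, message)
        current_output = f"{current_output}\n\n**{model}**:\n{reply}".lstrip()
        yield current_output

if __name__ == "__main__":
    gr.ChatInterface(respond).launch()
```

This mirrors the commit: the three bare `yield f"..."` statements become cumulative `current_output` updates, which is why the diff adds seven lines and removes three.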