luminoussg committed on
Commit 0fac2da · verified · 1 Parent(s): 57a76f2

Update app.py

Files changed (1)
  1. app.py +46 -40
app.py CHANGED
@@ -74,7 +74,7 @@ def query_model(model_name: str, messages: List[Dict[str, str]]) -> str:
     except Exception as e:
         return f"{model_name} error: {str(e)}"

-def respond(message: str, history: List[List[str]], session_id: str) -> tuple[str, str]:
+def respond(message: str, history: List[List[str]], session_id: str) -> Generator[tuple[str, str], None, None]:
     """Handle sequential model responses with context preservation"""
     # Load or initialize session
     session = session_manager.load_session(session_id)
@@ -97,9 +97,8 @@ def respond(message: str, history: List[List[str]], session_id: str) -> tuple[str, str]:
         "content": message
     })

-    responses = []
-
-    # Get first model's response
+    # First model response
+    yield message, "💭 *Qwen2.5-Coder-32B-Instruct is thinking...*"
     response1 = query_model("Qwen2.5-Coder-32B-Instruct", messages)
     session["history"].append({
         "timestamp": datetime.now().isoformat(),
@@ -108,9 +107,10 @@ def respond(message: str, history: List[List[str]], session_id: str) -> tuple[str, str]:
         "content": response1
     })
     messages.append({"role": "assistant", "content": f"Qwen2.5-Coder-32B-Instruct: {response1}"})
-    responses.append(f"**Qwen2.5-Coder-32B-Instruct**:\n{response1}")
+    yield message, f"🔵 **Qwen2.5-Coder-32B-Instruct**\n{response1}"

-    # Get second model's response
+    # Second model response
+    yield message, f"🔵 **Qwen2.5-Coder-32B-Instruct**\n{response1}\n\n💭 *Qwen2.5-72B-Instruct is thinking...*"
     response2 = query_model("Qwen2.5-72B-Instruct", messages)
     session["history"].append({
         "timestamp": datetime.now().isoformat(),
@@ -119,9 +119,10 @@ def respond(message: str, history: List[List[str]], session_id: str) -> tuple[str, str]:
         "content": response2
     })
     messages.append({"role": "assistant", "content": f"Qwen2.5-72B-Instruct: {response2}"})
-    responses.append(f"**Qwen2.5-72B-Instruct**:\n{response2}")
+    yield message, f"🔵 **Qwen2.5-Coder-32B-Instruct**\n{response1}\n\n🟣 **Qwen2.5-72B-Instruct**\n{response2}"

-    # Get final model's response
+    # Final model response
+    yield message, f"🔵 **Qwen2.5-Coder-32B-Instruct**\n{response1}\n\n🟣 **Qwen2.5-72B-Instruct**\n{response2}\n\n💭 *Llama3.3-70B-Instruct is thinking...*"
     response3 = query_model("Llama3.3-70B-Instruct", messages)
     session["history"].append({
         "timestamp": datetime.now().isoformat(),
@@ -130,44 +131,49 @@ def respond(message: str, history: List[List[str]], session_id: str) -> tuple[str, str]:
         "content": response3
     })
     messages.append({"role": "assistant", "content": f"Llama3.3-70B-Instruct: {response3}"})
-    responses.append(f"**Llama3.3-70B-Instruct**:\n{response3}")

     # Save final session state
     session_manager.save_session(session_id, session)

-    # Return response as a single tuple for Gradio chat
-    return message, "\n\n".join(responses)
+    # Return final combined response
+    yield message, f"🔵 **Qwen2.5-Coder-32B-Instruct**\n{response1}\n\n🟣 **Qwen2.5-72B-Instruct**\n{response2}\n\n🟡 **Llama3.3-70B-Instruct**\n{response3}"
+
+# Custom CSS for styling
+css = """
+.message { padding: 15px; margin: 10px 0; border-radius: 10px; }
+.assistant { background: #f8fafc; border-left: 4px solid #3b82f6; }
+.user { background: #eff6ff; border-left: 4px solid #60a5fa; }
+.model-name { font-weight: 600; color: #1e40af; margin-bottom: 8px; }
+.thinking { color: #6b7280; font-style: italic; }
+"""

 # Create the Gradio interface
-with gr.Blocks() as demo:
-    session_id = gr.State(session_manager.create_session)
-
-    gr.Markdown("## Multi-LLM Collaboration Chat")
-    gr.Markdown("A group chat with Qwen2.5-72B, Llama3.3-70B, and Qwen2.5-Coder-32B")
-
-    chatbot = gr.Chatbot()
-    msg = gr.Textbox(label="Message")
-    clear = gr.Button("Clear")
-
-    def user(message, history, session_id):
-        return "", history + [[message, None]]
-
-    def bot(history, session_id):
-        if history[-1][1] is None:
-            message = history[-1][0]
-            _, response = respond(message, history[:-1], session_id)
-            history[-1][1] = response
-            return history
-        return history
-
-    msg.submit(user, [msg, chatbot, session_id], [msg, chatbot]).then(
-        bot, [chatbot, session_id], [chatbot]
-    )
-
-    clear.click(lambda: (session_manager.create_session(), None, []),
-                None,
-                [session_id, msg, chatbot],
-                queue=False)
+demo = gr.ChatInterface(
+    fn=respond,
+    title="Multi-LLM Collaboration Chat",
+    description="Experience collaborative AI thinking with three powerful language models",
+    examples=[
+        ["Explain how quantum computing works"],
+        ["Write a Python function to find prime numbers"],
+    ],
+    additional_inputs=[gr.State(session_manager.create_session)],
+    chatbot=gr.Chatbot(
+        height=600,
+        show_label=False,
+        avatar_images=("👤", "🤖"),
+        bubble_full_width=False,
+        show_copy_button=True,
+        container=True,
+        sanitize_html=False,
+    ),
+    theme=gr.themes.Soft(
+        primary_hue="blue",
+        secondary_hue="indigo",
+        neutral_hue="slate",
+        font=("Inter", "sans-serif"),
+    ),
+    css=css,
+)

 if __name__ == "__main__":
     demo.launch(share=True)
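
Note on the change: the substance of this commit is the switch from a single `return` to a generator, since `gr.ChatInterface` treats a generator `fn` as a streaming bot and re-renders the pending assistant message on every `yield`. The sketch below is a minimal, self-contained illustration of that pattern, not the app's actual code: `fake_model` is a hypothetical stand-in for the real `query_model` calls, and it yields plain reply strings rather than the `(message, response)` tuples used in the diff.

```python
# Minimal sketch of the streaming pattern this commit adopts (illustrative only;
# fake_model is a made-up stand-in for the app's real query_model).
import time
from typing import Generator

import gradio as gr

MODELS = ("Qwen2.5-Coder-32B-Instruct", "Qwen2.5-72B-Instruct", "Llama3.3-70B-Instruct")

def fake_model(name: str, prompt: str) -> str:
    time.sleep(1)  # stand-in for a real inference call
    return f"{name} reply to: {prompt}"

def respond(message: str, history) -> Generator[str, None, None]:
    partial = ""
    for name in MODELS:
        # Show a progress placeholder, then replace it with the model's answer
        # on the next yield; ChatInterface redraws the message each time.
        yield partial + f"💭 *{name} is thinking...*"
        partial += f"**{name}**\n{fake_model(name, message)}\n\n"
        yield partial

demo = gr.ChatInterface(fn=respond, title="Streaming sketch")

if __name__ == "__main__":
    demo.launch()
```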
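Both the old and the new version also lean on a `session_manager` whose implementation sits outside this diff. Purely as an assumption about its shape, a minimal JSON-file-backed object exposing the three methods the app calls (`create_session`, `load_session`, `save_session`) could look like the following; the class name, storage layout, and defaults are hypothetical.

```python
# Hypothetical stand-in for the session_manager referenced in the diff;
# the project's real class is not shown in this commit.
import json
import uuid
from pathlib import Path

class SessionManager:
    def __init__(self, root: str = "sessions"):
        self.root = Path(root)
        self.root.mkdir(exist_ok=True)

    def create_session(self) -> str:
        # Opaque id handed to gr.State as the per-user default value.
        return uuid.uuid4().hex

    def _path(self, session_id: str) -> Path:
        return self.root / f"{session_id}.json"

    def load_session(self, session_id: str) -> dict:
        p = self._path(session_id)
        if p.exists():
            return json.loads(p.read_text())
        return {"history": []}  # new sessions start with an empty history

    def save_session(self, session_id: str, session: dict) -> None:
        self._path(session_id).write_text(json.dumps(session, indent=2))

session_manager = SessionManager()
```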