DrishtiSharma committed

Commit 2482a10 · verified · 1 Parent(s): 860a6d7

Delete interim.py

Files changed (1)
  1. interim.py +0 -234
interim.py DELETED
@@ -1,234 +0,0 @@
- import streamlit as st
- import json
- from typing import Any, Iterable
- from moa.agent import MOAgent
- from moa.agent.moa import ResponseChunk
- from streamlit_ace import st_ace
- import copy
-
- # Default configuration
- default_config = {
-     "main_model": "llama3-70b-8192",
-     "cycles": 3,
-     "layer_agent_config": {}
- }
-
- layer_agent_config_def = {
-     "layer_agent_1": {
-         "system_prompt": "Think through your response step by step. {helper_response}",
-         "model_name": "llama3-8b-8192"
-     },
-     "layer_agent_2": {
-         "system_prompt": "Respond with a thought and then your response to the question. {helper_response}",
-         "model_name": "gemma-7b-it",
-         "temperature": 0.7
-     },
-     "layer_agent_3": {
-         "system_prompt": "You are an expert at logic and reasoning. Always take a logical approach to the answer. {helper_response}",
-         "model_name": "llama3-8b-8192"
-     },
- }
-
- # Recommended Configuration
- rec_config = {
-     "main_model": "llama3-70b-8192",
-     "cycles": 2,
-     "layer_agent_config": {}
- }
-
- layer_agent_config_rec = {
-     "layer_agent_1": {
-         "system_prompt": "Think through your response step by step. {helper_response}",
-         "model_name": "llama3-8b-8192",
-         "temperature": 0.1
-     },
-     "layer_agent_2": {
-         "system_prompt": "Respond with a thought and then your response to the question. {helper_response}",
-         "model_name": "llama3-8b-8192",
-         "temperature": 0.2
-     },
-     "layer_agent_3": {
-         "system_prompt": "You are an expert at logic and reasoning. Always take a logical approach to the answer. {helper_response}",
-         "model_name": "llama3-8b-8192",
-         "temperature": 0.4
-     },
-     "layer_agent_4": {
-         "system_prompt": "You are an expert planner agent. Create a plan for how to answer the human's query. {helper_response}",
-         "model_name": "mixtral-8x7b-32768",
-         "temperature": 0.5
-     },
- }
-
- def stream_response(messages: Iterable[ResponseChunk]):
-     layer_outputs = {}
-     progress_bar = st.progress(0)
-     total_steps = len(messages) if hasattr(messages, "__len__") else 100  # Estimate total messages for progress tracking; plain generators have no len()
-     current_step = 0
-
-     for message in messages:
-         current_step += 1
-         progress_bar.progress(min(current_step / total_steps, 1.0))
-
-         if message['response_type'] == 'intermediate':
-             layer = message['metadata']['layer']
-             if layer not in layer_outputs:
-                 layer_outputs[layer] = []
-             layer_outputs[layer].append(message['delta'])
-
-             # Real-time rendering for intermediate outputs
-             with st.container():
-                 st.markdown(f"**Layer {layer} (In Progress)**")
-                 for output in layer_outputs[layer]:
-                     st.markdown(f"- {output}")
-
-         else:
-             # Finalize and display accumulated layer outputs
-             for layer, outputs in layer_outputs.items():
-                 st.markdown(f"### Layer {layer} Final Output")
-                 for output in outputs:
-                     st.write(output)
-             layer_outputs = {}  # Reset for next layers
-
-             # Yield the main agent's output
-             yield message['delta']
-
-     progress_bar.empty()  # Clear progress bar once done
-
- def set_moa_agent(
-     main_model: str = default_config['main_model'],
-     cycles: int = default_config['cycles'],
-     layer_agent_config: dict[str, dict[str, Any]] = copy.deepcopy(layer_agent_config_def),
-     main_model_temperature: float = 0.1,
-     override: bool = False
- ):
-     if override or ("main_model" not in st.session_state):
-         st.session_state.main_model = main_model
-
-     if override or ("cycles" not in st.session_state):
-         st.session_state.cycles = cycles
-
-     if override or ("layer_agent_config" not in st.session_state):
-         st.session_state.layer_agent_config = layer_agent_config
-
-     if override or ("main_temp" not in st.session_state):
-         st.session_state.main_temp = main_model_temperature
-
-     cls_ly_conf = copy.deepcopy(st.session_state.layer_agent_config)
-     if override or ("moa_agent" not in st.session_state):
-         st.session_state.moa_agent = MOAgent.from_config(
-             main_model=st.session_state.main_model,
-             cycles=st.session_state.cycles,
-             layer_agent_config=cls_ly_conf,
-             temperature=st.session_state.main_temp
-         )
-
-     del cls_ly_conf
-
- st.set_page_config(
-     page_title="Mixture of Agents",
-     layout="wide",
-     menu_items={'About': "## Mixture-of-Agents\nPowered by Groq"}
- )
-
- valid_model_names = [
-     'llama3-70b-8192',
-     'llama3-8b-8192',
-     'gemma-7b-it',
-     'gemma2-9b-it',
-     'mixtral-8x7b-32768'
- ]
-
- if "messages" not in st.session_state:
-     st.session_state.messages = []
-
- set_moa_agent()
-
- # Sidebar Configuration
- with st.sidebar:
-     st.title("MOA Configuration")
-     with st.form("Agent Configuration", clear_on_submit=False):
-         if st.form_submit_button("Use Recommended Config"):
-             set_moa_agent(
-                 main_model=rec_config['main_model'],
-                 cycles=rec_config['cycles'],
-                 layer_agent_config=layer_agent_config_rec,
-                 override=True
-             )
-             st.session_state.messages = []
-             st.success("Configuration updated successfully!")
-
-         # Config toggling
-         show_advanced = st.checkbox("Show Advanced Configurations")
-         if show_advanced:
-             new_main_model = st.selectbox(
-                 "Main Model",
-                 valid_model_names,
-                 index=valid_model_names.index(st.session_state.main_model)
-             )
-
-             new_cycles = st.number_input(
-                 "Number of Layers",
-                 min_value=1,
-                 max_value=10,
-                 value=st.session_state.cycles
-             )
-
-             main_temperature = st.slider(
-                 "Main Model Temperature",
-                 min_value=0.0,
-                 max_value=1.0,
-                 value=st.session_state.main_temp,
-                 step=0.05
-             )
-
-             new_layer_agent_config = st_ace(
-                 value=json.dumps(st.session_state.layer_agent_config, indent=2),
-                 language="json",
-                 show_gutter=False,
-                 wrap=True,
-                 auto_update=True
-             )
-
-             if st.form_submit_button("Update Config"):
-                 try:
-                     parsed_config = json.loads(new_layer_agent_config)
-                     set_moa_agent(
-                         main_model=new_main_model,
-                         cycles=new_cycles,
-                         layer_agent_config=parsed_config,
-                         main_model_temperature=main_temperature,
-                         override=True
-                     )
-                     st.session_state.messages = []
-                     st.success("Configuration updated successfully!")
-                 except json.JSONDecodeError:
-                     st.error("Invalid JSON in Layer Agent Config.")
-                 except Exception as e:
-                     st.error(f"Error updating config: {str(e)}")
-
- # Main app layout
- st.header("Mixture of Agents")
- st.markdown("Real-time response tracking with intermediate and final results.")
- with st.expander("Current MOA Configuration", expanded=False):
-     st.json(st.session_state.layer_agent_config)
-
- # Chat interface
- for message in st.session_state.messages:
-     with st.chat_message(message["role"]):
-         st.markdown(message["content"])
-
- if query := st.chat_input("Ask a question"):
-     st.session_state.messages.append({"role": "user", "content": query})
-     with st.chat_message("user"):
-         st.markdown(query)
-
-     moa_agent: MOAgent = st.session_state.moa_agent
-     with st.chat_message("assistant"):
-         message_placeholder = st.empty()
-         ast_mess = stream_response(moa_agent.chat(query, output_format="json"))
-         response = st.write_stream(ast_mess)
-
-     st.session_state.messages.append({"role": "assistant", "content": response})
-
- st.markdown("---")
- st.markdown("Powered by [Groq](https://groq.com).")