acecalisto3 commited on
Commit
6e48490
·
verified ·
1 Parent(s): f33d94f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +322 -292
app.py CHANGED
@@ -3,27 +3,13 @@ import subprocess
3
  import random
4
  from huggingface_hub import InferenceClient
5
  import gradio as gr
6
- from safe_search import safe_search # You need to implement this
7
  from i_search import google
8
  from i_search import i_search as i_s
9
- from prompts import (
10
- ACTION_PROMPT,
11
- ADD_PROMPT,
12
- COMPRESS_HISTORY_PROMPT,
13
- LOG_PROMPT,
14
- LOG_RESPONSE,
15
- MODIFY_PROMPT,
16
- PREFIX,
17
- SEARCH_QUERY,
18
- READ_PROMPT,
19
- TASK_PROMPT,
20
- UNDERSTAND_TEST_RESULTS_PROMPT,
21
- WEB_DEV,
22
- AI_SYSTEM_PROMPT,
23
- PYTHON_CODE_DEV,
24
- )
25
- from utils import parse_action, parse_file_content, read_python_module_structure
26
  from datetime import datetime
 
 
 
27
  now = datetime.now()
28
  date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")
29
 
@@ -31,305 +17,349 @@ client = InferenceClient(
31
  "mistralai/Mixtral-8x7B-Instruct-v0.1"
32
  )
33
 
 
 
 
 
 
 
 
 
 
 
 
 
34
  ############################################
35
 
36
  VERBOSE = True
37
- MAX_HISTORY = 100
38
-
39
- # Default values for inputs
40
- DEFAULT_AGENT = "WEB_DEV"
41
- DEFAULT_SYSTEM_PROMPT = ""
42
- DEFAULT_TEMPERATURE = 0.9
43
- DEFAULT_MAX_TOKENS = 256
44
- DEFAULT_TOP_P = 0.95
45
- DEFAULT_REPETITION_PENALTY = 1.0
46
-
47
- def format_prompt(message, history, system_prompt):
48
- prompt = "<s>"
49
- prompt += f"[INST] {system_prompt}, {message} [/INST]"
50
- for user_prompt, bot_response in history:
51
- prompt += f"[INST] {user_prompt} [/INST]"
52
- prompt += f" {bot_response}</s> "
53
- return prompt
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
54
 
55
  def run_gpt(
56
  prompt_template,
57
  stop_tokens,
58
  max_tokens,
59
  purpose,
60
- temperature,
61
- top_p,
62
- repetition_penalty,
63
- system_prompt,
64
  **prompt_kwargs,
65
  ):
66
- seed = random.randint(1, 1111111111111111)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
67
  generate_kwargs = dict(
68
  temperature=temperature,
69
- max_new_tokens=max_tokens,
70
  top_p=top_p,
71
  repetition_penalty=repetition_penalty,
72
  do_sample=True,
73
  seed=seed,
74
  )
75
 
76
- content = PREFIX.format(
77
- date_time_str=date_time_str,
78
- purpose=purpose,
79
- safe_search=safe_search,
80
- ) + prompt_template.format(**prompt_kwargs)
81
- if VERBOSE:
82
- print(LOG_PROMPT.format(content))
83
 
84
- formatted_prompt = format_prompt(content, prompt_kwargs.get("history", []), system_prompt)
 
85
 
86
- stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
87
- resp = ""
88
  for response in stream:
89
- resp += response.token.text
 
 
90
 
91
- if VERBOSE:
92
- print(LOG_RESPONSE.format(resp))
93
- return resp
94
 
95
- def compress_history(purpose, task, history, directory):
96
- resp = run_gpt(
97
- COMPRESS_HISTORY_PROMPT,
98
- stop_tokens=["observation:", "task:", "action:", "thought:"],
99
- max_tokens=512,
100
- purpose=purpose,
101
- task=task,
102
- history=history,
103
- temperature=DEFAULT_TEMPERATURE,
104
- top_p=DEFAULT_TOP_P,
105
- repetition_penalty=DEFAULT_REPETITION_PENALTY,
106
- system_prompt=DEFAULT_SYSTEM_PROMPT,
107
- )
108
- history = "observation: {}\n".format(resp)
109
- return history
 
 
 
 
 
 
110
 
111
- def call_search(purpose, task, history, directory, action_input):
112
- print("CALLING SEARCH")
113
- try:
114
- if "http" in action_input:
115
- if "<" in action_input:
116
- action_input = action_input.strip("<")
117
- if ">" in action_input:
118
- action_input = action_input.strip(">")
119
- response = i_s(action_input)
120
- print(response)
121
- history += "observation: search result is: {}\n".format(response)
122
- else:
123
- history += "observation: I need to provide a valid URL to 'action: SEARCH action_input=https://URL'\n"
124
- except Exception as e:
125
- history += "observation: {}'\n".format(e)
126
- return "MAIN", None, history, task
127
-
128
- def call_main(purpose, task, history, directory, action_input):
129
- resp = run_gpt(
130
- ACTION_PROMPT,
131
- stop_tokens=["observation:", "task:", "action:", "thought:"],
132
- max_tokens=2096,
133
- purpose=purpose,
134
- task=task,
135
- history=history,
136
- temperature=DEFAULT_TEMPERATURE,
137
- top_p=DEFAULT_TOP_P,
138
- repetition_penalty=DEFAULT_REPETITION_PENALTY,
139
- system_prompt=DEFAULT_SYSTEM_PROMPT,
140
- )
141
- lines = resp.strip().strip("\n").split("\n")
142
- for line in lines:
143
- if line == "":
144
- continue
145
- if line.startswith("thought: "):
146
- history += "{}\n".format(line)
147
- elif line.startswith("action: "):
148
- action_name, action_input = parse_action(line)
149
- print(f"ACTION_NAME :: {action_name}")
150
- print(f"ACTION_INPUT :: {action_input}")
151
- history += "{}\n".format(line)
152
- if "COMPLETE" in action_name or "COMPLETE" in action_input:
153
- task = "END"
154
- return action_name, action_input, history, task
155
- else:
156
- return action_name, action_input, history, task
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
157
  else:
158
- history += "{}\n".format(line)
159
- return "MAIN", None, history, task
160
-
161
- def call_set_task(purpose, task, history, directory, action_input):
162
- task = run_gpt(
163
- TASK_PROMPT,
164
- stop_tokens=[],
165
- max_tokens=64,
166
- purpose=purpose,
167
- task=task,
168
- history=history,
169
- temperature=DEFAULT_TEMPERATURE,
170
- top_p=DEFAULT_TOP_P,
171
- repetition_penalty=DEFAULT_REPETITION_PENALTY,
172
- system_prompt=DEFAULT_SYSTEM_PROMPT,
173
- ).strip("\n")
174
- history += "observation: task has been updated to: {}\n".format(task)
175
- return "MAIN", None, history, task
176
-
177
- def end_fn(purpose, task, history, directory, action_input):
178
- task = "END"
179
- return "COMPLETE", "COMPLETE", history, task
180
-
181
- NAME_TO_FUNC = {
182
- "MAIN": call_main,
183
- "UPDATE-TASK": call_set_task,
184
- "SEARCH": call_search,
185
- "COMPLETE": end_fn,
186
- }
187
-
188
- def run_action(purpose, task, history, directory, action_name, action_input):
189
- print(f"action_name::{action_name}")
190
- try:
191
- if "RESPONSE" in action_name or "COMPLETE" in action_name:
192
- action_name = "COMPLETE"
193
- task = "END"
194
- return action_name, "COMPLETE", history, task
195
-
196
- # compress the history when it is long
197
- if len(history.split("\n")) > MAX_HISTORY:
198
- if VERBOSE:
199
- print("COMPRESSING HISTORY")
200
- history = compress_history(purpose, task, history, directory)
201
- if not action_name in NAME_TO_FUNC:
202
- action_name = "MAIN"
203
- if action_name == "" or action_name == None:
204
- action_name = "MAIN"
205
- assert action_name in NAME_TO_FUNC
206
-
207
- print("RUN: ", action_name, action_input)
208
- return NAME_TO_FUNC[action_name](purpose, task, history, directory, action_input)
209
- except Exception as e:
210
- history += "observation: the previous command did not produce any useful output, I need to check the commands syntax, or use a different command\n"
211
- return "MAIN", None, history, task
212
-
213
- def run(purpose, history, agent_name=DEFAULT_AGENT, system_prompt=DEFAULT_SYSTEM_PROMPT, temperature=DEFAULT_TEMPERATURE, max_tokens=DEFAULT_MAX_TOKENS, top_p=DEFAULT_TOP_P, repetition_penalty=DEFAULT_REPETITION_PENALTY):
214
- task = None
215
- directory = "./"
216
- if history:
217
- history = str(history).strip("[]")
218
- if not history:
219
- history = ""
220
-
221
- action_name = "UPDATE-TASK" if task is None else "MAIN"
222
- action_input = None
223
- while True:
224
- print("")
225
- print("")
226
- print("---")
227
- print("purpose:", purpose)
228
- print("task:", task)
229
- print("---")
230
- print(history)
231
- print("---")
232
-
233
- action_name, action_input, history, task = run_action(
234
- purpose,
235
- task,
236
- history,
237
- directory,
238
- action_name,
239
- action_input,
240
- )
241
- yield (history)
242
- if task == "END":
243
- return (history)
244
-
245
- def process_input(user_input, history, chatbot, agent_name, system_prompt, temperature, max_tokens, top_p, repetition_penalty):
246
- """Processes user input and updates the chatbot."""
247
- purpose = "General"
248
- history = history + [(user_input, "")]
249
- chatbot.append(user_input)
250
-
251
- for response in run(
252
- purpose,
253
- history,
254
- agent_name=agent_name,
255
- system_prompt=system_prompt,
256
- temperature=temperature,
257
- max_tokens=max_tokens,
258
- top_p=top_p,
259
- repetition_penalty=repetition_penalty,
260
- ):
261
- chatbot.append(response)
262
- yield chatbot
263
-
264
- with gr.Blocks() as iface:
265
- with gr.Row():
266
- chatbot = gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, layout="panel")
267
- with gr.Column():
268
- msg = gr.Textbox(label="Enter your message")
269
- with gr.Row():
270
- submit_b = gr.Button("Submit")
271
- clear = gr.ClearButton([msg, chatbot])
272
 
273
- # Input fields for configuration
274
- with gr.Column():
275
- agent_dropdown = gr.Dropdown(
276
- label="Agent",
277
- choices=[s for s in ["WEB_DEV", "AI_SYSTEM_PROMPT", "PYTHON_CODE_DEV"]],
278
- value=DEFAULT_AGENT,
279
- interactive=True,
280
- )
281
- system_prompt_textbox = gr.Textbox(
282
- label="System Prompt",
283
- value=DEFAULT_SYSTEM_PROMPT,
284
- interactive=True,
285
- )
286
- temperature_slider = gr.Slider(
287
- label="Temperature",
288
- value=DEFAULT_TEMPERATURE,
289
- minimum=0.0,
290
- maximum=1.0,
291
- step=0.05,
292
- interactive=True,
293
- info="Higher values produce more diverse outputs",
294
- )
295
- max_tokens_slider = gr.Slider(
296
- label="Max new tokens",
297
- value=DEFAULT_MAX_TOKENS,
298
- minimum=0,
299
- maximum=1048 * 10,
300
- step=64,
301
- interactive=True,
302
- info="The maximum numbers of new tokens",
303
- )
304
- top_p_slider = gr.Slider(
305
- label="Top-p (nucleus sampling)",
306
- value=DEFAULT_TOP_P,
307
- minimum=0.0,
308
- maximum=1,
309
- step=0.05,
310
- interactive=True,
311
- info="Higher values sample more low-probability tokens",
312
- )
313
- repetition_penalty_slider = gr.Slider(
314
- label="Repetition penalty",
315
- value=DEFAULT_REPETITION_PENALTY,
316
- minimum=1.0,
317
- maximum=2.0,
318
- step=0.05,
319
- interactive=True,
320
- info="Penalize repeated tokens",
321
- )
322
-
323
- # Connect input fields to the processing function
324
- submit_b.click(
325
- process_input,
326
- [msg, chatbot, chatbot, agent_dropdown, system_prompt_textbox, temperature_slider, max_tokens_slider, top_p_slider, repetition_penalty_slider],
327
- chatbot,
328
- )
329
- msg.submit(
330
- process_input,
331
- [msg, chatbot, chatbot, agent_dropdown, system_prompt_textbox, temperature_slider, max_tokens_slider, top_p_slider, repetition_penalty_slider],
332
- chatbot,
333
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
334
 
335
- iface.launch()
 
 
3
import json
import logging
import os
import random
from datetime import datetime

import gradio as gr
from huggingface_hub import InferenceClient

from i_search import google
from i_search import i_search as i_s
from safe_search import safe_search
12
+
13
  now = datetime.now()
14
  date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")
15
 
 
17
  "mistralai/Mixtral-8x7B-Instruct-v0.1"
18
  )
19
 
20
# --- Set up logging ---
logging.basicConfig(
    filename="app.log",  # Name of the log file
    level=logging.INFO,  # Set the logging level (INFO, DEBUG, etc.)
    format="%(asctime)s - %(levelname)s - %(message)s",
)

# Agent personas selectable from the UI dropdowns.
agents =[
    "WEB_DEV",
    "AI_SYSTEM_PROMPT",
    "PYTHON_CODE_DEV"
]
############################################

# When True, prompts and responses are also written to the log file.
VERBOSE = True
# NOTE(review): MAX_HISTORY is not referenced anywhere else in this revision —
# confirm whether history compression was meant to be kept.
MAX_HISTORY = 5
#MODEL = "gpt-3.5-turbo" # "gpt-4"

# Header prepended to every prompt built by run_gpt (named placeholders).
PREFIX = """
{date_time_str}
Purpose: {purpose}
Safe Search: {safe_search}
"""

# Log templates. Both use *named* fields, so .format must be called with
# keyword arguments (content=..., resp=...).
LOG_PROMPT = """
PROMPT: {content}
"""

LOG_RESPONSE = """
RESPONSE: {resp}
"""

# Prompt asking the model to summarize a long chat history.
COMPRESS_HISTORY_PROMPT = """
You are a helpful AI assistant. Your task is to compress the following history into a summary that is no longer than 512 tokens.
History:
{history}
"""

# Prompt driving the thought/action agent loop.
ACTION_PROMPT = """
You are a helpful AI assistant. You are working on the task: {task}
Your current history is:
{history}
What is your next thought?
thought:
What is your next action?
action:
"""

# Prompt asking the model to propose the next task.
TASK_PROMPT = """
You are a helpful AI assistant. Your current history is:
{history}
What is the next task?
task:
"""

# Prompt for interrogating test results.
UNDERSTAND_TEST_RESULTS_PROMPT = """
You are a helpful AI assistant. The test results are:
{test_results}
What do you want to know about the test results?
thought:
"""
81
+
82
def format_prompt(message, history, max_history_turns=2):
    """Build a Mixtral-style instruction prompt from chat history.

    Only the last ``max_history_turns`` (user, bot) pairs of *history* are
    included, followed by the new *message* as the final [INST] block.
    """
    segments = ["<s>"]
    for past_user, past_bot in history[-max_history_turns:]:
        segments.append(f"[INST] {past_user} [/INST] {past_bot}</s> ")
    segments.append(f"[INST] {message} [/INST]")
    return "".join(segments)
90
 
91
def run_gpt(
    prompt_template,
    stop_tokens,
    max_tokens,
    purpose,
    **prompt_kwargs,
):
    """Render *prompt_template* under the PREFIX header and run one
    non-streaming generation against the shared InferenceClient.

    Parameters:
        prompt_template: template string with named placeholders filled
            from ``prompt_kwargs``.
        stop_tokens: list of stop strings, passed as ``stop_sequences``.
        max_tokens: maximum number of new tokens to generate.
        purpose: interpolated into the PREFIX header.

    Returns:
        The raw model response string.
    """
    seed = random.randint(1, 1111111111111111)
    logging.info(f"Seed: {seed}")  # logged for traceability; the call below does not consume it

    # NOTE(review): `safe_search` here is the imported *function object*, so
    # its repr is interpolated into the prompt header — confirm whether a
    # flag or a search result was intended.
    content = PREFIX.format(
        date_time_str=date_time_str,
        purpose=purpose,
        safe_search=safe_search,
    ) + prompt_template.format(**prompt_kwargs)
    if VERBOSE:
        # Bug fix: LOG_PROMPT/LOG_RESPONSE use *named* placeholders, so the
        # previous positional .format(content) raised KeyError('content').
        logging.info(LOG_PROMPT.format(content=content))

    resp = client.text_generation(
        content,
        max_new_tokens=max_tokens,
        stop_sequences=stop_tokens,
        temperature=0.7,
        top_p=0.8,
        repetition_penalty=1.5,
    )
    if VERBOSE:
        logging.info(LOG_RESPONSE.format(resp=resp))
    return resp
113
+
114
def generate(prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.7, max_new_tokens=2048, top_p=0.8, repetition_penalty=1.5):
    """Stream a response for *prompt*, yielding the cumulative output text.

    Parameters:
        prompt: the user message to answer.
        history: list of (user, bot) tuples; the last 5 turns are included.
        agent_name: one of ``agents``; selects the system persona.
        sys_prompt: UI-supplied system prompt (currently unused — kept for
            interface compatibility with the UI callbacks).
        temperature / max_new_tokens / top_p / repetition_penalty: sampling
            parameters forwarded to the endpoint.

    Yields:
        The growing response string after each streamed token.
    """
    seed = random.randint(1, 1111111111111111)

    # Map the selected agent to its persona; fall back to the WEB_DEV persona
    # for unknown names (previously an unrecognized agent_name left `agent`
    # unbound and raised UnboundLocalError).
    personas = {
        "WEB_DEV": "You are a helpful AI assistant. You are a web developer.",
        "AI_SYSTEM_PROMPT": "You are a helpful AI assistant. You are an AI system.",
        "PYTHON_CODE_DEV": "You are a helpful AI assistant. You are a Python code developer.",
    }
    system_prompt = personas.get(agent_name, personas["WEB_DEV"])

    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2  # endpoint rejects temperature of exactly 0
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=seed,
    )

    formatted_prompt = format_prompt(prompt, history, max_history_turns=5)  # truncated history
    # Prepend the persona so the model actually receives it (previously it
    # was computed but never used), and pass a *string* to text_generation
    # (previously a list of chat dicts was passed, which the text-generation
    # API does not accept).
    full_prompt = f"{system_prompt}\n{formatted_prompt}"
    logging.info(f"Formatted Prompt: {full_prompt}")

    stream = client.text_generation(full_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""
    for response in stream:
        output += response.token.text
        yield output
    return output
151
 
 
 
 
152
 
153
# NOTE(review): this `additional_inputs` list is not referenced anywhere in
# this revision (main() builds its own components) — confirm before removing.
additional_inputs=[
    gr.Dropdown(
        label="Agents",
        choices=[s for s in agents],
        value=agents[0],
        interactive=True,
    ),
    gr.Textbox(
        label="System Prompt",
        max_lines=1,
        interactive=True,
    ),
    gr.Slider(
        label="Temperature",
        value=0.9,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values produce more diverse outputs",
    ),

    gr.Slider(
        label="Max new tokens",
        value=1048*10,
        minimum=0,
        maximum=1048*10,
        step=64,
        interactive=True,
        info="The maximum numbers of new tokens",
    ),
    gr.Slider(
        label="Top-p (nucleus sampling)",
        value=0.90,
        minimum=0.0,
        maximum=1,
        step=0.05,
        interactive=True,
        info="Higher values sample more low-probability tokens",
    ),
    gr.Slider(
        label="Repetition penalty",
        value=1.2,
        minimum=1.0,
        maximum=2.0,
        step=0.05,
        interactive=True,
        info="Penalize repeated tokens",
    ),


]
205
+
206
# Example prompts rendered as one-click buttons in the "Chat App" tab.
# Each entry is [main prompt, follow-up subtask titles...]; only the first
# element is actually sent to the chat logic.
examples = [
    ["Help me set up TypeScript configurations and integrate ts-loader in my existing React project.",
     "Update Webpack Configurations",
     "Install Dependencies",
     "Configure Ts-Loader",
     "TypeChecking Rules Setup",
     "React Specific Settings",
     "Compilation Options",
     "Test Runner Configuration"],

    ["Guide me through building a serverless microservice using AWS Lambda and API Gateway, connecting to DynamoDB for storage.",
     "Set Up AWS Account",
     "Create Lambda Function",
     "APIGateway Integration",
     "Define DynamoDB Table Scheme",
     "Connect Service To DB",
     "Add Authentication Layers",
     "Monitor Metrics and Set Alarms"],

    ["Migrate our current monolithic PHP application towards containerized services using Docker and Kubernetes for scalability.",
     "Architectural Restructuring Plan",
     "Containerisation Process With Docker",
     "Service Orchestration With Kubernetes",
     "Load Balancing Strategies",
     "Persistent Storage Solutions",
     "Network Policies Enforcement",
     "Continuous Integration / Continuous Delivery"],

    ["Provide guidance on integrating WebAssembly modules compiled from C++ source files into an ongoing web project.",
     "Toolchain Selection (Emscripten vs. LLVM)",
     "Setting Up Compiler Environment",
     ".cpp Source Preparation",
     "Module Building Approach",
     "Memory Management Considerations",
     "Performance Tradeoffs",
     "Seamless Web Assembly Embedding"]
]
243
+
244
def parse_action(line):
    """Parse an ``action: NAME=INPUT`` line into ``(name, input)``.

    Bug fix: the previous ``line.strip("action: ")`` strips a *character
    set* ({a,c,t,i,o,n,:,space}), not the prefix, so e.g.
    ``"action: SEARCH=foo"`` lost the trailing ``oo`` of its input; the
    bare ``split("=")`` also raised ValueError when the input contained
    an ``=``. This version removes only the literal prefix and splits on
    the first ``=``.
    """
    line = line.strip()
    if line.startswith("action: "):
        line = line[len("action: "):]
    action_name, _, action_input = line.partition("=")
    return action_name.strip(), action_input.strip()
248
+
249
def get_file_tree(path):
    """Recursively explore *path* and return a nested dict file tree.

    Directories map to nested dicts; plain files map to None. Entries are
    sorted so output is deterministic (os.listdir order is arbitrary).
    """
    tree = {}
    for item in sorted(os.listdir(path)):
        item_path = os.path.join(path, item)
        if os.path.isdir(item_path):
            tree[item] = get_file_tree(item_path)
        else:
            tree[item] = None
    return tree
261
+
262
def display_file_tree(tree, indent=0):
    """Print a nested file-tree dict, one entry per line.

    Each directory level adds one space of indentation; sub-dicts are
    directories and None values are plain files.
    """
    for entry, children in tree.items():
        print(" " * indent + entry)
        if children is not None:
            display_file_tree(children, indent + 1)
270
+
271
def project_explorer(path):
    """Return (and print) the file tree of *path* as an indented string.

    Bug fix: the previous version only printed to stdout and returned None,
    so the Gradio Textbox wired to this function's output stayed empty.
    (The old docstring also wrongly said "Streamlit".)
    """
    tree = get_file_tree(path)
    lines = []

    def _render(node, depth):
        # One entry per line, one space of indent per directory level.
        for name, child in node.items():
            lines.append(" " * depth + name)
            if child is not None:
                _render(child, depth + 1)

    _render(tree, 0)
    text = "\n".join(lines)
    print(text)
    return text
277
+
278
def chat_app_logic(message, history, purpose, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty):
    """Run one chat turn and return the updated history.

    Streams a response from `generate`, then appends (message, response) to
    *history* — the list-of-tuples format gr.Chatbot displays.

    `purpose` is accepted for interface compatibility with the UI wiring but
    is not currently used.

    Bug fix: the previous version called `generate` with undefined names
    (`model`, `messages`), passed keyword arguments to ``str.join``
    (a TypeError at runtime), and had unreachable code after its `return`.
    """
    response = ""
    # `generate` yields the cumulative output, so the last value yielded is
    # the complete response.
    for partial in generate(
        message,
        history,
        agent_name=agent_name,
        sys_prompt=sys_prompt,
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
    ):
        response = partial
    history.append((message, response))
    return history
320
+
321
def main():
    """Build and launch the FragMixt Gradio UI."""
    with gr.Blocks() as demo:
        gr.Markdown("## FragMixt")
        gr.Markdown("### Agents w/ Agents")

        # Chat Interface
        chatbot = gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel")

        # Input Components
        message = gr.Textbox(label="Enter your message", placeholder="Ask me anything!")
        purpose = gr.Textbox(label="Purpose", placeholder="What is the purpose of this interaction?")
        agent_name = gr.Dropdown(label="Agents", choices=[s for s in agents], value=agents[0], interactive=True)
        sys_prompt = gr.Textbox(label="System Prompt", max_lines=1, interactive=True)
        temperature = gr.Slider(label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs")
        max_new_tokens = gr.Slider(label="Max new tokens", value=1048*10, minimum=0, maximum=1048*10, step=64, interactive=True, info="The maximum numbers of new tokens")
        top_p = gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens")
        repetition_penalty = gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens")

        # Button to submit the message
        submit_button = gr.Button(value="Send")

        # Project Explorer Tab
        with gr.Tab("Project Explorer"):
            project_path = gr.Textbox(label="Project Path", placeholder="/home/user/app/current_project")
            explore_button = gr.Button(value="Explore")
            project_output = gr.Textbox(label="File Tree", lines=20)

        # Chat App Logic Tab
        with gr.Tab("Chat App"):
            history = gr.State([])
            for example in examples:
                # Bind example[0] as a default argument: the previous lambda
                # captured the loop variable late, so every button replayed
                # the *last* example; it also passed the Gradio component
                # objects themselves instead of their current values.
                gr.Button(value=example[0]).click(
                    lambda hist, agent, sp, temp, mnt, tp, rp, ex=example[0]: chat_app_logic(ex, hist, "General", agent, sp, temp, mnt, tp, rp),
                    inputs=[history, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty],
                    outputs=chatbot,
                )

        # Connect components to the chat app logic
        submit_button.click(chat_app_logic, inputs=[message, history, purpose, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty], outputs=chatbot)
        message.submit(chat_app_logic, inputs=[message, history, purpose, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty], outputs=chatbot)

        # Connect components to the project explorer
        explore_button.click(project_explorer, inputs=project_path, outputs=project_output)

    demo.launch(show_api=True)

if __name__ == "__main__":
    main()