acecalisto3 committed
Commit 06d0c93 · verified · 1 Parent(s): 8f208cb

Update app.py

Files changed (1): app.py (+177, -60)

app.py CHANGED
@@ -5,7 +5,6 @@ import time
 from typing import Dict, List, Tuple
 from datetime import datetime
 import logging
-
 import gradio as gr
 from huggingface_hub import InferenceClient
 from safe_search import safe_search
@@ -19,7 +18,6 @@ TEMPERATURE = 0.7
 TOP_P = 0.8
 REPETITION_PENALTY = 1.5
 MODEL_NAME = "mistralai/Mixtral-8x7B-Instruct-v0.1"
-
 API_KEY = os.getenv("HUGGINGFACE_API_KEY")
 
 # --- Logging Setup ---
@@ -44,62 +42,54 @@ PREFIX = """
 Purpose: {purpose}
 Safe Search: {safe_search}
 """
-
 LOG_PROMPT = """
 PROMPT: {content}
 """
-
 LOG_RESPONSE = """
 RESPONSE: {resp}
 """
-
 COMPRESS_HISTORY_PROMPT = """
 You are a helpful AI assistant. Your task is to compress the following history into a summary that is no longer than 512 tokens.
-History:
-{history}
+History: {history}
 """
-
 ACTION_PROMPT = """
 You are a helpful AI assistant. You are working on the task: {task}
-Your current history is:
-{history}
+Your current history is: {history}
 What is your next thought?
-thought:
+thought:
 What is your next action?
-action:
+action:
 """
-
 TASK_PROMPT = """
-You are a helpful AI assistant. Your current history is:
-{history}
+You are a helpful AI assistant. Your current history is: {history}
 What is the next task?
-task:
+task:
 """
-
 UNDERSTAND_TEST_RESULTS_PROMPT = """
-You are a helpful AI assistant. The test results are:
-{test_results}
+You are a helpful AI assistant. The test results are: {test_results}
 What do you want to know about the test results?
-thought:
+thought:
 """
 
 # --- Functions ---
 def format_prompt(message: str, history: List[Tuple[str, str]], max_history_turns: int = 2) -> str:
+    """Formats the prompt for the LLM, including the message and recent history."""
     prompt = " "
     for user_prompt, bot_response in history[-max_history_turns:]:
         prompt += f"[INST] {user_prompt} [/INST] {bot_response} "
     prompt += f"[INST] {message} [/INST]"
     return prompt
 
+
 def run_llm(
     prompt_template: str,
     stop_tokens: List[str],
     purpose: str,
-    **prompt_kwargs: Dict
+    **prompt_kwargs: Dict,
 ) -> str:
+    """Runs the LLM with the given prompt template, stop tokens, and purpose."""
     seed = random.randint(1, 1111111111111111)
     logging.info(f"Seed: {seed}")
-
     content = PREFIX.format(
         date_time_str=datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
         purpose=purpose,
@@ -107,13 +97,20 @@ def run_llm(
     ) + prompt_template.format(**prompt_kwargs)
     if VERBOSE:
         logging.info(LOG_PROMPT.format(content=content))
-
     client = InferenceClient(model=MODEL_NAME, token=API_KEY)
-    resp = client.text_generation(content, max_new_tokens=MAX_TOKENS, stop_sequences=stop_tokens, temperature=TEMPERATURE, top_p=TOP_P, repetition_penalty=REPETITION_PENALTY)
+    resp = client.text_generation(
+        content,
+        max_new_tokens=MAX_TOKENS,
+        stop_sequences=stop_tokens,
+        temperature=TEMPERATURE,
+        top_p=TOP_P,
+        repetition_penalty=REPETITION_PENALTY,
+    )
     if VERBOSE:
         logging.info(LOG_RESPONSE.format(resp=resp))
     return resp
 
+
 def generate(
     prompt: str,
     history: List[Tuple[str, str]],
@@ -124,6 +121,7 @@ def generate(
     top_p: float = TOP_P,
     repetition_penalty: float = REPETITION_PENALTY,
 ) -> str:
+    """Generates a response from the LLM based on the prompt, history, and other parameters."""
     content = PREFIX.format(
         date_time_str=datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
         purpose=f"Generating response as {agent_name}",
@@ -131,121 +129,240 @@ def generate(
     ) + sys_prompt + "\n" + prompt
     if VERBOSE:
         logging.info(LOG_PROMPT.format(content=content))
-
     client = InferenceClient(model=MODEL_NAME, token=API_KEY)
-    stream = client.text_generation(content, stream=True, details=True, return_full_text=False, temperature=temperature, top_p=top_p, repetition_penalty=repetition_penalty, max_new_tokens=max_new_tokens)
-    return "".join(chunk.text for chunk in stream)
+    stream = client.text_generation(
+        content,
+        stream=True,
+        details=True,
+        return_full_text=False,
+        temperature=temperature,
+        top_p=top_p,
+        repetition_penalty=repetition_penalty,
+        max_new_tokens=max_new_tokens,
+    )
+    response = "".join(chunk.text for chunk in stream)
+    return response
+
 
 def main():
+    """Main function to launch the Gradio interface."""
     with gr.Blocks() as demo:
         gr.Markdown("## FragMixt: The No-Code Development Powerhouse")
         gr.Markdown("### Your AI-Powered Development Companion")
-
         with gr.Row():
             with gr.Column(scale=3):
-                chatbot = gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel")
-                message = gr.Textbox(label="Enter your message", placeholder="Ask me anything!")
+                chatbot = gr.Chatbot(
+                    show_label=False,
+                    show_share_button=False,
+                    show_copy_button=True,
+                    likeable=True,
+                    layout="panel",
+                )
+                message = gr.Textbox(
+                    label="Enter your message", placeholder="Ask me anything!"
+                )
                 submit_button = gr.Button(value="Send")
-
             with gr.Column(scale=1):
-                purpose = gr.Textbox(label="Purpose", placeholder="What is the purpose of this interaction?")
-                agent_name = gr.Dropdown(label="Agents", choices=[s for s in agents], value=agents[0], interactive=True)
-                sys_prompt = gr.Textbox(label="System Prompt", max_lines=1, interactive=True)
-                temperature = gr.Slider(label="Temperature", value=TEMPERATURE, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs")
-                max_new_tokens = gr.Slider(label="Max new tokens", value=MAX_TOKENS, minimum=0, maximum=1048*10, step=64, interactive=True, info="The maximum numbers of new tokens")
-                top_p = gr.Slider(label="Top-p (nucleus sampling)", value=TOP_P, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens")
-                repetition_penalty = gr.Slider(label="Repetition penalty", value=REPETITION_PENALTY, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens")
-
+                purpose = gr.Textbox(
+                    label="Purpose", placeholder="What is the purpose of this interaction?"
+                )
+                agent_name = gr.Dropdown(
+                    label="Agents",
+                    choices=[s for s in agents],
+                    value=agents[0],
+                    interactive=True,
+                )
+                sys_prompt = gr.Textbox(
+                    label="System Prompt", max_lines=1, interactive=True
+                )
+                temperature = gr.Slider(
+                    label="Temperature",
+                    value=TEMPERATURE,
+                    minimum=0.0,
+                    maximum=1.0,
+                    step=0.05,
+                    interactive=True,
+                    info="Higher values produce more diverse outputs",
+                )
+                max_new_tokens = gr.Slider(
+                    label="Max new tokens",
+                    value=MAX_TOKENS,
+                    minimum=0,
+                    maximum=1048 * 10,
+                    step=64,
+                    interactive=True,
+                    info="The maximum numbers of new tokens",
+                )
+                top_p = gr.Slider(
+                    label="Top-p (nucleus sampling)",
+                    value=TOP_P,
+                    minimum=0.0,
+                    maximum=1,
+                    step=0.05,
+                    interactive=True,
+                    info="Higher values sample more low-probability tokens",
+                )
+                repetition_penalty = gr.Slider(
+                    label="Repetition penalty",
+                    value=REPETITION_PENALTY,
+                    minimum=1.0,
+                    maximum=2.0,
+                    step=0.05,
+                    interactive=True,
+                    info="Penalize repeated tokens",
+                )
         with gr.Tabs():
             with gr.TabItem("Project Explorer"):
-                project_path = gr.Textbox(label="Project Path", placeholder="/home/user/app/current_project")
+                project_path = gr.Textbox(
+                    label="Project Path", placeholder="/home/user/app/current_project"
+                )
                 explore_button = gr.Button(value="Explore")
                 project_output = gr.Textbox(label="File Tree", lines=20)
-
             with gr.TabItem("Code Editor"):
                 code_editor = gr.Code(label="Code Editor", language="python")
                 run_code_button = gr.Button(value="Run Code")
                 code_output = gr.Textbox(label="Code Output", lines=10)
-
             with gr.TabItem("File Management"):
-                file_list = gr.Dropdown(label="Select File", choices=[], interactive=True)
+                file_list = gr.Dropdown(
+                    label="Select File", choices=[], interactive=True
+                )
                 file_content = gr.Textbox(label="File Content", lines=20)
                 save_file_button = gr.Button(value="Save File")
                 create_file_button = gr.Button(value="Create New File")
                 delete_file_button = gr.Button(value="Delete File")
-
         history = gr.State([])
 
-        def chat(purpose: str, message: str, agent_name: str, sys_prompt: str, temperature: float, max_new_tokens: int, top_p: float, repetition_penalty: float, history: List[Tuple[str, str]]) -> Tuple[List[Tuple[str, str]], List[Tuple[str, str]]]:
+        def chat(
+            purpose: str,
+            message: str,
+            agent_name: str,
+            sys_prompt: str,
+            temperature: float,
+            max_new_tokens: int,
+            top_p: float,
+            repetition_penalty: float,
+            history: List[Tuple[str, str]],
+        ) -> Tuple[List[Tuple[str, str]], List[Tuple[str, str]]]:
+            """Handles the chat interaction, generating responses and updating history."""
             prompt = format_prompt(message, history)
-            response = generate(prompt, history, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty)
+            response = generate(
+                prompt,
+                history,
+                agent_name,
+                sys_prompt,
+                temperature,
+                max_new_tokens,
+                top_p,
+                repetition_penalty,
+            )
             history.append((message, response))
             return history, history
 
-        submit_button.click(chat, inputs=[purpose, message, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty, history], outputs=[chatbot, history])
+        submit_button.click(
+            chat,
+            inputs=[
+                purpose,
+                message,
+                agent_name,
+                sys_prompt,
+                temperature,
+                max_new_tokens,
+                top_p,
+                repetition_penalty,
+                history,
+            ],
+            outputs=[chatbot, history],
+        )
 
         def explore_project(project_path: str) -> str:
+            """Explores the project directory and displays the file tree."""
             try:
                 tree = subprocess.check_output(["tree", project_path]).decode("utf-8")
                 return tree
             except Exception as e:
                 return f"Error exploring project: {e}"
 
-        explore_button.click(explore_project, inputs=[project_path], outputs=[project_output])
+        explore_button.click(
+            explore_project, inputs=[project_path], outputs=[project_output]
+        )
 
         def run_code(code: str) -> str:
+            """Executes the Python code in the code editor and returns the output."""
             try:
                 exec_globals = {}
                 exec(code, exec_globals)
-                output = exec_globals.get('__builtins__', {}).get('print', print)
+                output = exec_globals.get("__builtins__", {}).get("print", print)
                 return str(output)
             except Exception as e:
                 return f"Error running code: {e}"
 
-        run_code_button.click(run_code, inputs=[code_editor], outputs=[code_output])
+        run_code_button.click(
+            run_code, inputs=[code_editor], outputs=[code_output]
+        )
 
         def load_file_list(project_path: str) -> List[str]:
+            """Loads the list of files in the project directory."""
             try:
-                return [f for f in os.listdir(project_path) if os.path.isfile(os.path.join(project_path, f))]
+                return [
+                    f
+                    for f in os.listdir(project_path)
+                    if os.path.isfile(os.path.join(project_path, f))
+                ]
             except Exception as e:
                 return [f"Error loading file list: {e}"]
 
         def load_file_content(project_path: str, file_name: str) -> str:
+            """Loads the content of the selected file."""
             try:
-                with open(os.path.join(project_path, file_name), 'r') as file:
+                with open(os.path.join(project_path, file_name), "r") as file:
                     return file.read()
             except Exception as e:
                 return f"Error loading file content: {e}"
 
         def save_file(project_path: str, file_name: str, content: str) -> str:
+            """Saves the content to the selected file."""
             try:
-                with open(os.path.join(project_path, file_name), 'w') as file:
+                with open(os.path.join(project_path, file_name), "w") as file:
                     file.write(content)
                 return f"File {file_name} saved successfully."
             except Exception as e:
                 return f"Error saving file: {e}"
 
         def create_file(project_path: str, file_name: str) -> str:
+            """Creates a new file in the project directory."""
             try:
-                open(os.path.join(project_path, file_name), 'a').close()
+                open(os.path.join(project_path, file_name), "a").close()
                 return f"File {file_name} created successfully."
             except Exception as e:
                 return f"Error creating file: {e}"
 
         def delete_file(project_path: str, file_name: str) -> str:
+            """Deletes the selected file from the project directory."""
             try:
                 os.remove(os.path.join(project_path, file_name))
                 return f"File {file_name} deleted successfully."
             except Exception as e:
                 return f"Error deleting file: {e}"
 
-        project_path.change(load_file_list, inputs=[project_path], outputs=[file_list])
-        file_list.change(load_file_content, inputs=[project_path, file_list], outputs=[file_content])
-        save_file_button.click(save_file, inputs=[project_path, file_list, file_content], outputs=[gr.Textbox()])
-        create_file_button.click(create_file, inputs=[project_path, gr.Textbox(label="New File Name")], outputs=[gr.Textbox()])
-        delete_file_button.click(delete_file, inputs=[project_path, file_list], outputs=[gr.Textbox()])
-
+        project_path.change(
+            load_file_list, inputs=[project_path], outputs=[file_list]
+        )
+        file_list.change(
+            load_file_content, inputs=[project_path, file_list], outputs=[file_content]
+        )
+        save_file_button.click(
+            save_file, inputs=[project_path, file_list, file_content], outputs=[gr.Textbox()]
+        )
+        create_file_button.click(
+            create_file,
+            inputs=[project_path, gr.Textbox(label="New File Name")],
+            outputs=[gr.Textbox()],
+        )
+        delete_file_button.click(
+            delete_file, inputs=[project_path, file_list], outputs=[gr.Textbox()]
+        )
     demo.launch()
 
+
 if __name__ == "__main__":
     main()
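Note on format_prompt(): it emits a Mixtral-style instruction transcript, and with the default max_history_turns=2 only the last two exchanges survive. A quick illustration (the history values here are made up):

history = [("Hi", "Hello! How can I help?")]
print(format_prompt("What can you do?", history))
# " [INST] Hi [/INST] Hello! How can I help? [INST] What can you do? [/INST]"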
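Note on run_llm(): it draws and logs a seed but never passes it to the generation call, so the logged value does not actually pin down the sampling. InferenceClient.text_generation() accepts a seed argument; a minimal sketch of threading it through, reusing the file's own constants:

resp = client.text_generation(
    content,
    max_new_tokens=MAX_TOKENS,
    stop_sequences=stop_tokens,
    temperature=TEMPERATURE,
    top_p=TOP_P,
    repetition_penalty=REPETITION_PENALTY,
    seed=seed,  # make the logged seed govern sampling
)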
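Note on run_code(): the expression exec_globals.get("__builtins__", {}).get("print", print) fishes the print builtin back out of the globals rather than capturing anything the executed code printed, so the Code Output box never shows the program's output. A sketch that captures stdout with the standard library (exec of user code in-process remains unsafe regardless):

import contextlib
import io

def run_code(code: str) -> str:
    """Execute code and return whatever it printed to stdout."""
    buffer = io.StringIO()
    try:
        with contextlib.redirect_stdout(buffer):
            exec(code, {})  # fresh globals for each run
        return buffer.getvalue() or "Code ran with no output."
    except Exception as e:
        return f"Error running code: {e}"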
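Note on the file-management wiring: save_file_button.click(..., outputs=[gr.Textbox()]) and its siblings construct brand-new Textbox components inside the event registration, so the status boxes (and the "New File Name" field) are not the components laid out in the tab; Gradio renders them wherever the current layout context happens to be. A sketch of the conventional pattern, declaring the components in the layout first (names like new_file_name and status are illustrative, not from this commit):

with gr.TabItem("File Management"):
    file_list = gr.Dropdown(label="Select File", choices=[], interactive=True)
    file_content = gr.Textbox(label="File Content", lines=20)
    new_file_name = gr.Textbox(label="New File Name")
    save_file_button = gr.Button(value="Save File")
    status = gr.Textbox(label="Status", interactive=False)

save_file_button.click(
    save_file,
    inputs=[project_path, file_list, file_content],
    outputs=[status],
)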