acecalisto3 committed on
Commit bd6c875 · verified · 1 Parent(s): 675c9d3

Update app.py

Files changed (1)
  1. app.py +304 -224
app.py CHANGED
@@ -1,19 +1,24 @@
  import os
  import subprocess
  import random
- from huggingface_hub import InferenceClient
  import gradio as gr
  from safe_search import safe_search
- from i_search import google
- from i_search import i_search as i_s
- from datetime import datetime
- import logging
- import json

  # --- Configuration ---
- MODEL_NAME = "mistralai/Mixtral-8x7B-Instruct-v0.1" # Model to use
- MAX_HISTORY_TURNS = 5 # Number of history turns to keep
  VERBOSE = True # Enable verbose logging

  # --- Logging Setup ---
  logging.basicConfig(
@@ -22,170 +27,247 @@ logging.basicConfig(
      format="%(asctime)s - %(levelname)s - %(message)s",
  )

- # --- Agent Definitions ---
- agents = {
-     "WEB_DEV": {
-         "description": "Specialized in web development tasks.",
-         "system_prompt": "You are a helpful AI assistant specializing in web development. You can generate code, answer questions, and provide guidance on web technologies.",
-     },
-     "AI_SYSTEM_PROMPT": {
-         "description": "Focuses on generating system prompts for AI agents.",
-         "system_prompt": "You are a helpful AI assistant that generates effective system prompts for AI agents. Your prompts should be clear, concise, and provide specific instructions.",
-     },
-     "PYTHON_CODE_DEV": {
-         "description": "Expert in Python code development.",
-         "system_prompt": "You are a helpful AI assistant specializing in Python code development. You can generate Python code, debug code, and answer questions about Python.",
-     },
-     "DATA_SCIENCE": {
-         "description": "Expert in data science tasks.",
-         "system_prompt": "You are a helpful AI assistant specializing in data science. You can analyze data, build models, and provide insights.",
-     },
-     "GAME_DEV": {
-         "description": "Expert in game development tasks.",
-         "system_prompt": "You are a helpful AI assistant specializing in game development. You can generate game logic, design levels, and provide guidance on game engines.",
-     },
-     # Add more agents as needed
- }
-
- # --- Function to format prompt with history ---
- def format_prompt(message, history, agent_name, system_prompt):
      prompt = " "
-     for user_prompt, bot_response in history[-MAX_HISTORY_TURNS:]:
          prompt += f"[INST] {user_prompt} [/INST] "
          prompt += f" {bot_response}"
      prompt += f"[INST] {message} [/INST] "
-
-     # Add system prompt if provided
-     if system_prompt:
-         prompt = f"{system_prompt}\n\n{prompt}"
-
      return prompt

- # --- Function to run the LLM with specified parameters ---
  def run_llm(
-     prompt,
-     stop_sequences,
-     max_tokens,
-     temperature=0.7,
-     top_p=0.8,
-     repetition_penalty=1.5,
- ):
      seed = random.randint(1, 1111111111111111)
      logging.info(f"Seed: {seed}") # Log the seed

-     client = InferenceClient(MODEL_NAME)
-     resp = client.text_generation(
-         prompt,
-         max_new_tokens=max_tokens,
-         stop_sequences=stop_sequences,
-         temperature=temperature,
-         top_p=top_p,
-         repetition_penalty=repetition_penalty,
-     )
      if VERBOSE:
-         logging.info(f"Prompt: {prompt}")
-         logging.info(f"Response: {resp}")
      return resp

- # --- Function to handle agent interactions ---
- def agent_interaction(
-     purpose,
-     message,
-     agent_name,
-     system_prompt,
-     history,
-     temperature,
-     max_new_tokens,
-     top_p,
-     repetition_penalty,
- ):
-     # Format the prompt with history
-     prompt = format_prompt(message, history, agent_name, system_prompt)
-
-     # Run the LLM
-     response = run_llm(
-         prompt,
-         stop_sequences=["observation:", "task:", "action:", "thought:"],
-         max_tokens=max_new_tokens,
-         temperature=temperature,
-         top_p=top_p,
-         repetition_penalty=repetition_penalty,
-     )

-     # Update history
-     history.append((message, response))
-     return history, history

- # --- Function to parse actions from LLM response ---
- def parse_action(line):
-     """Parse the action line to get the action name and input."""
-     parts = line.split(":", 1)
-     if len(parts) == 2:
-         action_name = parts[0].replace("action", "").strip()
-         action_input = parts[1].strip()
-     else:
-         action_name = parts[0].replace("action", "").strip()
-         action_input = ""
-     return action_name, action_input

- # --- Function to execute actions based on agent's response ---
- def execute_action(purpose, task, history, action_name, action_input):
-     logging.info(f"Executing Action: {action_name} - {action_input}")
-
-     if action_name == "SEARCH":
-         try:
-             if "http" in action_input:
-                 if "<" in action_input:
-                     action_input = action_input.strip("<")
-                 if ">" in action_input:
-                     action_input = action_input.strip(">")
-                 response = i_s(action_input)
-                 logging.info(f"Search Result: {response}")
-                 history += "observation: search result is: {}\n".format(response)
              else:
-                 history += "observation: I need to provide a valid URL to 'action: SEARCH action_input=https://URL'\n"
-         except Exception as e:
-             history += "observation: {}\n".format(e)
-         return "MAIN", None, history, task
-
-     elif action_name == "COMPLETE":
-         task = "END"
-         return "COMPLETE", "COMPLETE", history, task
-
-     elif action_name == "GENERATE_CODE":
-         # Simulate OpenAI API response for code generation (using Hugging Face model)
-         # ... (Implement code generation logic using a suitable Hugging Face model)
-         # Example:
-         # code = generate_code_from_huggingface_model(action_input) # Replace with actual code generation function
-         # history += f"observation: Here's the code: {code}\n"
-         # return "MAIN", None, history, task
-         pass # Placeholder for code generation logic
-
-     elif action_name == "RUN_CODE":
-         # Simulate OpenAI API response for code execution (using Hugging Face model)
-         # ... (Implement code execution logic using a suitable Hugging Face model)
-         # Example:
-         # output = execute_code_from_huggingface_model(action_input) # Replace with actual code execution function
-         # history += f"observation: Code output: {output}\n"
-         # return "MAIN", None, history, task
-         pass # Placeholder for code execution logic

-     else:
-         # Default action: "MAIN"
-         return "MAIN", action_input, history, task

- # --- Function to handle the main loop of agent interaction ---
- def run_agent(purpose, history):
      task = None
      directory = "./"
      if history:
          history = str(history).strip("[]")
      if not history:
-         history = ""
-
      action_name = "UPDATE-TASK" if task is None else "MAIN"
      action_input = None
-
      while True:
          logging.info(f"---")
          logging.info(f"Purpose: {purpose}")
@@ -194,67 +276,76 @@ def run_agent(purpose, history):
          logging.info(f"History: {history}")
          logging.info(f"---")

-         # Get the agent's next action
-         prompt = f"""
-         You are a helpful AI assistant. You are working on the task: {task}
-         Your current history is:
-         {history}
-         What is your next thought?
-         thought:
-         What is your next action?
-         action:
-         """
-
-         response = run_llm(
-             prompt,
-             stop_sequences=["observation:", "task:", "action:", "thought:"],
-             max_tokens=32000,
-         )
-
-         # Parse the action
-         lines = response.strip().strip("\n").split("\n")
-         for line in lines:
-             if line.startswith("thought: "):
-                 history += "{}\n".format(line)
-                 logging.info(f"Thought: {line}")
-             elif line.startswith("action: "):
-                 action_name, action_input = parse_action(line)
-                 logging.info(f"Action: {action_name} - {action_input}")
-                 history += "{}\n".format(line)
-                 break
-
-         # Execute the action
-         action_name, action_input, history, task = execute_action(
-             purpose, task, history, action_name, action_input
          )
-
          yield (history)
          if task == "END":
              return (history)

- # --- Gradio Interface ---
  def main():
      with gr.Blocks() as demo:
-         gr.Markdown("## FragMixt - No-Code Development Powerhouse")
-         gr.Markdown("### Your AI-Powered Development Companion")

          # Chat Interface
          chatbot = gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel")
-
          # Input Components
          message = gr.Textbox(label="Enter your message", placeholder="Ask me anything!")
          purpose = gr.Textbox(label="Purpose", placeholder="What is the purpose of this interaction?")
-         agent_name = gr.Dropdown(label="Agents", choices=list(agents.keys()), value=list(agents.keys())[0], interactive=True)
-         system_prompt = gr.Textbox(label="System Prompt", max_lines=1, interactive=True)
-         temperature = gr.Slider(label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs")
-         max_new_tokens = gr.Slider(label="Max new tokens", value=1048 * 10, minimum=0, maximum=1048 * 10, step=64, interactive=True, info="The maximum numbers of new tokens")
-         top_p = gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens")
-         repetition_penalty = gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens")

          # Button to submit the message
          submit_button = gr.Button(value="Send")

-         # Project Explorer Tab (Placeholder)
          with gr.Tab("Project Explorer"):
              project_path = gr.Textbox(label="Project Path", placeholder="/home/user/app/current_project")
              explore_button = gr.Button(value="Explore")
@@ -266,43 +357,32 @@ def main():
          examples = [
              ["What is the purpose of this AI agent?", "I am designed to assist with no-code development tasks."],
              ["Can you help me generate a Python function to calculate the factorial of a number?", "Sure! Here is a Python function to calculate the factorial of a number:"],
          ]

-         def chat(purpose, message, agent_name, system_prompt, temperature, max_new_tokens, top_p, repetition_penalty, history):
-             # Get the system prompt for the selected agent
-             system_prompt = agents.get(agent_name, {}).get("system_prompt", "")
-
-             # Run the agent interaction
-             history, history_output = agent_interaction(
-                 purpose,
-                 message,
-                 agent_name,
-                 system_prompt,
-                 history,
-                 temperature,
-                 max_new_tokens,
-                 top_p,
-                 repetition_penalty,
-             )
-             return history, history_output
-
-         submit_button.click(
-             chat,
-             inputs=[
-                 purpose,
-                 message,
-                 agent_name,
-                 system_prompt,
-                 temperature,
-                 max_new_tokens,
-                 top_p,
-                 repetition_penalty,
-                 history,
-             ],
-             outputs=[chatbot, history],
-         )

      demo.launch()

  if __name__ == "__main__":
- main()
 
  import os
  import subprocess
  import random
+ import time
+ import logging
+ from typing import Dict, List, Tuple
+ from datetime import datetime
+
  import gradio as gr
+ from huggingface_hub import InferenceClient  # cached_download was imported here but is unused and no longer ships with recent huggingface_hub releases
  from safe_search import safe_search
+ from i_search import google, i_search as i_s

  # --- Configuration ---
  VERBOSE = True # Enable verbose logging
+ MAX_HISTORY = 5 # Maximum history turns to keep
+ MAX_TOKENS = 2048 # Maximum tokens for LLM responses
+ TEMPERATURE = 0.7 # Temperature for LLM responses
+ TOP_P = 0.8 # Top-p (nucleus sampling) for LLM responses
+ REPETITION_PENALTY = 1.5 # Repetition penalty for LLM responses
+ MODEL_NAME = "mistralai/Mixtral-8x7B-Instruct-v0.1" # Name of the LLM model
+ API_KEY = "YOUR_API_KEY" # Replace with your actual Hugging Face API key; reading os.environ.get("HF_TOKEN") is a safer alternative to hardcoding it

  # --- Logging Setup ---
  logging.basicConfig(
      format="%(asctime)s - %(levelname)s - %(message)s",
  )

+ # --- Agents ---
+ agents = [
+     "WEB_DEV",
+     "AI_SYSTEM_PROMPT",
+     "PYTHON_CODE_DEV",
+     "DATA_SCIENCE",
+     "UI_UX_DESIGN",
+ ]
+
+ # --- Prompts ---
+ PREFIX = """
+ {date_time_str}
+ Purpose: {purpose}
+ Safe Search: {safe_search}
+ """
+
+ LOG_PROMPT = """
+ PROMPT: {content}
+ """
+
+ LOG_RESPONSE = """
+ RESPONSE: {resp}
+ """
+
+ COMPRESS_HISTORY_PROMPT = """
+ You are a helpful AI assistant. Your task is to compress the following history into a summary that is no longer than 512 tokens.
+ History:
+ {history}
+ """
+
+ ACTION_PROMPT = """
+ You are a helpful AI assistant. You are working on the task: {task}
+ Your current history is:
+ {history}
+ What is your next thought?
+ thought:
+ What is your next action?
+ action:
+ """
+
+ TASK_PROMPT = """
+ You are a helpful AI assistant. Your current history is:
+ {history}
+ What is the next task?
+ task:
+ """
+
+ UNDERSTAND_TEST_RESULTS_PROMPT = """
+ You are a helpful AI assistant. The test results are:
+ {test_results}
+ What do you want to know about the test results?
+ thought:
+ """
+
+ # --- Functions ---
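+ # The [INST] ... [/INST] markers used below follow the Mixtral instruct chat format
+ # ("[INST] user turn [/INST] model turn"); BOS/EOS special tokens are not added here.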
+ def format_prompt(message: str, history: List[Tuple[str, str]], max_history_turns: int = 2) -> str:
+     """Formats the prompt for the LLM, including the message and relevant history."""
      prompt = " "
+     # Keep only the last 'max_history_turns' turns
+     for user_prompt, bot_response in history[-max_history_turns:]:
          prompt += f"[INST] {user_prompt} [/INST] "
          prompt += f" {bot_response}"
      prompt += f"[INST] {message} [/INST] "
      return prompt

  def run_llm(
+     prompt_template: str,
+     stop_tokens: List[str],
+     purpose: str,
+     **prompt_kwargs: Dict
+ ) -> str:
+     """Runs the LLM with the given prompt and parameters."""
      seed = random.randint(1, 1111111111111111)
      logging.info(f"Seed: {seed}") # Log the seed

+     date_time_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S") # Timestamp rendered into the PREFIX header
+     content = PREFIX.format(
+         date_time_str=date_time_str,
+         purpose=purpose,
+         safe_search=safe_search,
+     ) + prompt_template.format(**prompt_kwargs)
      if VERBOSE:
+         logging.info(LOG_PROMPT.format(content=content)) # Log the prompt
+
+     # 'client' is the module-level InferenceClient initialized in main()
+     resp = client.text_generation(content, max_new_tokens=MAX_TOKENS, stop_sequences=stop_tokens, temperature=TEMPERATURE, top_p=TOP_P, repetition_penalty=REPETITION_PENALTY)
+     if VERBOSE:
+         logging.info(LOG_RESPONSE.format(resp=resp)) # Log the response
      return resp
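+ # Plausible usage (all names from this file): run_llm(TASK_PROMPT, stop_tokens=["task:"], purpose="build a site", history="")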

+ def generate(
+     prompt: str,
+     history: List[Tuple[str, str]],
+     agent_name: str = agents[0],
+     sys_prompt: str = "",
+     temperature: float = TEMPERATURE,
+     max_new_tokens: int = MAX_TOKENS,
+     top_p: float = TOP_P,
+     repetition_penalty: float = REPETITION_PENALTY,
+ ) -> str:
+     """Generates text using the LLM."""
+     # Prepend the system prompt (if any) to the already formatted prompt
+     content = f"{sys_prompt}\n\n{prompt}" if sys_prompt else prompt
+     if VERBOSE:
+         logging.info(LOG_PROMPT.format(content=content)) # Log the prompt
+
+     stream = client.text_generation(content, stream=True, details=True, return_full_text=False, temperature=temperature, top_p=top_p, repetition_penalty=repetition_penalty, max_new_tokens=max_new_tokens)
+     resp = ""
+     for response in stream:
+         resp += response.token.text
+
+     if VERBOSE:
+         logging.info(LOG_RESPONSE.format(resp=resp)) # Log the response
+     return resp
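+ # Plausible usage (all names from this file): generate("Write a haiku", history=[], agent_name="WEB_DEV")
+ # returns the accumulated streamed completion as one string.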

+ def compress_history(purpose: str, task: str, history: List[Tuple[str, str]], directory: str) -> List[Tuple[str, str]]:
+     """Compresses the history into a shorter summary."""
+     resp = run_llm(
+         COMPRESS_HISTORY_PROMPT,
+         stop_tokens=["observation:", "task:", "action:", "thought:"],
+         purpose=purpose,
+         task=task,
+         history="\n".join(f"[INST] {user_prompt} [/INST] {bot_response}" for user_prompt, bot_response in history),
+     )
+     # Keep the summary in the same (user, bot) tuple format the rest of the code expects
+     return [("observation: {}".format(resp), "")]
+
+ def call_search(purpose: str, task: str, history: List[Tuple[str, str]], directory: str, action_input: str) -> Tuple[str, str, List[Tuple[str, str]], str]:
+     """Performs a search based on the action input."""
+     logging.info(f"CALLING SEARCH: {action_input}")
+     try:
+         if "http" in action_input:
+             if "<" in action_input:
+                 action_input = action_input.strip("<")
+             if ">" in action_input:
+                 action_input = action_input.strip(">")
+
+             response = i_s(action_input)
+             logging.info(f"Search Result: {response}")
+             history.append(("observation: search result is: {}".format(response), ""))
+         else:
+             history.append(("observation: I need to provide a valid URL to 'action: SEARCH action_input=https://URL'\n", ""))
+     except Exception as e:
+         history.append(("observation: {}\n".format(e), ""))
+     return "MAIN", None, history, task
+
+ def call_main(purpose: str, task: str, history: List[Tuple[str, str]], directory: str, action_input: str) -> Tuple[str, str, List[Tuple[str, str]], str]:
+     """Handles the main agent interaction loop."""
+     logging.info(f"CALLING MAIN: {action_input}")
+     resp = run_llm(
+         ACTION_PROMPT,
+         stop_tokens=["observation:", "task:", "action:", "thought:"],
+         purpose=purpose,
+         task=task,
+         history="\n".join(f"[INST] {user_prompt} [/INST] {bot_response}" for user_prompt, bot_response in history),
+     )
+     lines = resp.strip().strip("\n").split("\n")
+     for line in lines:
+         if line == "":
+             continue
+         if line.startswith("thought: "):
+             history.append((line, ""))
+             logging.info(f"Thought: {line}")
+         elif line.startswith("action: "):
+             action_name, action_input = parse_action(line)
+             logging.info(f"Action: {action_name} - {action_input}")
+             history.append((line, ""))
+             if "COMPLETE" in action_name or "COMPLETE" in action_input:
+                 task = "END"
+                 return action_name, action_input, history, task
              else:
+                 return action_name, action_input, history, task
+         else:
+             history.append((line, ""))
+             logging.info(f"Other Output: {line}")
+     return "MAIN", None, history, task
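+ # One step of the ReAct-style loop: the model's "thought:"/"action:" lines are appended to
+ # history, and the parsed action is handed back for dispatch via run_action().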
+
+ def call_set_task(purpose: str, task: str, history: List[Tuple[str, str]], directory: str, action_input: str) -> Tuple[str, str, List[Tuple[str, str]], str]:
+     """Sets a new task for the agent."""
+     logging.info(f"CALLING SET_TASK: {action_input}")
+     task = run_llm(
+         TASK_PROMPT,
+         stop_tokens=[],
+         purpose=purpose,
+         task=task,
+         history="\n".join(f"[INST] {user_prompt} [/INST] {bot_response}" for user_prompt, bot_response in history),
+     ).strip("\n")
+     history.append(("observation: task has been updated to: {}".format(task), ""))
+     return "MAIN", None, history, task
+
+ def end_fn(purpose: str, task: str, history: List[Tuple[str, str]], directory: str, action_input: str) -> Tuple[str, str, List[Tuple[str, str]], str]:
+     """Ends the agent interaction."""
+     logging.info(f"CALLING END_FN: {action_input}")
+     task = "END"
+     return "COMPLETE", "COMPLETE", history, task
+
+ NAME_TO_FUNC: Dict[str, callable] = {
+     "MAIN": call_main,
+     "UPDATE-TASK": call_set_task,
+     "SEARCH": call_search,
+     "COMPLETE": end_fn,
+ }
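+ # Dispatch table mapping the action names emitted by the model to their handlers;
+ # run_action() falls back to "MAIN" for any unrecognized name.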

+ def run_action(purpose: str, task: str, history: List[Tuple[str, str]], directory: str, action_name: str, action_input: str) -> Tuple[str, str, List[Tuple[str, str]], str]:
+     """Executes the specified action."""
+     logging.info(f"RUNNING ACTION: {action_name} - {action_input}")
+     try:
+         if "RESPONSE" in action_name or "COMPLETE" in action_name:
+             action_name = "COMPLETE"
+             task = "END"
+             return action_name, "COMPLETE", history, task
+
+         # compress the history when it is long
+         if len(history) > MAX_HISTORY:
+             logging.info("COMPRESSING HISTORY")
+             history = compress_history(purpose, task, history, directory)
+         if action_name not in NAME_TO_FUNC:
+             action_name = "MAIN"
+         if action_name == "" or action_name is None:
+             action_name = "MAIN"
+         assert action_name in NAME_TO_FUNC
+
+         logging.info(f"RUN: {action_name} - {action_input}")
+         return NAME_TO_FUNC[action_name](purpose, task, history, directory, action_input)
+     except Exception as e:
+         history.append(("observation: the previous command did not produce any useful output, I need to check the command's syntax, or use a different command\n", ""))
+         logging.error(f"Error in run_action: {e}")
+         return "MAIN", None, history, task
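+ # Exceptions inside an action are surfaced to the model as an "observation:" entry rather
+ # than raised, so the loop in run() below keeps going.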

+ def run(purpose: str, history: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
+     """Main agent interaction loop."""
      task = None
      directory = "./"
+     # History is a list of (user, bot) tuples throughout; start empty if none was given
+     if not history:
+         history = []
+
      action_name = "UPDATE-TASK" if task is None else "MAIN"
      action_input = None
      while True:
          logging.info(f"---")
          logging.info(f"Purpose: {purpose}")
          logging.info(f"History: {history}")
          logging.info(f"---")

+         action_name, action_input, history, task = run_action(
+             purpose,
+             task,
+             history,
+             directory,
+             action_name,
+             action_input,
          )
          yield (history)
          if task == "END":
              return (history)
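+ # run() is a generator: each yield hands the updated history to the caller so progress
+ # can be rendered incrementally while the loop continues.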

+ ################################################
+
+ # NOTE: this redefinition (with max_history_turns=5) shadows the format_prompt defined above
+ def format_prompt(message: str, history: List[Tuple[str, str]], max_history_turns: int = 5) -> str:
+     """Formats the prompt for the LLM, including the message and relevant history."""
+     prompt = " "
+     # Keep only the last 'max_history_turns' turns
+     for user_prompt, bot_response in history[-max_history_turns:]:
+         prompt += f"[INST] {user_prompt} [/INST] "
+         prompt += f" {bot_response}"
+     prompt += f"[INST] {message} [/INST] "
+     return prompt
+
+ def parse_action(line: str) -> Tuple[str, str]:
+     """Parses the action line to get the action name and input."""
+     parts = line.split(":", 1)
+     if len(parts) == 2:
+         action_name = parts[0].replace("action", "").strip()
+         action_input = parts[1].strip()
+     else:
+         action_name = parts[0].replace("action", "").strip()
+         action_input = ""
+     return action_name, action_input
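+ # Expects lines shaped like "action NAME: input"; the word "action" is stripped from the
+ # name, so a line like "action: SEARCH ..." (colon first) yields an empty action name.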
+
  def main():
+     """Main function to run the Gradio interface."""
+     global client
+     # Initialize the LLM client with your API key
+     try:
+         client = InferenceClient(
+             MODEL_NAME,
+             token=API_KEY # Replace with your actual API key
+         )
+     except Exception as e:
+         logging.error(f"Error initializing LLM client: {e}")
+         print("Error initializing LLM client. Please check your API key.")
+         return
+
      with gr.Blocks() as demo:
+         gr.Markdown("## FragMixt: The No-Code Development Powerhouse")
+         gr.Markdown("### Your AI-Powered Development Companion")

          # Chat Interface
          chatbot = gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel")
+         history = gr.State([]) # Session state holding the (user, bot) conversation history used by the click handler below
+
          # Input Components
          message = gr.Textbox(label="Enter your message", placeholder="Ask me anything!")
          purpose = gr.Textbox(label="Purpose", placeholder="What is the purpose of this interaction?")
+         agent_name = gr.Dropdown(label="Agents", choices=[s for s in agents], value=agents[0], interactive=True)
+         sys_prompt = gr.Textbox(label="System Prompt", max_lines=1, interactive=True)
+         temperature = gr.Slider(label="Temperature", value=TEMPERATURE, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs")
+         max_new_tokens = gr.Slider(label="Max new tokens", value=MAX_TOKENS, minimum=0, maximum=1048*10, step=64, interactive=True, info="The maximum number of new tokens")
+         top_p = gr.Slider(label="Top-p (nucleus sampling)", value=TOP_P, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens")
+         repetition_penalty = gr.Slider(label="Repetition penalty", value=REPETITION_PENALTY, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens")

          # Button to submit the message
          submit_button = gr.Button(value="Send")

+         # Project Explorer Tab
          with gr.Tab("Project Explorer"):
              project_path = gr.Textbox(label="Project Path", placeholder="/home/user/app/current_project")
              explore_button = gr.Button(value="Explore")

          examples = [
              ["What is the purpose of this AI agent?", "I am designed to assist with no-code development tasks."],
              ["Can you help me generate a Python function to calculate the factorial of a number?", "Sure! Here is a Python function to calculate the factorial of a number:"],
+             ["Generate a simple HTML page with a heading and a paragraph.", "```html\n<!DOCTYPE html>\n<html>\n<head>\n<title>My Simple Page</title>\n</head>\n<body>\n<h1>Welcome to my page!</h1>\n<p>This is a simple paragraph.</p>\n</body>\n</html>\n```"],
+             ["Create a basic SQL query to select all data from a table named 'users'.", "```sql\nSELECT * FROM users;\n```"],
+             ["Design a user interface for a mobile app that allows users to track their daily expenses.", "Here's a basic UI design for a mobile expense tracker app:\n\n**Screen 1: Home**\n- Top: App Name and Balance Display\n- Middle: List of Recent Transactions (Date, Description, Amount)\n- Bottom: Buttons for Add Expense, Add Income, View Categories\n\n**Screen 2: Add Expense**\n- Input fields for Date, Category, Description, Amount\n- Buttons for Save, Cancel\n\n**Screen 3: Expense Categories**\n- List of expense categories (e.g., Food, Transportation, Entertainment)\n- Option to add/edit categories\n\n**Screen 4: Reports**\n- Charts and graphs to visualize spending by category, date range, etc.\n- Filters to customize the reports"],
          ]

+         def chat(purpose: str, message: str, agent_name: str, sys_prompt: str, temperature: float, max_new_tokens: int, top_p: float, repetition_penalty: float, history: List[Tuple[str, str]]) -> Tuple[List[Tuple[str, str]], List[Tuple[str, str]]]:
+             """Handles the chat interaction."""
+             prompt = format_prompt(message, history)
+             response = generate(prompt, history, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty)
+             history.append((message, response))
+             return history, history
+
+         submit_button.click(chat, inputs=[purpose, message, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty, history], outputs=[chatbot, history])
+
+         # Project Explorer Logic
+         def explore_project(project_path: str) -> str:
+             """Explores the project directory and returns a file tree."""
+             try:
+                 tree = subprocess.check_output(["tree", project_path]).decode("utf-8")
+                 return tree
+             except Exception as e:
+                 return f"Error exploring project: {e}"
+
+         explore_button.click(explore_project, inputs=[project_path], outputs=[project_output])

      demo.launch()

  if __name__ == "__main__":
+     main()