acecalisto3 committed
Commit 7259a4c · verified · 1 Parent(s): d63b979

Update app.py

Files changed (1): app.py (+468 -480)
app.py CHANGED
@@ -1,486 +1,474 @@
  import os
  import subprocess
- import random
- from huggingface_hub import InferenceClient
- import gradio as gr
- from safe_search import safe_search
- from i_search import google
- from i_search import i_search as i_s
- from agent import (
-     ACTION_PROMPT,
-     ADD_PROMPT,
-     COMPRESS_HISTORY_PROMPT,
-     LOG_PROMPT,
-     LOG_RESPONSE,
-     MODIFY_PROMPT,
-     PREFIX,
-     SEARCH_QUERY,
-     READ_PROMPT,
-     TASK_PROMPT,
-     UNDERSTAND_TEST_RESULTS_PROMPT,
- )
- from utils import parse_action, parse_file_content, read_python_module_structure
- from datetime import datetime
- now = datetime.now()
- date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")
-
- client = InferenceClient(
-     "mistralai/Mixtral-8x7B-Instruct-v0.1"
- )
-
- ############################################
-
-
- VERBOSE = True
- MAX_HISTORY = 100
- #MODEL = "gpt-3.5-turbo" # "gpt-4"
-
-
- def format_prompt(message, history):
-     prompt = "<s>"
-     for user_prompt, bot_response in history:
-         prompt += f"[INST] {user_prompt} [/INST]"
-         prompt += f" {bot_response}</s> "
-     prompt += f"[INST] {message} [/INST]"
-     return prompt
-
-
- def run_gpt(
-     prompt_template,
-     stop_tokens,
-     max_tokens,
-     module_summary,
-     purpose,
-     **prompt_kwargs,
- ):
-     seed = random.randint(1, 1111111111111111)
-
-     generate_kwargs = dict(
-         temperature=0.9,
-         max_new_tokens=1048,
-         top_p=0.95,
-         repetition_penalty=1.0,
-         do_sample=True,
-         seed=seed,
-     )
-
-     content = PREFIX.format(
-         date_time_str=date_time_str,
-         purpose=purpose,
-         safe_search=safe_search,
-     ) + prompt_template.format(**prompt_kwargs)
-     if VERBOSE:
-         print(LOG_PROMPT.format(content))
-
-     #formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
-     #formatted_prompt = format_prompt(f'{content}', history)
-
-     stream = client.text_generation(content, **generate_kwargs, stream=True, details=True, return_full_text=False)
-     resp = ""
-     for response in stream:
-         resp += response.token.text
-
-     if VERBOSE:
-         print(LOG_RESPONSE.format(resp))
-     return resp
-
-
- def compress_history(purpose, task, history, directory):
-     module_summary, _, _ = read_python_module_structure(directory)
-     resp = run_gpt(
-         COMPRESS_HISTORY_PROMPT,
-         stop_tokens=["observation:", "task:", "action:", "thought:"],
-         max_tokens=512,
-         module_summary=module_summary,
-         purpose=purpose,
-         task=task,
-         history=history,
-     )
-     history = "observation: {}\n".format(resp)
-     return history
-
-
- def call_search(purpose, task, history, directory, action_input):
-     print("CALLING SEARCH")
      try:
-         if "http" in action_input:
-             if "<" in action_input:
-                 action_input = action_input.strip("<")
-             if ">" in action_input:
-                 action_input = action_input.strip(">")
-             response = i_s(action_input)
-             #response = google(search_return)
-             print(response)
-             history += "observation: search result is: {}\n".format(response)
-         else:
-             history += "observation: I need to provide a valid URL to 'action: SEARCH action_input=https://URL'\n"
      except Exception as e:
-         history += "observation: {}'\n".format(e)
-     return "MAIN", None, history, task
-
-
- def call_main(purpose, task, history, directory, action_input):
-     module_summary, _, _ = read_python_module_structure(directory)
-     resp = run_gpt(
-         ACTION_PROMPT,
-         stop_tokens=["observation:", "task:"],
-         max_tokens=256,
-         module_summary=module_summary,
-         purpose=purpose,
-         task=task,
-         history=history,
-     )
-     lines = resp.strip().strip("\n").split("\n")
-     for line in lines:
-         if line == "":
-             continue
-         if line.startswith("thought: "):
-             history += "{}\n".format(line)
-         elif line.startswith("action: "):
-             action_name, action_input = parse_action(line)
-             print(f'ACTION_NAME :: {action_name}')
-             print(f'ACTION_INPUT :: {action_input}')
-             history += "{}\n".format(line)
-             if "COMPLETE" in action_name or "COMPLETE" in action_input:
-                 task = "END"
-                 return action_name, action_input, history, task
-             else:
-                 return action_name, action_input, history, task
-         else:
-             history += "{}\n".format(line)
-             #history += "observation: the following command did not produce any useful output: '{}', I need to check the commands syntax, or use a different command\n".format(line)
-
-     #return action_name, action_input, history, task
-     #assert False, "unknown action: {}".format(line)
-     return "MAIN", None, history, task
-
-
- def call_test(purpose, task, history, directory, action_input):
-     result = subprocess.run(
-         ["python", "-m", "pytest", "--collect-only", directory],
-         capture_output=True,
-         text=True,
-     )
-     if result.returncode != 0:
-         history += "observation: there are no tests! Test should be written in a test folder under {}\n".format(
-             directory
-         )
-         return "MAIN", None, history, task
-     result = subprocess.run(
-         ["python", "-m", "pytest", directory], capture_output=True, text=True
-     )
-     if result.returncode == 0:
-         history += "observation: tests pass\n"
-         return "MAIN", None, history, task
-     module_summary, content, _ = read_python_module_structure(directory)
-     resp = run_gpt(
-         UNDERSTAND_TEST_RESULTS_PROMPT,
-         stop_tokens=[],
-         max_tokens=256,
-         module_summary=module_summary,
-         purpose=purpose,
-         task=task,
-         history=history,
-         stdout=result.stdout[:5000],  # limit amount of text
-         stderr=result.stderr[:5000],  # limit amount of text
-     )
-     history += "observation: tests failed: {}\n".format(resp)
-     return "MAIN", None, history, task
-
-
- def call_set_task(purpose, task, history, directory, action_input):
-     module_summary, content, _ = read_python_module_structure(directory)
-     task = run_gpt(
-         TASK_PROMPT,
-         stop_tokens=[],
-         max_tokens=64,
-         module_summary=module_summary,
-         purpose=purpose,
-         task=task,
-         history=history,
-     ).strip("\n")
-     history += "observation: task has been updated to: {}\n".format(task)
-     return "MAIN", None, history, task
-
-
- def call_read(purpose, task, history, directory, action_input):
-     if not os.path.exists(action_input):
-         history += "observation: file does not exist\n"
-         return "MAIN", None, history, task
-     module_summary, content, _ = read_python_module_structure(directory)
-     f_content = (
-         content[action_input] if content[action_input] else "< document is empty >"
-     )
-     resp = run_gpt(
-         READ_PROMPT,
-         stop_tokens=[],
-         max_tokens=256,
-         module_summary=module_summary,
-         purpose=purpose,
-         task=task,
-         history=history,
-         file_path=action_input,
-         file_contents=f_content,
-     ).strip("\n")
-     history += "observation: {}\n".format(resp)
-     return "MAIN", None, history, task
-
-
- def call_modify(purpose, task, history, directory, action_input):
-     if not os.path.exists(action_input):
-         history += "observation: file does not exist\n"
-         return "MAIN", None, history, task
-     (
-         module_summary,
-         content,
-         _,
-     ) = read_python_module_structure(directory)
-     f_content = (
-         content[action_input] if content[action_input] else "< document is empty >"
-     )
-     resp = run_gpt(
-         MODIFY_PROMPT,
-         stop_tokens=["action:", "thought:", "observation:"],
-         max_tokens=2048,
-         module_summary=module_summary,
-         purpose=purpose,
-         task=task,
-         history=history,
-         file_path=action_input,
-         file_contents=f_content,
-     )
-     new_contents, description = parse_file_content(resp)
-     if new_contents is None:
-         history += "observation: failed to modify file\n"
-         return "MAIN", None, history, task
-
-     with open(action_input, "w") as f:
-         f.write(new_contents)
-
-     history += "observation: file successfully modified\n"
-     history += "observation: {}\n".format(description)
-     return "MAIN", None, history, task
-
-
- def call_add(purpose, task, history, directory, action_input):
-     d = os.path.dirname(action_input)
-     if not d.startswith(directory):
-         history += "observation: files must be under directory {}\n".format(directory)
-     elif not action_input.endswith(".py"):
-         history += "observation: can only write .py files\n"
      else:
-         if d and not os.path.exists(d):
-             os.makedirs(d)
-         if not os.path.exists(action_input):
-             module_summary, _, _ = read_python_module_structure(directory)
-             resp = run_gpt(
-                 ADD_PROMPT,
-                 stop_tokens=["action:", "thought:", "observation:"],
-                 max_tokens=2048,
-                 module_summary=module_summary,
-                 purpose=purpose,
-                 task=task,
-                 history=history,
-                 file_path=action_input,
-             )
-             new_contents, description = parse_file_content(resp)
-             if new_contents is None:
-                 history += "observation: failed to write file\n"
-                 return "MAIN", None, history, task
-
-             with open(action_input, "w") as f:
-                 f.write(new_contents)
-
-             history += "observation: file successfully written\n"
-             history += "obsertation: {}\n".format(description)
-         else:
-             history += "observation: file already exists\n"
-     return "MAIN", None, history, task
-
-
- def end_fn(purpose, task, history, directory, action_input):
-     task = "END"
-     return "COMPLETE", None, history, task
-
-
- NAME_TO_FUNC = {
-     "MAIN": call_main,
-     "UPDATE-TASK": call_set_task,
-     "SEARCH": call_search,
-     "COMPLETE": end_fn,
- }
-
-
- def run_action(purpose, task, history, directory, action_name, action_input):
-     if "RESPONSE" in action_name:
-         task = "END"
-         return action_name, action_input, history, task
-
-     # compress the history when it is long
-     if len(history.split("\n")) > MAX_HISTORY:
-         if VERBOSE:
-             print("COMPRESSING HISTORY")
-         history = compress_history(purpose, task, history, directory)
-
-     assert action_name in NAME_TO_FUNC
-
-     print("RUN: ", action_name, action_input)
-     return NAME_TO_FUNC[action_name](purpose, task, history, directory, action_input)
-
-
- def run(purpose, hist):
-     print(purpose)
-     print(hist)
-     task = None
-     directory = "./"
-     history = ""
-     action_name = "UPDATE-TASK" if task is None else "MAIN"
-     action_input = None
-     while True:
-         print("")
-         print("")
-         print("---")
-         print("purpose:", purpose)
-         print("task:", task)
-         print("---")
-         print(history)
-         print("---")
-
-         action_name, action_input, history, task = run_action(
-             purpose,
-             task,
-             history,
-             directory,
-             action_name,
-             action_input,
-         )
-         if task == "END":
-             return history
-
-
- ################################################
-
- def format_prompt(message, history):
-     prompt = "<s>"
-     for user_prompt, bot_response in history:
-         prompt += f"[INST] {user_prompt} [/INST]"
-         prompt += f" {bot_response}</s> "
-     prompt += f"[INST] {message} [/INST]"
-     return prompt
-
- agents = [
-     "WEB_DEV",
-     "AI_SYSTEM_PROMPT",
-     "PYTHON_CODE_DEV"
- ]
-
- def generate(
-     prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
- ):
-     seed = random.randint(1, 1111111111111111)
-
-     agent = prompts.WEB_DEV
-     if agent_name == "WEB_DEV":
-         agent = prompts.WEB_DEV
-     if agent_name == "AI_SYSTEM_PROMPT":
-         agent = prompts.AI_SYSTEM_PROMPT
-     if agent_name == "PYTHON_CODE_DEV":
-         agent = prompts.PYTHON_CODE_DEV
-     system_prompt = agent
-     temperature = float(temperature)
-     if temperature < 1e-2:
-         temperature = 1e-2
-     top_p = float(top_p)
-
-     generate_kwargs = dict(
-         temperature=temperature,
-         max_new_tokens=max_new_tokens,
-         top_p=top_p,
-         repetition_penalty=repetition_penalty,
-         do_sample=True,
-         seed=seed,
-     )
-
-     formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
-     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
-     output = ""
-
-     for response in stream:
-         output += response.token.text
-         yield output
-     return output
-
-
- additional_inputs = [
-     gr.Dropdown(
-         label="Agents",
-         choices=[s for s in agents],
-         value=agents[0],
-         interactive=True,
-     ),
-     gr.Textbox(
-         label="System Prompt",
-         max_lines=1,
-         interactive=True,
-     ),
-     gr.Slider(
-         label="Temperature",
-         value=0.9,
-         minimum=0.0,
-         maximum=1.0,
-         step=0.05,
-         interactive=True,
-         info="Higher values produce more diverse outputs",
-     ),
-     gr.Slider(
-         label="Max new tokens",
-         value=1048*10,
-         minimum=0,
-         maximum=1048*10,
-         step=64,
-         interactive=True,
-         info="The maximum numbers of new tokens",
-     ),
-     gr.Slider(
-         label="Top-p (nucleus sampling)",
-         value=0.90,
-         minimum=0.0,
-         maximum=1,
-         step=0.05,
-         interactive=True,
-         info="Higher values sample more low-probability tokens",
-     ),
-     gr.Slider(
-         label="Repetition penalty",
-         value=1.2,
-         minimum=1.0,
-         maximum=2.0,
-         step=0.05,
-         interactive=True,
-         info="Penalize repeated tokens",
-     ),
- ]
-
- examples = [
-     ["What are the biggest news stories today?", None, None, None, None, None],
-     ["When is the next full moon?", None, None, None, None, None],
-     ["I'm planning a vacation to Japan. Can you suggest a one-week itinerary including must-visit places and local cuisines to try?", None, None, None, None, None],
-     ["Can you write a short story about a time-traveling detective who solves historical mysteries?", None, None, None, None, None],
-     ["I'm trying to learn French. Can you provide some common phrases that would be useful for a beginner, along with their pronunciations?", None, None, None, None, None],
-     ["I have chicken, rice, and bell peppers in my kitchen. Can you suggest an easy recipe I can make with these ingredients?", None, None, None, None, None],
-     ["Can you explain how the QuickSort algorithm works and provide a Python implementation?", None, None, None, None, None],
-     ["What are some unique features of Rust that make it stand out compared to other systems programming languages like C++?", None, None, None, None, None],
- ]
-
-
- gr.ChatInterface(
-     fn=run,
-     chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
-     title="Mixtral 46.7B\nMicro-Agent\nInternet Search <br> development test",
-     examples=examples,
-     concurrency_limit=20,
- ).launch(show_api=False)
-
  import os
+ import sys
  import subprocess
+ import base64
+ import json
+ from io import StringIO
+ from typing import Dict, List
+
+ import streamlit as st
+ import torch  # assumed import: torch.no_grad() is used in process_input below
+ import black  # assumed import: black.format_str() is used in code_editor_interface below
+ import requests  # assumed import: requests.post() is used in create_space_on_hugging_face below
+ from huggingface_hub import HfApi, Repository, hf_hub_url  # assumed import: these names are used below
+ from transformers import pipeline, AutoModelForSeq2SeqLM, AutoTokenizer
+ from pylint import lint
+
+ # Add your Hugging Face API token here
+ hf_token = st.secrets["huggingface"]
+
+ # Assumed constants: these names are referenced throughout but never defined in this
+ # file, so the values below are placeholders.
+ PROJECT_ROOT = "projects"
+ AGENT_DIRECTORY = "agents"
+ AVAILABLE_CODE_GENERATIVE_MODELS = ["bigcode/starcoder"]
+
+ # Global state to manage communication between Tool Box and Workspace Chat App
+ if "chat_history" not in st.session_state:
+     st.session_state.chat_history = []
+ if "terminal_history" not in st.session_state:
+     st.session_state.terminal_history = []
+ if "workspace_projects" not in st.session_state:
+     st.session_state.workspace_projects = {}
+ if "available_agents" not in st.session_state:  # assumed init: the list is read below
+     st.session_state.available_agents = []
+
+ # Load pre-trained RAG retriever
+ rag_retriever = pipeline("retrieval-question-answering", model="facebook/rag-token-base")
+
+ # Load pre-trained chat model
+ chat_model = AutoModelForSeq2SeqLM.from_pretrained("microsoft/DialoGPT-medium")
+
+ # Load tokenizer
+ tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
+
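+ # NOTE: "retrieval-question-answering" is not a built-in transformers pipeline task,
+ # so the pipeline() call above is likely to raise, and DialoGPT is a decoder-only
+ # model that AutoModelForSeq2SeqLM will generally refuse to load; both loads are
+ # best read as placeholders for a real retriever and chat model.
+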
+ def process_input(user_input: str) -> str:
+     # Input pipeline: Tokenize and preprocess user input
+     input_ids = tokenizer(user_input, return_tensors="pt").input_ids
+     attention_mask = tokenizer(user_input, return_tensors="pt").attention_mask
+
+     # RAG model: Generate response
+     with torch.no_grad():
+         output = rag_retriever(input_ids, attention_mask=attention_mask)
+         response = output.generator_outputs[0].sequences[0]
+
+     # Chat model: Refine response
+     chat_input = tokenizer(response, return_tensors="pt")
+     chat_input["input_ids"] = chat_input["input_ids"].unsqueeze(0)
+     chat_input["attention_mask"] = chat_input["attention_mask"].unsqueeze(0)
+     with torch.no_grad():
+         chat_output = chat_model(**chat_input)
+         refined_response = chat_output.sequences[0]
+
+     # Output pipeline: Return final response
+     return refined_response
+
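+ # NOTE: process_input is a sketch rather than runnable code: a pipeline is not called
+ # with raw tensors like this, `response` would be a token tensor that the tokenizer
+ # cannot re-tokenize, and the final result would need tokenizer.decode() to become text.
+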
+ class AIAgent:
+     def __init__(self, name: str, description: str, skills: List[str], hf_api=None):
+         self.name = name
+         self.description = description
+         self.skills = skills
+         self._hf_api = hf_api
+         self._hf_token = hf_token
+
+     @property
+     def hf_api(self):
+         if not self._hf_api and self.has_valid_hf_token():
+             self._hf_api = HfApi(token=self._hf_token)
+         return self._hf_api
+
+     def has_valid_hf_token(self):
+         return bool(self._hf_token)
+
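+     # NOTE: create_agent_prompt() and deploy_built_space_to_hf() are called later in
+     # this file but are not defined on this class in this version; they would need to
+     # be added (create_agent_prompt presumably composing name, description, and skills).
+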
+     async def autonomous_build(self, chat_history: List[str], workspace_projects: Dict[str, str], project_name: str, selected_model: str):
+         # Continuation of previous methods
+         # str.join needs strings, so history entries and project pairs are formatted first
+         summary = "Chat History:\n" + "\n".join(str(entry) for entry in chat_history)
+         summary += "\n\nWorkspace Projects:\n" + "\n".join(f"{name}: {details}" for name, details in workspace_projects.items())
+
+         # Analyze chat history and workspace projects to suggest actions
+         # Example:
+         # - Check if the user has requested to create a new file
+         # - Check if the user has requested to install a package
+         # - Check if the user has requested to run a command
+         # - Check if the user has requested to generate code
+         # - Check if the user has requested to translate code
+         # - Check if the user has requested to summarize text
+         # - Check if the user has requested to analyze sentiment
+
+         # Generate a response based on the analysis
+         next_step = "Based on the current state, the next logical step is to implement the main application logic."
+
+         # Ensure project folder exists
+         project_path = os.path.join(PROJECT_ROOT, project_name)
+         if not os.path.exists(project_path):
+             os.makedirs(project_path)
+
+         # Create requirements.txt if it doesn't exist
+         requirements_file = os.path.join(project_path, "requirements.txt")
+         if not os.path.exists(requirements_file):
+             with open(requirements_file, "w") as f:
+                 f.write("# Add your project's dependencies here\n")
+
+         # Create app.py if it doesn't exist
+         app_file = os.path.join(project_path, "app.py")
+         if not os.path.exists(app_file):
+             with open(app_file, "w") as f:
+                 f.write("# Your project's main application logic goes here\n")
+
+         # Generate GUI code for app.py if requested
+         if "create a gui" in summary.lower():
+             gui_code = generate_code(
+                 "Create a simple GUI for this application", selected_model)
+             with open(app_file, "a") as f:
+                 f.write(gui_code)
+
+         # Run the default build process
+         build_command = "pip install -r requirements.txt && python app.py"
+         try:
+             result = subprocess.run(
+                 build_command, shell=True, capture_output=True, text=True, cwd=project_path)
+             st.write(f"Build Output:\n{result.stdout}")
+             if result.stderr:
+                 st.error(f"Build Errors:\n{result.stderr}")
+         except Exception as e:
+             st.error(f"Build Error: {e}")
+
+         return summary, next_step
+
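+ # NOTE: the build step above runs synchronously inside the Streamlit request, and
+ # `python app.py` will not serve a Streamlit project (that takes `streamlit run app.py`);
+ # the build command is best treated as a placeholder.
+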
+ def get_built_space_files() -> Dict[str, str]:
+     # Replace with your logic to gather the files you want to deploy
+     return {
+         "app.py": "# Your Streamlit app code here",
+         "requirements.txt": "streamlit\ntransformers"
+         # Add other files as needed
+     }
+
+ def save_agent_to_file(agent: AIAgent):
+     """Saves the agent's prompt to a file."""
+     if not os.path.exists(AGENT_DIRECTORY):
+         os.makedirs(AGENT_DIRECTORY)
+     file_path = os.path.join(AGENT_DIRECTORY, f"{agent.name}.txt")
+     with open(file_path, "w") as file:
+         file.write(agent.create_agent_prompt())
+     st.session_state.available_agents.append(agent.name)
+
+ def load_agent_prompt(agent_name: str) -> str:
+     """Loads an agent prompt from a file."""
+     file_path = os.path.join(AGENT_DIRECTORY, f"{agent_name}.txt")
+     if os.path.exists(file_path):
+         with open(file_path, "r") as file:
+             agent_prompt = file.read()
+         return agent_prompt
+     else:
+         return None
+
+ def create_agent_from_text(name: str, text: str) -> str:
+     skills = text.split("\n")
+     agent = AIAgent(name, "AI agent created from text input.", skills)
+     save_agent_to_file(agent)
+     return agent.create_agent_prompt()
+
+ def chat_interface_with_agent(input_text: str, agent_name: str) -> str:
+     agent_prompt = load_agent_prompt(agent_name)
+     if agent_prompt is None:
+         return f"Agent {agent_name} not found."
+
+     model_name = "MaziyarPanahi/Codestral-22B-v0.1-GGUF"
      try:
+         generator = pipeline("text-generation", model=model_name)
+         generator.tokenizer.pad_token = generator.tokenizer.eos_token
+         generated_response = generator(
+             f"{agent_prompt}\n\nUser: {input_text}\nAgent:", max_length=100, do_sample=True, top_k=50)[0]["generated_text"]
+         return generated_response
      except Exception as e:
+         return f"Error loading model: {e}"
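+ # NOTE: GGUF checkpoints are llama.cpp quantizations that pipeline() cannot load
+ # directly, and max_length=100 counts the prompt tokens too, so a long agent prompt
+ # leaves no room for a reply; max_new_tokens would be the safer parameter here.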
+
+ def terminal_interface(command: str, project_name: str = None) -> str:
+     if project_name:
+         project_path = os.path.join(PROJECT_ROOT, project_name)
+         if not os.path.exists(project_path):
+             return f"Project {project_name} does not exist."
+         result = subprocess.run(
+             command, shell=True, capture_output=True, text=True, cwd=project_path)
      else:
+         result = subprocess.run(command, shell=True, capture_output=True, text=True)
+     return result.stdout
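+ # NOTE: only stdout is returned, so a failing command looks like silent success;
+ # returning result.stdout + result.stderr (or checking result.returncode) would
+ # surface errors in the UI.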
+
+ def code_editor_interface(code: str) -> tuple:  # returns (formatted_code, lint_message)
+     try:
+         formatted_code = black.format_str(code, mode=black.FileMode())
+     except black.NothingChanged:
+         formatted_code = code
+
+     result = StringIO()
+     sys.stdout = result
+     sys.stderr = result
+
+     (pylint_stdout, pylint_stderr) = lint.py_run(code, return_std=True)
+     sys.stdout = sys.__stdout__
+     sys.stderr = sys.__stderr__
+
+     lint_message = pylint_stdout.getvalue() + pylint_stderr.getvalue()
+
+     return formatted_code, lint_message
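+ # NOTE: in most pylint releases py_run lives in pylint.epylint rather than pylint.lint,
+ # and it takes a command line (a file path plus options), not raw source text, so the
+ # code would need to be written to a temporary file before linting.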
+
+ def summarize_text(text: str) -> str:
+     summarizer = pipeline("summarization")
+     summary = summarizer(text, max_length=130, min_length=30, do_sample=False)
+     return summary[0]['summary_text']
+
+ def sentiment_analysis(text: str) -> str:
+     analyzer = pipeline("sentiment-analysis")
+     result = analyzer(text)
+     return result[0]['label']
+
+ def translate_code(code: str, source_language: str, target_language: str) -> str:
+     # Use a Hugging Face translation model instead of OpenAI
+     # Example: English to Spanish
+     translator = pipeline(
+         "translation", model="bartowski/Codestral-22B-v0.1-GGUF")
+     translated_code = translator(code, target_lang=target_language)[0]['translation_text']
+     return translated_code
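+ # NOTE: a "translation" pipeline converts between natural languages, not programming
+ # languages, and the GGUF model named here cannot be loaded by pipeline(); prompting a
+ # code model to rewrite the snippet in the target language would be the usual approach.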
+
+ def generate_code(code_idea: str, model_name: str) -> str:
+     """Generates code using the selected model."""
+     try:
+         generator = pipeline('text-generation', model=model_name)
+         generated_code = generator(code_idea, max_length=1000, num_return_sequences=1)[0]['generated_text']
+         return generated_code
+     except Exception as e:
+         return f"Error generating code: {e}"
+
+ def chat_interface(input_text: str) -> str:
+     """Handles general chat interactions with the user."""
+     # Use a Hugging Face chatbot model or your own logic
+     chatbot = pipeline("text-generation", model="microsoft/DialoGPT-medium")
+     response = chatbot(input_text, max_length=50, num_return_sequences=1)[0]['generated_text']
+     return response
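+ # NOTE: text-generation pipelines return the prompt together with the continuation by
+ # default; passing return_full_text=False (or stripping input_text) would return only
+ # the model's reply.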
+
+ def workspace_interface(project_name: str) -> str:
+     project_path = os.path.join(PROJECT_ROOT, project_name)
+     if not os.path.exists(project_path):
+         os.makedirs(project_path)
+         st.session_state.workspace_projects[project_name] = {'files': []}
+         return f"Project '{project_name}' created successfully."
+     else:
+         return f"Project '{project_name}' already exists."
+
+ def add_code_to_workspace(project_name: str, code: str, file_name: str) -> str:
+     project_path = os.path.join(PROJECT_ROOT, project_name)
+     if not os.path.exists(project_path):
+         return f"Project '{project_name}' does not exist."
+
+     file_path = os.path.join(project_path, file_name)
+     with open(file_path, "w") as file:
+         file.write(code)
+     st.session_state.workspace_projects[project_name]['files'].append(file_name)
+     return f"Code added to '{file_name}' in project '{project_name}'."
+
+ def create_space_on_hugging_face(api, name, description, public, files, entrypoint="launch.py"):
+     url = f"{hf_hub_url()}spaces/{name}/prepare-repo"
+     headers = {"Authorization": f"Bearer {api.access_token}"}
+     payload = {
+         "public": public,
+         "gitignore_template": "web",
+         "default_branch": "main",
+         "archived": False,
+         "files": []
+     }
+     for filename, contents in files.items():
+         data = {
+             "content": contents,
+             "path": filename,
+             "encoding": "utf-8",
+             "mode": "overwrite"
+         }
+         payload["files"].append(data)
+     response = requests.post(url, json=payload, headers=headers)
+     response.raise_for_status()
+     location = response.headers.get("Location")
+     # wait_for_processing(location, api)  # You might need to implement this if it's not already defined
+
+     return Repository(name=name, api=api)
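+ # NOTE: this endpoint and payload do not correspond to the documented Hub HTTP API,
+ # hf_hub_url() requires repo_id and filename arguments, and HfApi exposes .token rather
+ # than .access_token; huggingface_hub's create_repo() and upload_file() are the
+ # supported way to create and populate a Space.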
+
+ # Streamlit App
+ st.title("AI Agent Creator")
+
+ # Sidebar navigation
+ st.sidebar.title("Navigation")
+ app_mode = st.sidebar.selectbox(
+     "Choose the app mode", ["AI Agent Creator", "Tool Box", "Workspace Chat App"])
+
+ if app_mode == "AI Agent Creator":
+     # AI Agent Creator
+     st.header("Create an AI Agent from Text")
+
+     st.subheader("From Text")
+     agent_name = st.text_input("Enter agent name:")
+     text_input = st.text_area("Enter skills (one per line):")
+     if st.button("Create Agent"):
+         agent_prompt = create_agent_from_text(agent_name, text_input)
+         st.success(f"Agent '{agent_name}' created and saved successfully.")
+         st.session_state.available_agents.append(agent_name)
+
+ elif app_mode == "Tool Box":
+     # Tool Box
+     st.header("AI-Powered Tools")
+
+     # Chat Interface
+     st.subheader("Chat with CodeCraft")
+     chat_input = st.text_area("Enter your message:")
+     if st.button("Send"):
+         chat_response = chat_interface(chat_input)
+         st.session_state.chat_history.append((chat_input, chat_response))
+         st.write(f"CodeCraft: {chat_response}")
+
+     # Terminal Interface
+     st.subheader("Terminal")
+     terminal_input = st.text_input("Enter a command:")
+     if st.button("Run"):
+         terminal_output = terminal_interface(terminal_input)
+         st.session_state.terminal_history.append(
+             (terminal_input, terminal_output))
+         st.code(terminal_output, language="bash")
+
+     # Code Editor Interface
+     st.subheader("Code Editor")
+     code_editor = st.text_area("Write your code:", height=300)
+     if st.button("Format & Lint"):
+         formatted_code, lint_message = code_editor_interface(code_editor)
+         st.code(formatted_code, language="python")
+         st.info(lint_message)
+
+     # Text Summarization Tool
+     st.subheader("Summarize Text")
+     text_to_summarize = st.text_area("Enter text to summarize:")
+     if st.button("Summarize"):
+         summary = summarize_text(text_to_summarize)
+         st.write(f"Summary: {summary}")
+
+     # Sentiment Analysis Tool
+     st.subheader("Sentiment Analysis")
+     sentiment_text = st.text_area("Enter text for sentiment analysis:")
+     if st.button("Analyze Sentiment"):
+         sentiment = sentiment_analysis(sentiment_text)
+         st.write(f"Sentiment: {sentiment}")
+
+     # Text Translation Tool (Code Translation)
+     st.subheader("Translate Code")
+     code_to_translate = st.text_area("Enter code to translate:")
+     source_language = st.text_input("Enter source language (e.g., 'Python'):")
+     target_language = st.text_input(
+         "Enter target language (e.g., 'JavaScript'):")
+     if st.button("Translate Code"):
+         translated_code = translate_code(
+             code_to_translate, source_language, target_language)
+         st.code(translated_code, language=target_language.lower())
+
+     # Code Generation
+     st.subheader("Code Generation")
+     code_idea = st.text_input("Enter your code idea:")
+     if st.button("Generate Code"):
+         # generate_code() requires a model name; no selector exists in this mode, so the
+         # default below is an assumed placeholder
+         generated_code = generate_code(code_idea, AVAILABLE_CODE_GENERATIVE_MODELS[0])
+         st.code(generated_code, language="python")
+
+ elif app_mode == "Workspace Chat App":
+     # Workspace Chat App
+     st.header("Workspace Chat App")
+
+     # Project Workspace Creation
+     st.subheader("Create a New Project")
+     project_name = st.text_input("Enter project name:")
+     if st.button("Create Project"):
+         workspace_status = workspace_interface(project_name)
+         st.success(workspace_status)
+
+         # Automatically create requirements.txt and app.py
+         project_path = os.path.join(PROJECT_ROOT, project_name)
+         requirements_file = os.path.join(project_path, "requirements.txt")
+         if not os.path.exists(requirements_file):
+             with open(requirements_file, "w") as f:
+                 f.write("# Add your project's dependencies here\n")
+
+         app_file = os.path.join(project_path, "app.py")
+         if not os.path.exists(app_file):
+             with open(app_file, "w") as f:
+                 f.write("# Your project's main application logic goes here\n")
+
+     # Add Code to Workspace
+     st.subheader("Add Code to Workspace")
+     code_to_add = st.text_area("Enter code to add to workspace:")
+     file_name = st.text_input("Enter file name (e.g., 'app.py'):")
+     if st.button("Add Code"):
+         add_code_status = add_code_to_workspace(
+             project_name, code_to_add, file_name)
+         st.session_state.terminal_history.append(
+             (f"Add Code: {code_to_add}", add_code_status))
+         st.success(add_code_status)
+
+     # Terminal Interface with Project Context
+     st.subheader("Terminal (Workspace Context)")
+     terminal_input = st.text_input("Enter a command within the workspace:")
+     if st.button("Run Command"):
+         terminal_output = terminal_interface(terminal_input, project_name)
+         st.session_state.terminal_history.append(
+             (terminal_input, terminal_output))
+         st.code(terminal_output, language="bash")
+
+     # Chat Interface for Guidance
+     st.subheader("Chat with CodeCraft for Guidance")
+     chat_input = st.text_area("Enter your message for guidance:")
+     if st.button("Get Guidance"):
+         chat_response = chat_interface(chat_input)
+         st.session_state.chat_history.append((chat_input, chat_response))
+         st.write(f"CodeCraft: {chat_response}")
+
+     # Display Chat History
+     st.subheader("Chat History")
+     for user_input, response in st.session_state.chat_history:
+         st.write(f"User: {user_input}")
+         st.write(f"CodeCraft: {response}")
+
+     # Display Terminal History
+     st.subheader("Terminal History")
+     for command, output in st.session_state.terminal_history:
+         st.write(f"Command: {command}")
+         st.code(output, language="bash")
+
+     # Display Projects and Files
+     st.subheader("Workspace Projects")
+     for project, details in st.session_state.workspace_projects.items():
+         st.write(f"Project: {project}")
+         for file in details['files']:
+             st.write(f"  - {file}")
+
+     # Chat with AI Agents
+     st.subheader("Chat with AI Agents")
+     selected_agent = st.selectbox(
+         "Select an AI agent", st.session_state.available_agents)
+     agent_chat_input = st.text_area("Enter your message for the agent:")
+     if st.button("Send to Agent"):
+         agent_chat_response = chat_interface_with_agent(
+             agent_chat_input, selected_agent)
+         st.session_state.chat_history.append(
+             (agent_chat_input, agent_chat_response))
+         st.write(f"{selected_agent}: {agent_chat_response}")
+
+     # Code Generation
+     st.subheader("Code Generation")
+     code_idea = st.text_input("Enter your code idea:")
+
+     # Model Selection Menu
+     selected_model = st.selectbox(
+         "Select a code-generative model", AVAILABLE_CODE_GENERATIVE_MODELS)
+
+     if st.button("Generate Code"):
+         generated_code = generate_code(code_idea, selected_model)
+         st.code(generated_code, language="python")
+
+     # Automate Build Process
+     st.subheader("Automate Build Process")
+     if st.button("Automate"):
+         # Load the agent without skills for now
+         agent = AIAgent(selected_agent, "", [])
+         summary, next_step = agent.autonomous_build(
+             st.session_state.chat_history, st.session_state.workspace_projects, project_name, selected_model)
+         st.write("Autonomous Build Summary:")
+         st.write(summary)
+         st.write("Next Step:")
+         st.write(next_step)
+
+         # If everything went well, proceed to deploy the Space
+         if agent._hf_api and agent.has_valid_hf_token():
+             agent.deploy_built_space_to_hf()
+             # Use the hf_token to interact with the Hugging Face API
+             api = HfApi(token=hf_token)  # pass the token variable, not the literal string "hf_token"
+             # Function to create a Space on Hugging Face
+             create_space_on_hugging_face(api, agent.name, agent.description, True, get_built_space_files())
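+
+ # NOTE: autonomous_build() is declared async but is called above without await, so it
+ # returns a coroutine instead of (summary, next_step); either drop `async` from the
+ # method or run it with asyncio.run().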