acecalisto3 committed on
Commit 6a3821f · verified · 1 Parent(s): ee3c036

Update app.py

Files changed (1)
  1. app.py +376 -463
app.py CHANGED
@@ -1,483 +1,396 @@
 import os
 import subprocess
-import random
-from huggingface_hub import InferenceClient
-import gradio as gr
-from i_search import google
-from i_search import i_search as i_s
-from agent import (
-    ACTION_PROMPT,
-    ADD_PROMPT,
-    COMPRESS_HISTORY_PROMPT,
-    LOG_PROMPT,
-    LOG_RESPONSE,
-    MODIFY_PROMPT,
-    PREFIX,
-    SEARCH_QUERY,
-    READ_PROMPT,
-    TASK_PROMPT,
-    UNDERSTAND_TEST_RESULTS_PROMPT,
-)
-from utils import parse_action, parse_file_content, read_python_module_structure
 from datetime import datetime
-now = datetime.now()
-date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")
-
-client = InferenceClient(
-    "mistralai/Mixtral-8x7B-Instruct-v0.1"
-)
-
-############################################
-
-
-VERBOSE = True
-MAX_HISTORY = 100
-#MODEL = "gpt-3.5-turbo" # "gpt-4"
-
-
-def format_prompt(message, history):
-    prompt = "<s>"
-    for user_prompt, bot_response in history:
-        prompt += f"[INST] {user_prompt} [/INST]"
-        prompt += f" {bot_response}</s> "
-    prompt += f"[INST] {message} [/INST]"
-    return prompt
-
-
-def _parse_text_generation_error(error: Optional[str], error_type: Optional[str]) -> TextGenerationError:
-    if error_type == "overloaded":
-        return OverloadedError(error)  # type: ignore
-
-def run_gpt(
-    prompt_template,
-    stop_tokens,
-    max_tokens,
-    module_summary,
-    purpose,
-    **prompt_kwargs,
-):
-    seed = random.randint(1,1111111111111111)
-
-    generate_kwargs = dict(
-        temperature=0.9,
-        max_new_tokens=1048,
-        top_p=0.95,
-        repetition_penalty=1.0,
-        do_sample=True,
-        seed=seed,
-    )
 
-
-    content = PREFIX.format(
-        date_time_str=date_time_str,
-        purpose=purpose,
-    ) + prompt_template.format(**prompt_kwargs)
-    if VERBOSE:
-        print(LOG_PROMPT.format(content))
-
-
-    #formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
-    #formatted_prompt = format_prompt(f'{content}', history)
-
-    stream = client.text_generation(content, **generate_kwargs, stream=True, details=True, return_full_text=False)
-    resp = ""
-    for response in stream:
-        resp += response.token.text
-
-    if VERBOSE:
-        print(LOG_RESPONSE.format(resp))
-    return resp
-
-
-def compress_history(purpose, task, history, directory):
-    module_summary, _, _ = read_python_module_structure(directory)
-    resp = run_gpt(
-        COMPRESS_HISTORY_PROMPT,
-        stop_tokens=["observation:", "task:", "action:", "thought:"],
-        max_tokens=512,
-        module_summary=module_summary,
-        purpose=purpose,
-        task=task,
-        history=history,
-    )
-    history = "observation: {}\n".format(resp)
-    return history
-
-def call_search(purpose, task, history, directory, action_input):
-    print("CALLING SEARCH")
-    try:
-        if "http" in action_input:
-            if "<" in action_input:
-                action_input = action_input.strip("<")
-            if ">" in action_input:
-                action_input = action_input.strip(">")
-            response = i_s(action_input)
-            #response = google(search_return)
-            print(response)
-            history += "observation: search result is: {}\n".format(response)
-        else:
-            history += "observation: I need to provide a valid URL to 'action: SEARCH action_input=URL'\n"
-    except Exception as e:
-        history += "observation: {}'\n".format(e)
-    return "MAIN", None, history, task
-
-def call_main(purpose, task, history, directory, action_input):
-    module_summary, _, _ = read_python_module_structure(directory)
-    resp = run_gpt(
-        ACTION_PROMPT,
-        stop_tokens=["observation:", "task:"],
-        max_tokens=256,
-        module_summary=module_summary,
-        purpose=purpose,
-        task=task,
-        history=history,
-    )
-    lines = resp.strip().strip("\n").split("\n")
-    for line in lines:
-        if line == "":
-            continue
-        if line.startswith("thought: "):
-            history += "{}\n".format(line)
-        elif line.startswith("action: "):
-
-            action_name, action_input = parse_action(line)
-            print (f'ACTION_NAME :: {action_name}')
-            print (f'ACTION_INPUT :: {action_input}')
-
-            history += "{}\n".format(line)
-            if "COMPLETE" in action_name or "COMPLETE" in action_input:
-                task = "END"
-                return action_name, action_input, history, task
-            else:
-                return action_name, action_input, history, task
-        else:
-            history += "{}\n".format(line)
-            #history += "observation: the following command did not produce any useful output: '{}', I need to check the commands syntax, or use a different command\n".format(line)
-
-    #return action_name, action_input, history, task
-    #assert False, "unknown action: {}".format(line)
-    return "MAIN", None, history, task
-
-
-def call_test(purpose, task, history, directory, action_input):
-    result = subprocess.run(
-        ["python", "-m", "pytest", "--collect-only", directory],
-        capture_output=True,
-        text=True,
-    )
-    if result.returncode != 0:
-        history += "observation: there are no tests! Test should be written in a test folder under {}\n".format(
-            directory
-        )
-        return "MAIN", None, history, task
-    result = subprocess.run(
-        ["python", "-m", "pytest", directory], capture_output=True, text=True
-    )
-    if result.returncode == 0:
-        history += "observation: tests pass\n"
-        return "MAIN", None, history, task
-    module_summary, content, _ = read_python_module_structure(directory)
-    resp = run_gpt(
-        UNDERSTAND_TEST_RESULTS_PROMPT,
-        stop_tokens=[],
-        max_tokens=256,
-        module_summary=module_summary,
-        purpose=purpose,
-        task=task,
-        history=history,
-        stdout=result.stdout[:5000],  # limit amount of text
-        stderr=result.stderr[:5000],  # limit amount of text
-    )
-    history += "observation: tests failed: {}\n".format(resp)
-    return "MAIN", None, history, task
-
-
-def call_set_task(purpose, task, history, directory, action_input):
-    module_summary, content, _ = read_python_module_structure(directory)
-    task = run_gpt(
-        TASK_PROMPT,
-        stop_tokens=[],
-        max_tokens=64,
-        module_summary=module_summary,
-        purpose=purpose,
-        task=task,
-        history=history,
-    ).strip("\n")
-    history += "observation: task has been updated to: {}\n".format(task)
-    return "MAIN", None, history, task
-
-
-def call_read(purpose, task, history, directory, action_input):
-    if not os.path.exists(action_input):
-        history += "observation: file does not exist\n"
-        return "MAIN", None, history, task
-    module_summary, content, _ = read_python_module_structure(directory)
-    f_content = (
-        content[action_input] if content[action_input] else "< document is empty >"
-    )
-    resp = run_gpt(
-        READ_PROMPT,
-        stop_tokens=[],
-        max_tokens=256,
-        module_summary=module_summary,
-        purpose=purpose,
-        task=task,
-        history=history,
-        file_path=action_input,
-        file_contents=f_content,
-    ).strip("\n")
-    history += "observation: {}\n".format(resp)
-    return "MAIN", None, history, task
-
-
-def call_modify(purpose, task, history, directory, action_input):
-    if not os.path.exists(action_input):
-        history += "observation: file does not exist\n"
-        return "MAIN", None, history, task
-    (
-        module_summary,
-        content,
-        _,
-    ) = read_python_module_structure(directory)
-    f_content = (
-        content[action_input] if content[action_input] else "< document is empty >"
-    )
-    resp = run_gpt(
-        MODIFY_PROMPT,
-        stop_tokens=["action:", "thought:", "observation:"],
-        max_tokens=2048,
-        module_summary=module_summary,
-        purpose=purpose,
-        task=task,
-        history=history,
-        file_path=action_input,
-        file_contents=f_content,
-    )
-    new_contents, description = parse_file_content(resp)
-    if new_contents is None:
-        history += "observation: failed to modify file\n"
-        return "MAIN", None, history, task
-
-    with open(action_input, "w") as f:
-        f.write(new_contents)
-
-    history += "observation: file successfully modified\n"
-    history += "observation: {}\n".format(description)
-    return "MAIN", None, history, task
-
-
-def call_add(purpose, task, history, directory, action_input):
-    d = os.path.dirname(action_input)
-    if not d.startswith(directory):
-        history += "observation: files must be under directory {}\n".format(directory)
-    elif not action_input.endswith(".py"):
-        history += "observation: can only write .py files\n"
-    else:
-        if d and not os.path.exists(d):
-            os.makedirs(d)
-        if not os.path.exists(action_input):
-            module_summary, _, _ = read_python_module_structure(directory)
-            resp = run_gpt(
-                ADD_PROMPT,
-                stop_tokens=["action:", "thought:", "observation:"],
-                max_tokens=2048,
-                module_summary=module_summary,
-                purpose=purpose,
-                task=task,
-                history=history,
-                file_path=action_input,
-            )
-            new_contents, description = parse_file_content(resp)
-            if new_contents is None:
-                history += "observation: failed to write file\n"
-                return "MAIN", None, history, task
-
-            with open(action_input, "w") as f:
-                f.write(new_contents)
-
-            history += "observation: file successfully written\n"
-            history += "obsertation: {}\n".format(description)
-        else:
-            history += "observation: file already exists\n"
-    return "MAIN", None, history, task
-def end_fn(purpose, task, history, directory, action_input):
-    task = "END"
-    return "COMPLETE", None, history, task
-NAME_TO_FUNC = {
-    "MAIN": call_main,
-    "UPDATE-TASK": call_set_task,
-    "SEARCH": call_search,
-    "COMPLETE": end_fn,
 }
 
-
-def run_action(purpose, task, history, directory, action_name, action_input):
-    if "RESPONSE" in action_name:
-        task="END"
-        return action_name, action_input, history, task
-
-    # compress the history when it is long
-    if len(history.split("\n")) > MAX_HISTORY:
-        if VERBOSE:
-            print("COMPRESSING HISTORY")
-        history = compress_history(purpose, task, history, directory)
-
-    assert action_name in NAME_TO_FUNC
-
-    print("RUN: ", action_name, action_input)
-    return NAME_TO_FUNC[action_name](purpose, task, history, directory, action_input)
-
-
-def run(purpose,hist):
-
-    print(purpose)
-    print(hist)
-    task=None
-    directory="./"
-    history = ""
-    action_name = "UPDATE-TASK" if task is None else "MAIN"
-    action_input = None
-    while True:
-        print("")
-        print("")
-        print("---")
-        print("purpose:", purpose)
-        print("task:", task)
-        print("---")
-        print(history)
-        print("---")
-
-        action_name, action_input, history, task = run_action(
-            purpose,
-            task,
-            history,
-            directory,
-            action_name,
-            action_input,
-        )
-        if task == "END":
-            return history
-
-
-
-################################################
-
-def format_prompt(message, history):
-    prompt = "<s>"
-    for user_prompt, bot_response in history:
-        prompt += f"[INST] {user_prompt} [/INST]"
-        prompt += f" {bot_response}</s> "
-    prompt += f"[INST] {message} [/INST]"
-    return prompt
-agents =[
-    "WEB_DEV",
-    "AI_SYSTEM_PROMPT",
-    "PYTHON_CODE_DEV"
-]
-def generate(
-    prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
-):
-    seed = random.randint(1,1111111111111111)
-
-    agent=prompts.WEB_DEV
-    if agent_name == "WEB_DEV":
-        agent = prompts.WEB_DEV
-    if agent_name == "AI_SYSTEM_PROMPT":
-        agent = prompts.AI_SYSTEM_PROMPT
-    if agent_name == "PYTHON_CODE_DEV":
-        agent = prompts.PYTHON_CODE_DEV
-    system_prompt=agent
-    temperature = float(temperature)
-    if temperature < 1e-2:
-        temperature = 1e-2
-    top_p = float(top_p)
-
-    generate_kwargs = dict(
-        temperature=temperature,
-        max_new_tokens=max_new_tokens,
-        top_p=top_p,
-        repetition_penalty=repetition_penalty,
-        do_sample=True,
-        seed=seed,
-    )
-
-    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
-    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
-    output = ""
-
-    for response in stream:
-        output += response.token.text
-        yield output
-    return output
-
-
-additional_inputs=[
-    gr.Dropdown(
-        label="Agents",
-        choices=[s for s in agents],
-        value=agents[0],
-        interactive=True,
-    ),
-    gr.Textbox(
-        label="System Prompt",
-        max_lines=1,
-        interactive=True,
-    ),
-    gr.Slider(
-        label="Temperature",
-        value=0.9,
-        minimum=0.0,
-        maximum=1.0,
-        step=0.05,
-        interactive=True,
-        info="Higher values produce more diverse outputs",
-    ),
-
-    gr.Slider(
-        label="Max new tokens",
-        value=1048*10,
-        minimum=0,
-        maximum=1048*10,
-        step=64,
-        interactive=True,
-        info="The maximum numbers of new tokens",
-    ),
-    gr.Slider(
-        label="Top-p (nucleus sampling)",
-        value=0.90,
-        minimum=0.0,
-        maximum=1,
-        step=0.05,
-        interactive=True,
-        info="Higher values sample more low-probability tokens",
-    ),
-    gr.Slider(
-        label="Repetition penalty",
-        value=1.2,
-        minimum=1.0,
-        maximum=2.0,
-        step=0.05,
-        interactive=True,
-        info="Penalize repeated tokens",
-    ),
-
-
-]
-
-examples=[["I'm planning a vacation to Japan. Can you suggest a one-week itinerary including must-visit places and local cuisines to try?", None, None, None, None, None, ],
-    ["Can you write a short story about a time-traveling detective who solves historical mysteries?", None, None, None, None, None,],
-    ["I'm trying to learn French. Can you provide some common phrases that would be useful for a beginner, along with their pronunciations?", None, None, None, None, None,],
-    ["I have chicken, rice, and bell peppers in my kitchen. Can you suggest an easy recipe I can make with these ingredients?", None, None, None, None, None,],
-    ["Can you explain how the QuickSort algorithm works and provide a Python implementation?", None, None, None, None, None,],
-    ["What are some unique features of Rust that make it stand out compared to other systems programming languages like C++?", None, None, None, None, None,],
-]
-
-
-gr.ChatInterface(
-    fn=run,
-    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
-    title="Mixtral 46.7B\nMicro-Agent\nInternet Search",
-    examples=examples,
-    concurrency_limit=20,
-).launch(show_api=False)
 import os
 import subprocess
+import streamlit as st
+from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer, AutoModel, RagRetriever, AutoModelForSeq2SeqLM
+import black
+from pylint import lint
+from io import StringIO
+import sys
+import torch
+from huggingface_hub import hf_hub_url, cached_download, HfApi
 from datetime import datetime
 
+# Set your Hugging Face API key here
+hf_token = "YOUR_HUGGING_FACE_API_KEY"  # Replace with your actual token
+
+HUGGING_FACE_REPO_URL = "https://huggingface.co/spaces/acecalisto3/DevToolKit"
+PROJECT_ROOT = "projects"
+AGENT_DIRECTORY = "agents"
+
+# Global state to manage communication between Tool Box and Workspace Chat App
+if 'chat_history' not in st.session_state:
+    st.session_state.chat_history = []
+if 'terminal_history' not in st.session_state:
+    st.session_state.terminal_history = []
+if 'workspace_projects' not in st.session_state:
+    st.session_state.workspace_projects = {}
+if 'available_agents' not in st.session_state:
+    st.session_state.available_agents = []
+if 'current_state' not in st.session_state:
+    st.session_state.current_state = {
+        'toolbox': {},
+        'workspace_chat': {}
+    }
+
+# List of top downloaded free code-generative models from Hugging Face Hub
+AVAILABLE_CODE_GENERATIVE_MODELS = [
+    "bigcode/starcoder",  # Popular and powerful
+    "Salesforce/codegen-350M-mono",  # Smaller, good for quick tasks
+    "microsoft/CodeGPT-small",  # Smaller, good for quick tasks
+    "google/flan-t5-xl",  # Powerful, good for complex tasks
+    "facebook/bart-large-cnn",  # Good for text-to-code tasks
+]
+
+# Load pre-trained RAG retriever
+rag_retriever = RagRetriever.from_pretrained("facebook/rag-token-base")  # Use a Hugging Face RAG model
+
+# Load pre-trained chat model
+chat_model = AutoModelForSeq2SeqLM.from_pretrained("microsoft/DialoGPT-medium")  # Use a Hugging Face chat model
+
+# Load tokenizer
+tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
+
+# Place the CSS here
+st.markdown("""
+<style>
+/* Advanced and Accommodating CSS */
+body {
+    font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
+    background-color: #f4f4f9;
+    color: #333;
+    margin: 0;
+    padding: 0;
 }
 
+h1, h2, h3, h4, h5, h6 {
+    color: #333;
+}
+
+.container {
+    width: 90%;
+    margin: 0 auto;
+    padding: 20px;
+}
+
+/* Navigation Sidebar */
+.sidebar {
+    background-color: #2c3e50;
+    color: #ecf0f1;
+    padding: 20px;
+    height: 100vh;
+    position: fixed;
+    top: 0;
+    left: 0;
+    width: 250px;
+    overflow-y: auto;
+}
+
+.sidebar a {
+    color: #ecf0f1;
+    text-decoration: none;
+    display: block;
+    padding: 10px 0;
+}
+
+.sidebar a:hover {
+    background-color: #34495e;
+    border-radius: 5px;
+}
+
+/* Main Content */
+.main-content {
+    margin-left: 270px;
+    padding: 20px;
+}
+
+/* Buttons */
+button {
+    background-color: #3498db;
+    color: #fff;
+    border: none;
+    padding: 10px 20px;
+    border-radius: 5px;
+    cursor: pointer;
+    font-size: 16px;
+}
+
+button:hover {
+    background-color: #2980b9;
+}
+
+/* Text Areas and Inputs */
+textarea, input[type="text"] {
+    width: 100%;
+    padding: 10px;
+    margin: 10px 0;
+    border: 1px solid #ddd;
+    border-radius: 5px;
+    box-sizing: border-box;
+}
+
+textarea:focus, input[type="text"]:focus {
+    border-color: #3498db;
+    outline: none;
+}
+
+/* Terminal Output */
+.code-output {
+    background-color: #1e1e1e;
+    color: #dcdcdc;
+    padding: 20px;
+    border-radius: 5px;
+    font-family: 'Courier New', Courier, monospace;
+}
+
+/* Chat History */
+.chat-history {
+    background-color: #ecf0f1;
+    padding: 20px;
+    border-radius: 5px;
+    max-height: 300px;
+    overflow-y: auto;
+}
+
+.chat-message {
+    margin-bottom: 10px;
+}
+
+.chat-message.user {
+    text-align: right;
+    color: #3498db;
+}
+
+.chat-message.agent {
+    text-align: left;
+    color: #e74c3c;
+}
+
+/* Project Management */
+.project-list {
+    background-color: #ecf0f1;
+    padding: 20px;
+    border-radius: 5px;
+    max-height: 300px;
+    overflow-y: auto;
+}
+
+.project-item {
+    margin-bottom: 10px;
+}
+
+.project-item a {
+    color: #3498db;
+    text-decoration: none;
+}
+
+.project-item a:hover {
+    text-decoration: underline;
+}
+</style>
+""", unsafe_allow_html=True)
+
+
+class AIAgent:
+    def __init__(self, name, description, skills, hf_api=None):
+        self.name = name
+        self.description = description
+        self.skills = skills
+        self._hf_api = hf_api
+        self._hf_token = hf_token  # Store the token here
+
+    @property
+    def hf_api(self):
+        if not self._hf_api and self.has_valid_hf_token():
+            self._hf_api = HfApi(token=self._hf_token)
+        return self._hf_api
+
+    def has_valid_hf_token(self):
+        return bool(self._hf_token)
+
+    async def autonomous_build(self, chat_history, workspace_projects, project_name, selected_model, hf_token):
+        self._hf_token = hf_token
+        # Continuation of previous methods
+
+    def deploy_built_space_to_hf(self):
+        if not self._hf_api or not self._hf_token:
+            raise ValueError("Cannot deploy the Space since no valid Hugging Face API connection was established.")
+        repository_name = f"my-awesome-space_{datetime.now().timestamp()}"
+        files = get_built_space_files()
+        commit_response = self.hf_api.commit_repo(
+            repo_id=repository_name,
+            branch="main",
+            commits=[{"message": "Built Space Commit", "tree": tree_payload}]
+        )
+        print("Commit successful:", commit_response)
+        self.publish_space(repository_name)
+
+    def publish_space(self, repository_name):
+        publishing_response = self.hf_api.create_model_version(
+            model_name=repository_name,
+            repo_id=repository_name,
+            model_card={},
+            library_card={}
+        )
+        print("Space published:", publishing_response)
+
+def process_input(user_input):
+    # Input pipeline: Tokenize and preprocess user input
+    input_ids = tokenizer(user_input, return_tensors="pt").input_ids
+    attention_mask = tokenizer(user_input, return_tensors="pt").attention_mask
+
+    # RAG model: Generate response
+    with torch.no_grad():
+        output = rag_retriever(input_ids, attention_mask=attention_mask)
+        response = output.generator_outputs[0].sequences[0]
+
+    # Chat model: Refine response
+    chat_input = tokenizer(response, return_tensors="pt")
+    chat_input["input_ids"] = chat_input["input_ids"].unsqueeze(0)
+    chat_input["attention_mask"] = chat_input["attention_mask"].unsqueeze(0)
+    with torch.no_grad():
+        chat_output = chat_model(**chat_input)
+        refined_response = chat_output.sequences[0]
+
+    # Output pipeline: Return final response
+    return refined_response
+
+def workspace_interface(project_name):
+    project_path = os.path.join(PROJECT_ROOT, project_name)
+    if not os.path.exists(project_path):
+        os.makedirs(project_path)
+        st.session_state.workspace_projects[project_name] = {'files': []}
+        return f"Project '{project_name}' created successfully."
+    else:
+        return f"Project '{project_name}' already exists."
+
+def add_code_to_workspace(project_name, code, file_name):
+    project_path = os.path.join(PROJECT_ROOT, project_name)
+    if not os.path.exists(project_path):
+        return f"Project '{project_name}' does not exist."
+
+    file_path = os.path.join(project_path, file_name)
+    with open(file_path, "w") as file:
+        file.write(code)
+    st.session_state.workspace_projects[project_name]['files'].append(file_name)
+    return f"Code added to '{file_name}' in project '{project_name}'."
+
+def run_code(command, project_name=None):
+    if project_name:
+        project_path = os.path.join(PROJECT_ROOT, project_name)
+        result = subprocess.run(command, shell=True, capture_output=True, text=True, cwd=project_path)
+    else:
+        result = subprocess.run(command, shell=True, capture_output=True, text=True)
+    return result.stdout
+
+def display_chat_history(history):
+    chat_history = ""
+    for user_input, response in history:
+        chat_history += f"User: {user_input}\nAgent: {response}\n\n"
+    return chat_history
+
+def display_workspace_projects(projects):
+    workspace_projects = ""
+    for project, details in projects.items():
+        workspace_projects += f"Project: {project}\nFiles:\n"
+        for file in details['files']:
+            workspace_projects += f" - {file}\n"
+    return workspace_projects
+
+# Streamlit App
+st.title("AI Agent Creator")
+
+# Sidebar navigation
+st.sidebar.title("Navigation")
+app_mode = st.sidebar.selectbox("Choose the app mode", ["AI Agent Creator", "Tool Box", "Workspace Chat App"])
+
+if app_mode == "AI Agent Creator":
+    # AI Agent Creator
+    st.header("Create an AI Agent from Text")
+
+    st.subheader("From Text")
+    agent_name = st.text_input("Enter agent name:")
+    text_input = st.text_area("Enter skills (one per line):")
+    if st.button("Create Agent"):
+        skills = text_input.split('\n')
+        agent = AIAgent(agent_name, "AI agent created from text input", skills)
+        st.success(f"Agent '{agent_name}' created and saved successfully.")
+        st.session_state.available_agents.append(agent_name)
+
+elif app_mode == "Tool Box":
+    # Tool Box
+    st.header("AI-Powered Tools")
+
+    # Chat Interface
+    st.subheader("Chat with CodeCraft")
+    chat_input = st.text_area("Enter your message:")
+    if st.button("Send"):
+        response = process_input(chat_input)
+        st.session_state.chat_history.append((chat_input, response))
+        st.write(f"CodeCraft: {response}")
+
+    # Terminal Interface
+    st.subheader("Terminal")
+    terminal_input = st.text_input("Enter a command:")
+    if st.button("Run"):
+        output = run_code(terminal_input)
+        st.session_state.terminal_history.append((terminal_input, output))
+        st.code(output, language="bash")
+
+    # Project Management
+    st.subheader("Project Management")
+    project_name_input = st.text_input("Enter Project Name:")
+    if st.button("Create Project"):
+        status = workspace_interface(project_name_input)
+        st.write(status)
+
+    code_to_add = st.text_area("Enter Code to Add to Workspace:", height=150)
+    file_name_input = st.text_input("Enter File Name (e.g., 'app.py'):")
+    if st.button("Add Code"):
+        status = add_code_to_workspace(project_name_input, code_to_add, file_name_input)
+        st.write(status)
+
+    # Display Chat History
+    st.subheader("Chat History")
+    chat_history = display_chat_history(st.session_state.chat_history)
+    st.text_area("Chat History", value=chat_history, height=200)
+
+    # Display Workspace Projects
+    st.subheader("Workspace Projects")
+    workspace_projects = display_workspace_projects(st.session_state.workspace_projects)
+    st.text_area("Workspace Projects", value=workspace_projects, height=200)
+
+elif app_mode == "Workspace Chat App":
+    # Workspace Chat App
+    st.header("Workspace Chat App")
+
+    # Chat Interface with AI Agents
+    st.subheader("Chat with AI Agents")
+    selected_agent = st.selectbox("Select an AI agent", st.session_state.available_agents)
+    agent_chat_input = st.text_area("Enter your message for the agent:")
+    if st.button("Send to Agent"):
+        response = process_input(agent_chat_input)
+        st.session_state.chat_history.append((agent_chat_input, response))
+        st.write(f"{selected_agent}: {response}")
+
+    # Code Generation
+    st.subheader("Code Generation")
+    code_idea = st.text_input("Enter your code idea:")
+    selected_model = st.selectbox("Select a code-generative model", AVAILABLE_CODE_GENERATIVE_MODELS)
+    if st.button("Generate Code"):
+        generated_code = run_code(code_idea)
+        st.code(generated_code, language="python")
+
+    # Automate Build Process
+    st.subheader("Automate Build Process")
+    if st.button("Automate"):
+        agent = AIAgent(selected_agent, "", [])  # Load the agent without skills for now
+        summary, next_step = agent.autonomous_build(st.session_state.chat_history, st.session_state.workspace_projects, project_name, selected_model, hf_token)
+        st.write("Autonomous Build Summary:")
+        st.write(summary)
+        st.write("Next Step:")
+        st.write(next_step)
+
+        if agent._hf_api and agent.has_valid_hf_token():
+            repository = agent.deploy_built_space_to_hf()
+            st.markdown("## Congratulations! Successfully deployed Space 🚀 ##")
+            st.markdown("[Check out your new Space here](hf.co/" + repository.name + ")")