import os
import random
import subprocess

import gradio as gr
import streamlit as st
from huggingface_hub import InferenceClient
from transformers import AutoModelForCausalLM, AutoTokenizer, set_seed

# Initialize the session state used across the Streamlit UI
if 'current_state' not in st.session_state:
    st.session_state.current_state = None
if 'chat_history' not in st.session_state:
    st.session_state.chat_history = []
if 'workspace_projects' not in st.session_state:
    st.session_state.workspace_projects = {}

# InferenceClient for Mixtral-8x7B-Instruct-v0.1 (reserved; generate() below
# currently runs the local model instead)
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

# Load the local model and tokenizer used for generation
model_name = "bigscience/bloom-1b7"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Agent configurations; instantiated as AIAgent objects after the class definition below
AGENT_CONFIGS = {
    "WEB_DEV": {
        "description": "Expert in web development technologies and frameworks.",
        "skills": ["HTML", "CSS", "JavaScript", "React", "Vue.js", "Flask", "Django", "Node.js", "Express.js"],
        "system_prompt": "You are a web development expert. Your goal is to assist the user in building and deploying web applications. Provide code snippets, explanations, and guidance on best practices.",
    },
    "AI_SYSTEM_PROMPT": {
        "description": "Expert in designing and implementing AI systems.",
        "skills": ["Machine Learning", "Deep Learning", "Natural Language Processing", "Computer Vision", "Reinforcement Learning"],
        "system_prompt": "You are an AI system expert. Your goal is to assist the user in designing and implementing AI systems. Provide code snippets, explanations, and guidance on best practices.",
    },
    "PYTHON_CODE_DEV": {
        "description": "Expert in Python programming and development.",
        "skills": ["Python", "Data Structures", "Algorithms", "Object-Oriented Programming", "Functional Programming"],
        "system_prompt": "You are a Python code development expert. Your goal is to assist the user in writing and debugging Python code. Provide code snippets, explanations, and guidance on best practices.",
    },
    "CODE_REVIEW_ASSISTANT": {
        "description": "Expert in code review and quality assurance.",
        "skills": ["Code Style", "Best Practices", "Security", "Performance", "Maintainability"],
        "system_prompt": "You are a code review assistant. Your goal is to assist the user in reviewing code for quality and efficiency. Provide feedback on code style, best practices, security, performance, and maintainability.",
    },
    "CONTENT_WRITER_EDITOR": {
        "description": "Expert in content writing and editing.",
        "skills": ["Grammar", "Style", "Clarity", "Conciseness", "SEO"],
        "system_prompt": "You are a content writer and editor. Your goal is to assist the user in creating high-quality content. Provide suggestions on grammar, style, clarity, conciseness, and SEO.",
    },
    "QUESTION_GENERATOR": {
        "description": "Expert in generating questions for learning and assessment.",
        "skills": ["Question Types", "Cognitive Levels", "Assessment Design"],
        "system_prompt": "You are a question generator. Your goal is to assist the user in generating questions for learning and assessment. Provide questions that are relevant to the topic and aligned with the cognitive levels.",
    },
    "HUGGINGFACE_FILE_DEV": {
        "description": "Expert in developing Hugging Face files for machine learning models.",
        "skills": ["Transformers", "Datasets", "Model Training", "Model Deployment"],
        "system_prompt": "You are a Hugging Face file development expert. Your goal is to assist the user in creating and deploying Hugging Face files for machine learning models. Provide code snippets, explanations, and guidance on best practices.",
    },
}

class AIAgent:
    def __init__(self, name, description, skills, system_prompt):
        self.name = name
        self.description = description
        self.skills = skills
        self.system_prompt = system_prompt
        self.active = False

    def activate(self):
        self.active = True

    def deactivate(self):
        self.active = False

    def create_agent_prompt(self):
        skills_str = '\n'.join([f"* {skill}" for skill in self.skills])
        agent_prompt = f"""
As an elite expert developer, my name is {self.name}. I possess a comprehensive understanding of the following areas:
{skills_str}
I am confident that I can leverage my expertise to assist you in developing and deploying cutting-edge web applications. Please feel free to ask any questions or present any challenges you may encounter.
"""
        return agent_prompt

    def autonomous_build(self, chat_history, workspace_projects):
        summary = "Chat History:\n" + "\n".join([f"User: {u}\nAgent: {a}" for u, a in chat_history])
        summary += "\n\nWorkspace Projects:\n" + "\n".join([f"{p}: {details}" for p, details in workspace_projects.items()])
        next_step = "Based on the current state, the next logical step is to implement the main application logic."
        return summary, next_step
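
# Build an AIAgent instance from each configuration so activation state and
# prompt helpers live on one object (the UI below expects attribute access)
agents = {name: AIAgent(name, **cfg) for name, cfg in AGENT_CONFIGS.items()}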

def format_prompt(message, history, agent_prompt):
    # Mixtral/Llama-style [INST] chat template; note the local BLOOM model was
    # not trained on this template, so output quality may vary
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {agent_prompt}, {message} [/INST]"
    return prompt

def generate(prompt, history, agent_name, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
    agent = agents[agent_name]
    system_prompt = agent.system_prompt

    # transformers' generate() takes no seed kwarg; seed the RNGs globally
    # instead, keeping the value within numpy's valid seed range
    seed = random.randint(1, 2**32 - 1)
    set_seed(seed)

    generate_kwargs = dict(
        temperature=float(temperature),
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
    )

    formatted_prompt = format_prompt(prompt, history, system_prompt)
    input_ids = tokenizer.encode(formatted_prompt, return_tensors="pt")
    output = model.generate(input_ids, **generate_kwargs)
    # Decode only the newly generated tokens, not the echoed prompt
    response = tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True)
    return response
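
# A minimal usage sketch (assumes the WEB_DEV agent defined above; history is a
# list of (user, bot) tuples as built by the chat UI below):
#   reply = generate("Write a hello-world Flask app", [], "WEB_DEV")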

def chat_interface(chat_input, agent_name):
    if agents[agent_name].active:
        response = generate(chat_input, st.session_state.chat_history, agent_name)
        return response
    else:
        return "Agent is not active. Please activate the agent."

def terminal_interface(command, project_name):
    project_path = os.path.join(os.getcwd(), project_name)
    if not os.path.isdir(project_path):
        return f"Project '{project_name}' does not exist; create it first."
    try:
        result = subprocess.run(command, shell=True, capture_output=True, text=True, cwd=project_path)
        return result.stdout if result.returncode == 0 else result.stderr
    except Exception as e:
        return str(e)

def add_code_to_workspace(project_name, code, file_name):
    project_path = os.path.join(os.getcwd(), project_name)
    if not os.path.exists(project_path):
        os.makedirs(project_path)
    file_path = os.path.join(project_path, file_name)
    with open(file_path, 'w') as file:
        file.write(code)
    if project_name not in st.session_state.workspace_projects:
        st.session_state.workspace_projects[project_name] = {'files': []}
    st.session_state.workspace_projects[project_name]['files'].append(file_name)
    return f"Added {file_name} to {project_name}"

# Streamlit UI
st.title("DevToolKit: AI-Powered Development Environment")

# Project Management
st.header("Project Management")
project_name = st.text_input("Enter project name:")
if st.button("Create Project"):
    if project_name not in st.session_state.workspace_projects:
        st.session_state.workspace_projects[project_name] = {'files': []}
        st.success(f"Created project: {project_name}")
    else:
        st.warning(f"Project {project_name} already exists")

# Code Addition
st.subheader("Add Code to Workspace")
code_to_add = st.text_area("Enter code to add to workspace:")
file_name = st.text_input("Enter file name (e.g. 'app.py'):")
if st.button("Add Code"):
    add_code_status = add_code_to_workspace(project_name, code_to_add, file_name)
    st.success(add_code_status)

# Terminal Interface
st.subheader("Terminal (Workspace Context)")
terminal_input = st.text_input("Enter a command within the workspace:")
if st.button("Run Command"):
    terminal_output = terminal_interface(terminal_input, project_name)
    st.code(terminal_output, language="bash")

# Chat Interface
st.subheader("Chat with AI Agents")
selected_agent = st.selectbox("Select an AI agent", list(agents.keys()))
agent_chat_input = st.text_area("Enter your message for the agent:")
if st.button("Send to Agent"):
    agent_chat_response = chat_interface(agent_chat_input, selected_agent)
    st.session_state.chat_history.append((agent_chat_input, agent_chat_response))
    st.write(f"{selected_agent}: {agent_chat_response}")

# Agent Control
st.subheader("Agent Control")
for agent_name, agent in agents.items():
    with st.expander(f"{agent_name} ({agent.description})"):
        if st.button(f"Activate {agent_name}"):
            agent.activate()
            st.success(f"{agent_name} activated.")
        if st.button(f"Deactivate {agent_name}"):
            agent.deactivate()
            st.success(f"{agent_name} deactivated.")

# Automate Build Process
st.subheader("Automate Build Process")
if st.button("Automate"):
    # Select the appropriate agent based on the current context
    # ...
    # Implement the autonomous build process
    # ...
    pass

# Display current state for debugging
st.sidebar.subheader("Current State")
st.sidebar.json(st.session_state.current_state)

# Gradio Interface
additional_inputs = [
    gr.Dropdown(label="Agents", choices=list(agents.keys()), value=list(agents.keys())[0], interactive=True),
    gr.Textbox(label="System Prompt", max_lines=1, interactive=True),
    gr.Slider(label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs"),
    gr.Slider(label="Max new tokens", value=1048*10, minimum=0, maximum=1000*10, step=64, interactive=True, info="The maximum numbers of new tokens"),
    gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens"),
    gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens"),
]

examples = [
    ["Create a simple web application using Flask", "WEB_DEV"],
    ["Generate a Python script to perform a linear regression analysis", "PYTHON_CODE_DEV"],
    ["Create a Dockerfile for a Node.js application", "AI_SYSTEM_PROMPT"],
    # Add more examples as needed
]

def gradio_chat(message, history, agent_name, system_prompt, temperature, max_new_tokens, top_p, repetition_penalty):
    # gr.ChatInterface passes each additional input positionally after
    # (message, history). The System Prompt textbox is accepted here, but
    # generate() uses the selected agent's own system prompt.
    return generate(message, history or [], agent_name,
                    temperature=temperature, max_new_tokens=int(max_new_tokens),
                    top_p=top_p, repetition_penalty=repetition_penalty)

gr.ChatInterface(
    fn=gradio_chat,
    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
    additional_inputs=additional_inputs,
    title="DevToolKit AI Assistant",
    examples=examples,
    concurrency_limit=20,
).launch(show_api=True)
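
# Note: this script drives both a Streamlit UI and a Gradio interface from one
# file. A minimal way to try the Streamlit side (assuming the file is saved as
# app.py) is:
#
#   streamlit run app.py
#
# launch() above starts the Gradio server as a side effect of running the
# module; splitting the two UIs into separate entry points is usually cleaner.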