1. app.py (Main Application)
import os | |
import streamlit as st | |
from huggingface_hub import InferenceClient | |
import gradio as gr | |
import random | |
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer | |
import subprocess | |
# --- Agent Definitions --- | |
class AIAgent:
    """A specialist assistant backed by a Hugging Face text-generation model.

    Each agent has a name, a human-readable description, a list of skill
    strings, and an `InferenceClient` bound to `model_name`.
    """

    def __init__(self, name, description, skills, model_name="mistralai/Mixtral-8x7B-Instruct-v0.1"):
        self.name = name
        self.description = description
        self.skills = skills
        self.model_name = model_name
        # One client per agent; created eagerly so failures surface at startup.
        self.client = InferenceClient(self.model_name)

    def create_agent_prompt(self):
        """Return a system-prompt string introducing this agent and its skills."""
        skills_str = '\n'.join([f"* {skill}" for skill in self.skills])
        agent_prompt = f"""
As an elite expert developer, my name is {self.name}. I possess a comprehensive understanding of the following areas:
{skills_str}
I am confident that I can leverage my expertise to assist you in developing and deploying cutting-edge web applications. Please feel free to ask any questions or present any challenges you may encounter.
"""
        return agent_prompt

    def generate_response(self, prompt, history, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
        """Stream a completion for `prompt` given `(user, bot)` `history` pairs.

        Yields the *cumulative* output after each token, so the last yielded
        value is the full response. Callers must iterate (this is a generator).
        """
        formatted_prompt = self.format_prompt(prompt, history)
        stream = self.client.text_generation(
            formatted_prompt,
            temperature=temperature,
            max_new_tokens=max_new_tokens,
            top_p=top_p,
            repetition_penalty=repetition_penalty,
            do_sample=True,
            seed=random.randint(1, 1111111111111111),
            stream=True,
            details=True,
            return_full_text=False,
        )
        output = ""
        for response in stream:
            output += response.token.text
            yield output
        return output

    def format_prompt(self, message, history):
        """Build a Mixtral-style `[INST] ... [/INST]` prompt from the history."""
        prompt = "<s>"
        for user_prompt, bot_response in history:
            prompt += f"[INST] {user_prompt} [/INST]"
            prompt += f" {bot_response}</s> "
        prompt += f"[INST] {message} [/INST]"
        return prompt

    def autonomous_build(self, chat_history, workspace_projects):
        """Summarize the session state and return (summary, next_step) strings."""
        summary = "Chat History:\n" + "\n".join([f"User: {u}\nAgent: {a}" for u, a in chat_history])
        summary += "\n\nWorkspace Projects:\n" + "\n".join([f"{p}: {details}" for p, details in workspace_projects.items()])
        # NOTE(review): next_step is currently a fixed placeholder, not model-derived.
        next_step = "Based on the current state, the next logical step is to implement the main application logic."
        return summary, next_step
# --- Agent Definitions --- | |
# Registry of available agents, keyed by the name shown in the UI selectors.
agents = {
    "WEB_DEV": AIAgent("WEB_DEV", "Web development expert", ["HTML", "CSS", "JavaScript", "Flask", "React"]),
    "AI_SYSTEM_PROMPT": AIAgent("AI_SYSTEM_PROMPT", "AI system prompt expert", ["Prompt Engineering", "LLM Interaction", "Fine-tuning"]),
    "PYTHON_CODE_DEV": AIAgent("PYTHON_CODE_DEV", "Python code development expert", ["Python", "Data Structures", "Algorithms", "Libraries"]),
    "CODE_REVIEW_ASSISTANT": AIAgent("CODE_REVIEW_ASSISTANT", "Code review assistant", ["Code Quality", "Best Practices", "Security"]),
    "CONTENT_WRITER_EDITOR": AIAgent("CONTENT_WRITER_EDITOR", "Content writer and editor", ["Writing", "Editing", "SEO"]),
    "QUESTION_GENERATOR": AIAgent("QUESTION_GENERATOR", "Question generator", ["Question Generation", "Knowledge Testing"]),
    "HUGGINGFACE_FILE_DEV": AIAgent("HUGGINGFACE_FILE_DEV", "Hugging Face file development expert", ["Hugging Face Hub", "Model Training", "Dataset Creation"]),
}
# --- Streamlit UI ---
st.title("DevToolKit: AI-Powered Development Environment")

# Initialize session state on first run. Without this, every access to
# st.session_state.workspace_projects / chat_history / terminal_history /
# current_state below raises AttributeError on a fresh session.
for _key, _default in (
    ("workspace_projects", {}),
    ("chat_history", []),
    ("terminal_history", []),
    ("current_state", {}),
):
    if _key not in st.session_state:
        st.session_state[_key] = _default

# --- Project Management ---
st.header("Project Management")
project_name = st.text_input("Enter project name:")
if st.button("Create Project"):
    if project_name not in st.session_state.workspace_projects:
        st.session_state.workspace_projects[project_name] = {'files': []}
        st.success(f"Created project: {project_name}")
    else:
        st.warning(f"Project {project_name} already exists")
# --- Code Addition ---
st.subheader("Add Code to Workspace")
code_to_add = st.text_area("Enter code to add to workspace:")
file_name = st.text_input("Enter file name (e.g. 'app.py'):")
if st.button("Add Code"):
    # Writes the file under the current project and records it in session state.
    add_code_status = add_code_to_workspace(project_name, code_to_add, file_name)
    st.success(add_code_status)
# --- Terminal Interface ---
st.subheader("Terminal (Workspace Context)")
terminal_input = st.text_input("Enter a command within the workspace:")
if st.button("Run Command"):
    # Runs the command with the project directory as cwd; output (stdout or
    # stderr) is shown as a bash code block.
    terminal_output = terminal_interface(terminal_input, project_name)
    st.code(terminal_output, language="bash")
# --- Chat Interface ---
st.subheader("Chat with DevToolKit for Guidance")
chat_input = st.text_area("Enter your message for guidance:")
if st.button("Get Guidance"):
    # NOTE(review): chat_interface is defined later in this file / in utils.py;
    # confirm it is in scope before this script section executes.
    chat_response = chat_interface(chat_input)
    st.session_state.chat_history.append((chat_input, chat_response))
    st.write(f"DevToolKit: {chat_response}")

# --- Display Chat History ---
st.subheader("Chat History")
for user_input, response in st.session_state.chat_history:
    st.write(f"User: {user_input}")
    st.write(f"DevToolKit: {response}")
# --- Display Terminal History ---
st.subheader("Terminal History")
for command, output in st.session_state.terminal_history:
    st.write(f"Command: {command}")
    st.code(output, language="bash")

# --- Display Projects and Files ---
st.subheader("Workspace Projects")
for project, details in st.session_state.workspace_projects.items():
    st.write(f"Project: {project}")
    for file in details['files']:
        st.write(f"  - {file}")
# --- Chat with AI Agents ---
st.subheader("Chat with AI Agents")
selected_agent_name = st.selectbox("Select an AI agent", list(agents.keys()))
selected_agent = agents[selected_agent_name]
agent_chat_input = st.text_area("Enter your message for the agent:")
if st.button("Send to Agent"):
    # generate_response is a generator yielding the cumulative text; the
    # original stored the generator object itself, so the chat history and
    # display showed a repr instead of the reply. Drain it to the final chunk.
    agent_chat_response = ""
    for agent_chat_response in selected_agent.generate_response(agent_chat_input, st.session_state.chat_history):
        pass
    st.session_state.chat_history.append((agent_chat_input, agent_chat_response))
    st.write(f"{selected_agent.name}: {agent_chat_response}")
# --- Automate Build Process ---
st.subheader("Automate Build Process")
if st.button("Automate"):
    summary, next_step = selected_agent.autonomous_build(st.session_state.chat_history, st.session_state.workspace_projects)
    st.write("Autonomous Build Summary:")
    st.write(summary)
    st.write("Next Step:")
    st.write(next_step)

# --- Display current state for debugging ---
st.sidebar.subheader("Current State")
# `current_state` is never written anywhere in this file; use .get with a
# default so a fresh session does not raise AttributeError here.
st.sidebar.json(st.session_state.get("current_state", {}))
# --- Gradio Interface ---
additional_inputs = [
    gr.Dropdown(label="Agents", choices=list(agents.keys()), value=list(agents.keys())[0], interactive=True),
    gr.Textbox(label="System Prompt", max_lines=1, interactive=True),
    gr.Slider(label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs"),
    # The original default (1048*10) exceeded the slider's maximum (1000*10);
    # clamp the default to the maximum so the widget is valid.
    gr.Slider(label="Max new tokens", value=1000*10, minimum=0, maximum=1000*10, step=64, interactive=True, info="The maximum numbers of new tokens"),
    gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens"),
    gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens"),
]

examples = [
    ["Create a simple web application using Flask", "WEB_DEV", None, None, None, None, ],
    ["Generate a Python script to perform a linear regression analysis", "PYTHON_CODE_DEV", None, None, None, None, ],
    ["Create a Dockerfile for a Node.js application", "AI_SYSTEM_PROMPT", None, None, None, None, ],
    # Add more examples as needed
]

# NOTE(review): `generate` must be defined (imported from utils.py) before this
# call; in the pasted layout it is only defined further down the file.
gr.ChatInterface(
    fn=generate,
    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
    additional_inputs=additional_inputs,
    title="DevToolKit AI Assistant",
    examples=examples,
    concurrency_limit=20,
).launch(show_api=True)
# --- Helper Functions (Moved to separate file) ---
# The original stubs had comment-only bodies, which is a SyntaxError in
# Python; give each a valid body that fails loudly if the real utils.py
# implementations are not wired in.

def generate(prompt, history, agent_name, sys_prompt="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
    """Stub — real implementation lives in utils.py."""
    raise NotImplementedError("generate is implemented in utils.py")

def chat_interface(chat_input):
    """Stub — real implementation lives in utils.py."""
    raise NotImplementedError("chat_interface is implemented in utils.py")

def chat_interface_with_agent(chat_input, agent_name):
    """Stub — real implementation lives in utils.py."""
    raise NotImplementedError("chat_interface_with_agent is implemented in utils.py")

def terminal_interface(command, project_name):
    """Stub — real implementation lives in utils.py."""
    raise NotImplementedError("terminal_interface is implemented in utils.py")

def add_code_to_workspace(project_name, code, file_name):
    """Stub — real implementation lives in utils.py."""
    raise NotImplementedError("add_code_to_workspace is implemented in utils.py")
2. requirements.txt (Dependencies)
streamlit
huggingface_hub
gradio
transformers
(Note: `subprocess` was removed from this list — it is part of the Python standard library and cannot be installed from PyPI; `pip install subprocess` would fail the Space build.)
3. utils.py (Helper Functions) | |
import os | |
import subprocess | |
import streamlit as st | |
def generate(prompt, history, agent_name, sys_prompt="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
    """Stream a completion for `prompt` via the selected agent's client.

    Yields the cumulative output after each token (generator). `history` is a
    list of (user, bot) pairs; `agent_name` selects an entry from `agents`.
    """
    import random  # local import: the utils.py header does not import random

    seed = random.randint(1, 1111111111111111)
    # NOTE(review): `agents` is defined in app.py, not in this module —
    # confirm it is importable here or passed in explicitly.
    agent = agents[agent_name]
    # The original tested `sys_prompt is None`, but the default is "" so the
    # agent's own prompt was never used; treat any empty/falsy sys_prompt as
    # "fall back to the agent prompt".
    system_prompt = sys_prompt if sys_prompt else agent.create_agent_prompt()
    generate_kwargs = dict(
        temperature=float(temperature),
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=seed,
    )
    formatted_prompt = agent.format_prompt(f"{system_prompt}, {prompt}", history)
    stream = agent.client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""
    for response in stream:
        output += response.token.text
        yield output
    return output
def chat_interface(chat_input):
    """Return general guidance text for `chat_input` using the default agent.

    The original called generate() without the required `agent_name` argument
    (TypeError) and returned the generator object itself; default to the
    WEB_DEV agent and drain the stream to the final full response.
    """
    response = ""
    for response in generate(chat_input, st.session_state.chat_history, agent_name="WEB_DEV"):
        pass
    return response
def chat_interface_with_agent(chat_input, agent_name):
    """Return the named agent's reply to `chat_input` as a complete string.

    generate() is a generator of cumulative text; the original returned the
    generator object — drain it so callers get the final response text.
    """
    agent_prompt = agents[agent_name].create_agent_prompt()
    response = ""
    for response in generate(chat_input, st.session_state.chat_history, agent_name=agent_name, sys_prompt=agent_prompt):
        pass
    return response
def terminal_interface(command, project_name):
    """Run a shell `command` with `project_name` as cwd; return stdout on
    success, stderr on a non-zero exit, or the exception text on failure.

    NOTE(review): shell=True executes free-form user input — acceptable only
    in a sandboxed Space; do not expose this outside one.
    """
    try:
        # An empty project name (fresh text_input) would make cwd="" and
        # raise; fall back to the current directory instead.
        result = subprocess.run(command, shell=True, capture_output=True, text=True, cwd=project_name or None)
        return result.stdout if result.returncode == 0 else result.stderr
    except Exception as e:
        return str(e)
def add_code_to_workspace(project_name, code, file_name):
    """Write `code` to `file_name` inside the project directory and record the
    file in st.session_state.workspace_projects; return a status message.

    NOTE(review): file_name is not sanitized — a value like "../x" escapes the
    project directory; confirm inputs are trusted or add validation.
    """
    project_path = os.path.join(os.getcwd(), project_name)
    # exist_ok avoids the check-then-create race in the original.
    os.makedirs(project_path, exist_ok=True)
    file_path = os.path.join(project_path, file_name)
    with open(file_path, 'w') as f:
        f.write(code)
    project = st.session_state.workspace_projects.setdefault(project_name, {'files': []})
    # Re-adding the same file overwrites on disk; don't duplicate the entry.
    if file_name not in project['files']:
        project['files'].append(file_name)
    return f"Added {file_name} to {project_name}"