import os
import sys
import subprocess
import streamlit as st
from huggingface_hub import InferenceClient
import gradio as gr
import random
import prompts
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
import black
from pylint import lint
from io import StringIO

# Initialize the InferenceClient for Mixtral-8x7B-Instruct-v0.1
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

# Initialize the pipeline for Llama-3-8B-Instruct-Coder-GGUF
pipe = pipeline("text-generation", model="bartowski/Llama-3-8B-Instruct-Coder-GGUF")
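# Caveat (assumption, not verified here): GGUF repositories such as this one usually cannot
# be loaded by transformers' pipeline() from the bare repo id alone; they typically need a
# specific .gguf file to be selected or a GGUF-capable runtime such as llama.cpp. Note also
# that `pipe` is not used anywhere else in this script.
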
HUGGING_FACE_REPO_URL = "https://huggingface.co/spaces/acecalisto3/DevToolKit"
PROJECT_ROOT = "projects"
AGENT_DIRECTORY = "agents"

# Global state management
if 'chat_history' not in st.session_state:
    st.session_state.chat_history = []
if 'terminal_history' not in st.session_state:
    st.session_state.terminal_history = []
if 'workspace_projects' not in st.session_state:
    st.session_state.workspace_projects = {}
if 'available_agents' not in st.session_state:
    st.session_state.available_agents = []
if 'current_state' not in st.session_state:
    st.session_state.current_state = {
        'toolbox': {},
        'workspace_chat': {}
    }

# Define the agents
agents = [
    "WEB_DEV",
    "AI_SYSTEM_PROMPT",
    "PYTHON_CODE_DEV",
    "CODE_REVIEW_ASSISTANT",
    "CONTENT_WRITER_EDITOR",
    "QUESTION_GENERATOR",
    "HUGGINGFACE_FILE_DEV",
]

class AIAgent:
    def __init__(self, name, description, skills):
        self.name = name
        self.description = description
        self.skills = skills

    def create_agent_prompt(self):
        skills_str = '\n'.join([f"* {skill}" for skill in self.skills])
        agent_prompt = f"""
As an elite expert developer, my name is {self.name}. I possess a comprehensive understanding of the following areas:
{skills_str}
I am confident that I can leverage my expertise to assist you in developing and deploying cutting-edge web applications. Please feel free to ask any questions or present any challenges you may encounter.
"""
        return agent_prompt

    def autonomous_build(self, chat_history, workspace_projects):
        summary = "Chat History:\n" + "\n".join([f"User: {u}\nAgent: {a}" for u, a in chat_history])
        summary += "\n\nWorkspace Projects:\n" + "\n".join([f"{p}: {details}" for p, details in workspace_projects.items()])
        next_step = "Based on the current state, the next logical step is to implement the main application logic."
        return summary, next_step
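
# Example usage (illustrative, not called elsewhere in this script):
# AIAgent("PYTHON_CODE_DEV", "Python specialist", ["Python", "testing"]).create_agent_prompt()
# returns a short self-introduction that lists the given skills as bullet points.
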
def format_prompt(message, history):
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
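
# For example, format_prompt("How do I deploy?", [("Hello", "Hi there!")]) produces:
# "<s>[INST] Hello [/INST] Hi there!</s> [INST] How do I deploy? [/INST]"
# which matches the Mixtral-Instruct chat template expected by the InferenceClient above.
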
def generate(prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
    seed = random.randint(1, 1111111111111111)
    # Use the caller-supplied system prompt when given; otherwise fall back to the agent's prompt.
    agent = getattr(prompts, agent_name, prompts.WEB_DEV_SYSTEM_PROMPT)
    system_prompt = sys_prompt if sys_prompt else agent
    generate_kwargs = dict(
        temperature=float(temperature),
        max_new_tokens=max_new_tokens,
        top_p=float(top_p),
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=seed,
    )
    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""
    # Stream the response token by token, yielding the accumulated text each time.
    for response in stream:
        output += response.token.text
        yield output
    return output

def chat_interface(chat_input):
    # generate() is a streaming generator; consume it and keep the final (complete) chunk.
    response = ""
    for chunk in generate(chat_input, st.session_state.chat_history):
        response = chunk
    return response

def chat_interface_with_agent(chat_input, agent_name):
    agent_prompt = getattr(prompts, agent_name, prompts.WEB_DEV_SYSTEM_PROMPT)
    response = ""
    for chunk in generate(chat_input, st.session_state.chat_history, agent_name=agent_name, sys_prompt=agent_prompt):
        response = chunk
    return response

def terminal_interface(command, project_name):
    # Run the command inside the project's directory (falling back to the current directory) and capture its output.
    project_path = os.path.join(PROJECT_ROOT, project_name)
    cwd = project_path if os.path.isdir(project_path) else None
    result = subprocess.run(command, shell=True, cwd=cwd, capture_output=True, text=True)
    return result.stdout + result.stderr

def add_code_to_workspace(project_name, code, file_name):
    if project_name not in st.session_state.workspace_projects:
        st.session_state.workspace_projects[project_name] = {'files': []}
    # Store the file name together with its code so the workspace actually keeps the content.
    st.session_state.workspace_projects[project_name]['files'].append({'file_name': file_name, 'code': code})
    return f"Added {file_name} to {project_name}"
# Streamlit UI
st.title("DevToolKit: AI-Powered Development Environment")

# Project Management
st.header("Project Management")
project_name = st.text_input("Enter project name:")
if st.button("Create Project"):
    if project_name not in st.session_state.workspace_projects:
        st.session_state.workspace_projects[project_name] = {'files': []}
        st.success(f"Created project: {project_name}")
    else:
        st.warning(f"Project {project_name} already exists")

# Code Addition
st.subheader("Add Code to Workspace")
code_to_add = st.text_area("Enter code to add to workspace:")
file_name = st.text_input("Enter file name (e.g. 'app.py'):")
if st.button("Add Code"):
    add_code_status = add_code_to_workspace(project_name, code_to_add, file_name)
    st.success(add_code_status)

# Terminal Interface
st.subheader("Terminal (Workspace Context)")
terminal_input = st.text_input("Enter a command within the workspace:")
if st.button("Run Command"):
    terminal_output = terminal_interface(terminal_input, project_name)
    # Record the command and its output so the "Terminal History" section below has data to show.
    st.session_state.terminal_history.append((terminal_input, terminal_output))
    st.code(terminal_output, language="bash")

# Chat Interface
st.subheader("Chat with DevToolKit for Guidance")
chat_input = st.text_area("Enter your message for guidance:")
if st.button("Get Guidance"):
    chat_response = chat_interface(chat_input)
    st.session_state.chat_history.append((chat_input, chat_response))
    st.write(f"DevToolKit: {chat_response}")

# Display Chat History
st.subheader("Chat History")
for user_input, response in st.session_state.chat_history:
    st.write(f"User: {user_input}")
    st.write(f"DevToolKit: {response}")

# Display Terminal History
st.subheader("Terminal History")
for command, output in st.session_state.terminal_history:
    st.write(f"Command: {command}")
    st.code(output, language="bash")

# Display Projects and Files
st.subheader("Workspace Projects")
for project, details in st.session_state.workspace_projects.items():
    st.write(f"Project: {project}")
    for file in details['files']:
        st.write(f"  - {file['file_name']}")

# Chat with AI Agents
st.subheader("Chat with AI Agents")
selected_agent = st.selectbox("Select an AI agent", agents)
agent_chat_input = st.text_area("Enter your message for the agent:")
if st.button("Send to Agent"):
    agent_chat_response = chat_interface_with_agent(agent_chat_input, selected_agent)
    st.session_state.chat_history.append((agent_chat_input, agent_chat_response))
    st.write(f"{selected_agent}: {agent_chat_response}")

# Automate Build Process
st.subheader("Automate Build Process")
if st.button("Automate"):
    agent = AIAgent(selected_agent, "", [])  # Load the agent without skills for now
    summary, next_step = agent.autonomous_build(st.session_state.chat_history, st.session_state.workspace_projects)
    st.write("Autonomous Build Summary:")
    st.write(summary)
    st.write("Next Step:")
    st.write(next_step)

# Display current state for debugging
st.sidebar.subheader("Current State")
st.sidebar.json(st.session_state.current_state)

# Gradio Interface
additional_inputs = [
    gr.Dropdown(label="Agents", choices=[s for s in agents], value=agents[0], interactive=True),
    gr.Textbox(label="System Prompt", max_lines=1, interactive=True),
    gr.Slider(label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs"),
    gr.Slider(label="Max new tokens", value=1024*10, minimum=0, maximum=1024*10, step=64, interactive=True, info="The maximum number of new tokens"),
    gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens"),
    gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens"),
]

# Each example row supplies the user message followed by one value per additional input above.
examples = [
    ["Create a simple web application using Flask", agents[0], None, None, None, None, None],
    ["Generate a Python script to perform a linear regression analysis", agents[2], None, None, None, None, None],
    ["Create a Dockerfile for a Node.js application", agents[1], None, None, None, None, None],
    # Add more examples as needed
]

gr.ChatInterface(
    fn=generate,
    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
    additional_inputs=additional_inputs,
    title="DevToolKit AI Assistant",
    examples=examples,
    concurrency_limit=20,
).launch(show_api=True)
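
# Design caveat (assumption): this file drives both a Streamlit page and a Gradio ChatInterface.
# launch() starts its own web server and blocks when the file is run as a plain script, so under
# `streamlit run` the two UIs interfere with each other on every rerun; splitting them into
# separate entry points is likely the safer layout.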