# MistriDevLab / app.py
import os
import subprocess
import random
import json
from datetime import datetime
from huggingface_hub import (
    InferenceClient,
    cached_download,
    hf_hub_url,
)
import gradio as gr
from safe_search import safe_search
from i_search import google
from i_search import i_search as i_s
from agent import (
    ACTION_PROMPT,
    ADD_PROMPT,
    COMPRESS_HISTORY_PROMPT,
    LOG_PROMPT,
    LOG_RESPONSE,
    MODIFY_PROMPT,
    PRE_PREFIX,
    SEARCH_QUERY,
    READ_PROMPT,
    TASK_PROMPT,
    UNDERSTAND_TEST_RESULTS_PROMPT,
)
from utils import (
    parse_action,
    parse_file_content,
    read_python_module_structure,
)
#--- Global Variables for App State ---
app_state = {"components": []}
terminal_history = ""
#--- Component Library ---
components_registry = {
    "Button": {
        "properties": {"label": "Click Me", "onclick": ""},
        "description": "A clickable button",
        "code_snippet": 'gr.Button(value="{label}", variant="primary")',
    },
    "Text Input": {
        "properties": {"value": "", "placeholder": "Enter text"},
        "description": "A field for entering text",
        "code_snippet": 'gr.Textbox(label="{placeholder}")',
    },
    "Image": {
        "properties": {"src": "#", "alt": "Image"},
        "description": "Displays an image",
        "code_snippet": 'gr.Image(label="{alt}")',
    },
    "Dropdown": {
        "properties": {"choices": ["Option 1", "Option 2"], "value": ""},
        "description": "A dropdown menu for selecting options",
        "code_snippet": 'gr.Dropdown(choices={choices}, label="Dropdown")',
    },
    # Add more components here...
}
#--- NLP Model (Example using Hugging Face) ---
nlp_model_names = [
    "google/flan-t5-small",
    "Qwen/CodeQwen1.5-7B-Chat-GGUF",
    "bartowski/Codestral-22B-v0.1-GGUF",
    "bartowski/AutoCoder-GGUF",
]
nlp_models = []
for nlp_model_name in nlp_model_names:
    try:
        cached_download(hf_hub_url(nlp_model_name, revision="main"))
        nlp_models.append(InferenceClient(nlp_model_name))
    except Exception:
        # Keep the list aligned with nlp_model_names even when a model fails to load.
        nlp_models.append(None)
#--- Function to get NLP model response ---
def get_nlp_response(input_text, model_index):
    if nlp_models[model_index]:
        # text_generation returns the generated string directly by default.
        return nlp_models[model_index].text_generation(input_text)
    else:
        return "NLP model not available."
# --- Component Class ---
class Component:
    def __init__(self, type, properties=None, id=None):
        self.id = id or random.randint(1000, 9999)
        self.type = type
        self.properties = properties or components_registry[type]["properties"].copy()

    def to_dict(self):
        return {
            "id": self.id,
            "type": self.type,
            "properties": self.properties,
        }

    def render(self):
        # For Dropdown, keep the list repr so the generated snippet stays valid Python.
        if self.type == "Dropdown":
            self.properties["choices"] = str(self.properties["choices"])
        return components_registry[self.type]["code_snippet"].format(
            **self.properties
        )
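# Illustrative example (using the default Button entry in components_registry above):
#   Component("Button").render()
# produces the snippet 'gr.Button(value="Click Me", variant="primary")'.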
# --- Function to update the app canvas (for preview) ---
def update_app_canvas():
    components_html = "".join(
        f"<div>Component ID: {component['id']}, Type: {component['type']}, "
        f"Properties: {component['properties']}</div>"
        for component in app_state["components"]
    )
    return components_html
# --- Function to handle component addition ---
def add_component(component_type):
    if component_type in components_registry:
        new_component = Component(component_type)
        app_state["components"].append(new_component.to_dict())
        return (
            update_app_canvas(),
            f"System: Added component: {component_type}\n",
        )
    else:
        return None, f"Error: Invalid component type: {component_type}\n"
# --- Function to handle terminal input ---
def run_terminal_command(command, history):
    global terminal_history
    output = ""
    try:
        # Basic command parsing (expand with NLP)
        if command.startswith("add "):
            component_type = command.split("add ", 1)[1].strip()
            _, output = add_component(component_type)
        elif command.startswith("set "):
            _, output = set_component_property(command)
        elif command.startswith("search "):
            search_query = command.split("search ", 1)[1].strip()
            output = i_s(search_query)
        elif command.startswith("deploy "):
            app_name = command.split("deploy ", 1)[1].strip()
            output = deploy_to_huggingface(app_name)
        else:
            # Fall back to running the input as a shell command
            try:
                result = subprocess.check_output(
                    command, shell=True, stderr=subprocess.STDOUT, text=True
                )
                output = result
            except Exception as e:
                output = f"Error executing command: {str(e)}"
    except Exception as e:
        output = f"Error: {str(e)}"
    finally:
        terminal_history += f"User: {command}\n"
        terminal_history += f"{output}\n"
    return terminal_history
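# Illustrative terminal commands handled above:
#   add Button                  -> adds a Button component to the canvas
#   set 1234 label=Submit       -> sets property "label" on component 1234
#   search gradio blocks        -> runs the i_search helper
#   deploy my-demo-space        -> generates app.py and deploys it to Spaces
# Anything else is run as a shell command via subprocess.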
def set_component_property(command):
    try:
        # 'set' command format: set <component_id> <property>=<value>
        set_parts = command.split(" ", 2)[1:]
        if len(set_parts) != 2:
            raise ValueError("Invalid 'set' command format.")
        component_id = int(set_parts[0])  # Use component ID
        property_name, property_value = set_parts[1].split("=", 1)
        property_name = property_name.strip()
        property_value = property_value.strip()
        # Find component by ID
        for component in app_state["components"]:
            if component["id"] == component_id:
                if property_name in component["properties"]:
                    component["properties"][property_name] = property_value
                    return (
                        update_app_canvas(),
                        f"System: Property '{property_name}' set to '{property_value}' for component {component_id}\n",
                    )
                else:
                    return (
                        None,
                        f"Error: Property '{property_name}' not found in component {component_id}\n",
                    )
        return (
            None,
            f"Error: Component with ID {component_id} not found.\n",
        )
    except Exception as e:
        return None, f"Error: Invalid 'set' command format or error setting property: {str(e)}\n"
#--- Function to handle chat interaction ---
def run_chat(message, history):
    global terminal_history
    if message.startswith("!"):
        command = message[1:]
        terminal_history = run_terminal_command(command, history)
        return history, terminal_history
    else:
        # Regular chat response generation
        model_index = 0  # Select the model to use for chat responses
        response = get_nlp_response(message, model_index)
        if response:
            return history, terminal_history + f"User: {message}\nAssistant: {response}"
        else:
            return history, terminal_history + f"User: {message}\nAssistant: I'm sorry, I couldn't generate a response. Please try again.\n"
# --- Code Generation ---
def generate_python_code(app_name):
    code = f"""import gradio as gr

# Define your Gradio components here
with gr.Blocks() as {app_name}:
"""
    for component in app_state["components"]:
        code += "    " + Component(**component).render() + "\n"
    code += f"""
{app_name}.launch()
"""
    return code
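# Illustrative output of generate_python_code("demo_app") after "add Button"
# (using the default Button properties above):
#
#   import gradio as gr
#
#   # Define your Gradio components here
#   with gr.Blocks() as demo_app:
#       gr.Button(value="Click Me", variant="primary")
#
#   demo_app.launch()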
# --- Hugging Face Deployment ---
def deploy_to_huggingface(app_name):
    # Generate Python code
    code = generate_python_code(app_name)
    # Create requirements.txt
    with open("requirements.txt", "w") as f:
        f.write("gradio==3.32.0\n")
    # Create the app.py file
    with open("app.py", "w") as f:
        f.write(code)
    # Execute the deployment commands
    try:
        subprocess.run(
            ["huggingface-cli", "repo", "create", "--type", "space", "--space_sdk", "gradio", app_name],
            check=True,
        )
        subprocess.run(
            ["git", "init"], cwd=f"./{app_name}", check=True
        )
        subprocess.run(
            ["git", "add", "."], cwd=f"./{app_name}", check=True
        )
        subprocess.run(
            ["git", "commit", "-m", "Initial commit"], cwd=f"./{app_name}", check=True
        )
        subprocess.run(
            ["git", "push", "https://huggingface.co/spaces/" + app_name, "main"], cwd=f"./{app_name}", check=True
        )
        return (
            f"Successfully deployed to Hugging Face Spaces: https://huggingface.co/spaces/{app_name}"
        )
    except Exception as e:
        return f"Error deploying to Hugging Face Spaces: {e}"
# --- Gradio Interface ---
with gr.Blocks() as iface:
    # --- Chat Interface ---
    chat_history = gr.Chatbot(label="Chat with Agent")
    chat_input = gr.Textbox(label="Your Message")
    chat_button = gr.Button("Send")
    # --- Terminal ---
    terminal_output = gr.Textbox(
        lines=8, label="Terminal", value=terminal_history
    )
    terminal_input = gr.Textbox(label="Enter Command")
    terminal_button = gr.Button("Run")
    # Wire up events after all components exist, so terminal_output can be
    # referenced from the chat handler as well.
    chat_button.click(
        run_chat,
        inputs=[chat_input, chat_history],
        outputs=[chat_history, terminal_output],
    )
    terminal_button.click(
        run_terminal_command,
        inputs=[terminal_input, terminal_output],
        outputs=terminal_output,
    )

iface.launch()