import os
import subprocess
import logging
from datetime import datetime
from typing import List, Tuple

import gradio as gr
from huggingface_hub import InferenceClient
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
# --- Configuration ---
VERBOSE = True  # Enable verbose logging
MAX_HISTORY = 5  # Maximum history turns to keep
MAX_TOKENS = 2048  # Maximum tokens for LLM responses
TEMPERATURE = 0.7  # Temperature for LLM responses
TOP_P = 0.8  # Top-p (nucleus sampling) for LLM responses
REPETITION_PENALTY = 1.5  # Repetition penalty for LLM responses
MODEL_NAME = "mistralai/Mixtral-8x7B-Instruct-v0.1"  # Name of the LLM model
API_KEY = "YOUR_API_KEY"  # Replace with your actual Hugging Face API key

# --- Logging Setup ---
logging.basicConfig(
    filename="app.log",
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
)
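
# NOTE: hardcoding an API key in source is unsafe for a public Space. A common
# alternative is to read a secret from the environment; the sketch below
# assumes a secret named HF_TOKEN is configured in the Space settings (the
# variable name is an assumption, not part of the original app):
#
#   API_KEY = os.environ.get("HF_TOKEN", "")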
# --- Agents ---
AGENTS = [
    "WEB_DEV",
    "AI_SYSTEM_PROMPT",
    "PYTHON_CODE_DEV",
    "DATA_SCIENCE",
    "UI_UX_DESIGN",
]
# --- Prompts ---
PREFIX = """
{date_time_str}
Purpose: {purpose}
Agent: {agent_name}
"""

LOG_PROMPT = """
PROMPT: {content}
"""

LOG_RESPONSE = """
RESPONSE: {resp}
"""
# --- Functions ---
def format_prompt(message: str, history: List[Tuple[str, str]], max_history_turns: int = MAX_HISTORY) -> str:
    """Formats the recent chat history and the new message into a prompt string."""
    prompt = ""
    for user_prompt, bot_response in history[-max_history_turns:]:
        prompt += f"Human: {user_prompt}\nAssistant: {bot_response}\n"
    prompt += f"Human: {message}\nAssistant:"
    return prompt
# Cache the model and tokenizer so they are loaded once and reused;
# reloading them on every request would be prohibitively slow.
_generator = None

def get_generator():
    """Lazily builds the text-generation pipeline on first use."""
    global _generator
    if _generator is None:
        model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
        tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
        _generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
    return _generator

def generate(
    prompt: str,
    history: List[Tuple[str, str]],
    agent_name: str = AGENTS[0],
    sys_prompt: str = "",
    temperature: float = TEMPERATURE,
    max_new_tokens: int = MAX_TOKENS,
    top_p: float = TOP_P,
    repetition_penalty: float = REPETITION_PENALTY,
    purpose: str = "",
) -> str:
    """Generates an assistant response for the given prompt and history."""
    generator = get_generator()
    # Prepare the full prompt
    date_time_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    full_prompt = PREFIX.format(
        date_time_str=date_time_str,
        purpose=purpose,
        agent_name=agent_name,
    )
    if sys_prompt:
        full_prompt += f"System: {sys_prompt}\n"
    full_prompt += format_prompt(prompt, history)
    if VERBOSE:
        logging.info(LOG_PROMPT.format(content=full_prompt))
    # Generate only the completion (return_full_text=False drops the prompt).
    response = generator(
        full_prompt,
        max_new_tokens=max_new_tokens,
        temperature=temperature,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        return_full_text=False,
    )[0]["generated_text"]
    # Cut the completion off at the next simulated "Human:" turn, if any.
    assistant_response = response.split("Human:")[0].strip()
    if VERBOSE:
        logging.info(LOG_RESPONSE.format(resp=assistant_response))
    return assistant_response
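
# Loading Mixtral-8x7B locally needs far more memory than a typical Space
# provides. A lighter alternative is to call the model remotely. The sketch
# below uses huggingface_hub.InferenceClient and assumes API_KEY holds a valid
# token with access to MODEL_NAME; it is not wired into the UI below.
def generate_remote(
    prompt: str,
    temperature: float = TEMPERATURE,
    max_new_tokens: int = MAX_TOKENS,
    top_p: float = TOP_P,
    repetition_penalty: float = REPETITION_PENALTY,
) -> str:
    """Sketch: generates a completion via the Hugging Face Inference API."""
    client = InferenceClient(model=MODEL_NAME, token=API_KEY)
    return client.text_generation(
        prompt,
        max_new_tokens=max_new_tokens,
        temperature=temperature,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
    )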
def main():
    with gr.Blocks() as demo:
        gr.Markdown("## FragMixt: The No-Code Development Powerhouse")
        gr.Markdown("### Your AI-Powered Development Companion")
        # Chat Interface
        chatbot = gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel")
        # Input Components
        message = gr.Textbox(label="Enter your message", placeholder="Ask me anything!")
        purpose = gr.Textbox(label="Purpose", placeholder="What is the purpose of this interaction?")
        agent_name = gr.Dropdown(label="Agents", choices=AGENTS, value=AGENTS[0], interactive=True)
        sys_prompt = gr.Textbox(label="System Prompt", max_lines=1, interactive=True)
        temperature = gr.Slider(label="Temperature", value=TEMPERATURE, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs")
        max_new_tokens = gr.Slider(label="Max new tokens", value=MAX_TOKENS, minimum=64, maximum=1024 * 10, step=64, interactive=True, info="The maximum number of new tokens to generate")
        top_p = gr.Slider(label="Top-p (nucleus sampling)", value=TOP_P, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values sample more low-probability tokens")
        repetition_penalty = gr.Slider(label="Repetition penalty", value=REPETITION_PENALTY, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens")
        # Button to submit the message
        submit_button = gr.Button(value="Send")
        # Project Explorer Tab
        with gr.Tab("Project Explorer"):
            project_path = gr.Textbox(label="Project Path", placeholder="/home/user/app/current_project")
            explore_button = gr.Button(value="Explore")
            project_output = gr.Textbox(label="File Tree", lines=20)
        # Chat App Logic Tab
        with gr.Tab("Chat App"):
            history = gr.State([])
            examples = [
                ["What is the purpose of this AI agent?", "I am designed to assist with no-code development tasks."],
                ["Can you help me generate a Python function to calculate the factorial of a number?", "Sure! Here is a Python function to calculate the factorial of a number:"],
                ["Generate a simple HTML page with a heading and a paragraph.", "```html\n<!DOCTYPE html>\n<html>\n<head>\n<title>My Simple Page</title>\n</head>\n<body>\n<h1>Welcome to my page!</h1>\n<p>This is a simple paragraph.</p>\n</body>\n</html>\n```"],
                ["Create a basic SQL query to select all data from a table named 'users'.", "```sql\nSELECT * FROM users;\n```"],
                ["Design a user interface for a mobile app that allows users to track their daily expenses.", "Here's a basic UI design for a mobile expense tracker app:\n\n**Screen 1: Home**\n- Top: App Name and Balance Display\n- Middle: List of Recent Transactions (Date, Description, Amount)\n- Bottom: Buttons for Add Expense, Add Income, View Categories\n\n**Screen 2: Add Expense**\n- Input fields for Date, Category, Description, Amount\n- Buttons for Save, Cancel\n\n**Screen 3: Expense Categories**\n- List of expense categories (e.g., Food, Transportation, Entertainment)\n- Option to add/edit categories\n\n**Screen 4: Reports**\n- Charts and graphs to visualize spending by category, date range, etc.\n- Filters to customize the reports"],
            ]
            # Surface the example questions as one-click prompts for the message box.
            gr.Examples(examples=[[question] for question, _ in examples], inputs=[message], label="Example prompts")

            def chat(purpose: str, message: str, agent_name: str, sys_prompt: str, temperature: float, max_new_tokens: int, top_p: float, repetition_penalty: float, history: List[Tuple[str, str]]) -> Tuple[List[Tuple[str, str]], List[Tuple[str, str]]]:
                """Handles a chat turn: generates a response and appends it to the history."""
                response = generate(
                    message, history, agent_name, sys_prompt,
                    temperature, max_new_tokens, top_p, repetition_penalty,
                    purpose=purpose,
                )
                history.append((message, response))
                return history, history

            submit_button.click(chat, inputs=[purpose, message, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty, history], outputs=[chatbot, history])
        # Project Explorer Logic
        def explore_project(project_path: str) -> str:
            """Explores the project directory and returns a file tree."""
            try:
                # Requires the `tree` utility (on Spaces, install it via packages.txt).
                return subprocess.check_output(["tree", project_path], stderr=subprocess.STDOUT).decode("utf-8")
            except FileNotFoundError:
                # `tree` is not installed; fall back to a flat os.walk listing.
                paths = [os.path.join(root, name) for root, _, files in os.walk(project_path) for name in files]
                return "\n".join(paths) or f"No files found under {project_path}"
            except Exception as e:
                return f"Error exploring project: {e}"

        explore_button.click(explore_project, inputs=[project_path], outputs=[project_output])
    demo.launch()


if __name__ == "__main__":
    main()
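
# A Space running this file needs at least these packages in requirements.txt;
# the exact list (and any version pins) is an assumption based on the imports
# above, not part of the original app:
#
#   gradio
#   transformers
#   torch
#   huggingface_hub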