import os
import subprocess
import random
from typing import Callable, Dict, Iterator, List, Tuple
from datetime import datetime
import logging

import gradio as gr
from huggingface_hub import InferenceClient
from safe_search import safe_search
from i_search import google, i_search as i_s

# --- Configuration ---
VERBOSE = True  # Enable verbose logging
MAX_HISTORY = 5  # Maximum history turns to keep
MAX_TOKENS = 2048  # Maximum tokens for LLM responses
TEMPERATURE = 0.7  # Temperature for LLM responses
TOP_P = 0.8  # Top-p (nucleus sampling) for LLM responses
REPETITION_PENALTY = 1.5  # Repetition penalty for LLM responses
MODEL_NAME = "mistralai/Mixtral-8x7B-Instruct-v0.1"  # Name of the LLM model

API_KEY = os.getenv("HUGGINGFACE_API_KEY")  # Set the HUGGINGFACE_API_KEY environment variable before running

# --- Logging Setup ---
logging.basicConfig(
    filename="app.log",  # Name of the log file
    level=logging.INFO,  # Set the logging level (INFO, DEBUG, etc.)
    format="%(asctime)s - %(levelname)s - %(message)s",
)

# --- Agents ---
agents = [
    "WEB_DEV",
    "AI_SYSTEM_PROMPT",
    "PYTHON_CODE_DEV",
    "DATA_SCIENCE",
    "UI_UX_DESIGN",
]

# --- Prompts ---
PREFIX = """
{date_time_str}
Purpose: {purpose}
Safe Search: {safe_search}
"""

LOG_PROMPT = """
PROMPT: {content}
"""

LOG_RESPONSE = """
RESPONSE: {resp}
"""

COMPRESS_HISTORY_PROMPT = """
You are a helpful AI assistant. Your task is to compress the following history into a summary that is no longer than 512 tokens.
History:
{history}
"""

ACTION_PROMPT = """
You are a helpful AI assistant. You are working on the task: {task}
Your current history is:
{history}
What is your next thought?
thought: 
What is your next action?
action: 
"""

TASK_PROMPT = """
You are a helpful AI assistant. Your current history is:
{history}
What is the next task?
task: 
"""

UNDERSTAND_TEST_RESULTS_PROMPT = """
You are a helpful AI assistant. The test results are:
{test_results}
What do you want to know about the test results?
thought: 
"""

# --- Functions ---
def format_prompt(message: str, history: List[Tuple[str, str]], max_history_turns: int = 5) -> str:
    """Formats the prompt for the LLM, including the message and recent history."""
    prompt = ""
    # Keep only the last 'max_history_turns' turns
    for user_prompt, bot_response in history[-max_history_turns:]:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}"
    prompt += f"[INST] {message} [/INST]"
    return prompt
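
# Example (illustrative):
#   format_prompt("Add tests", [("Write a parser", "Here is a parser: ...")])
#   -> "[INST] Write a parser [/INST] Here is a parser: ...[INST] Add tests [/INST]"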

def run_llm(
    prompt_template: str,
    stop_tokens: List[str],
    purpose: str,
    **prompt_kwargs: Dict,
) -> str:
    """Runs the LLM with the given prompt template and parameters."""
    seed = random.randint(1, 1111111111111111)
    logging.info(f"Seed: {seed}")  # Log the seed

    date_time_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    content = PREFIX.format(
        date_time_str=date_time_str,
        purpose=purpose,
        safe_search=safe_search,
    ) + prompt_template.format(**prompt_kwargs)
    if VERBOSE:
        logging.info(LOG_PROMPT.format(content=content))  # Log the prompt

    resp = client.text_generation(
        content,
        max_new_tokens=MAX_TOKENS,
        stop_sequences=stop_tokens,
        temperature=TEMPERATURE,
        top_p=TOP_P,
        repetition_penalty=REPETITION_PENALTY,
        seed=seed,  # reuse the logged seed so runs are reproducible
    )
    if VERBOSE:
        logging.info(LOG_RESPONSE.format(resp=resp))  # Log the response
    return resp

def generate(
    prompt: str,
    history: List[Tuple[str, str]],
    agent_name: str = agents[0],
    sys_prompt: str = "",
    temperature: float = TEMPERATURE,
    max_new_tokens: int = MAX_TOKENS,
    top_p: float = TOP_P,
    repetition_penalty: float = REPETITION_PENALTY,
) -> str:
    """Generates text using the LLM, streaming tokens into a single response."""
    # Assumption: the system prompt (or, failing that, the agent name) stands in
    # for the purpose in the PREFIX template.
    content = PREFIX.format(
        date_time_str=datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        purpose=sys_prompt or agent_name,
        safe_search=safe_search,
    ) + prompt
    if VERBOSE:
        logging.info(LOG_PROMPT.format(content=content))  # Log the prompt

    stream = client.text_generation(
        content,
        stream=True,
        details=True,
        return_full_text=False,
        temperature=temperature,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        max_new_tokens=max_new_tokens,
    )
    resp = ""
    for response in stream:
        if not response.token.special:  # skip control tokens like </s>
            resp += response.token.text

    if VERBOSE:
        logging.info(LOG_RESPONSE.format(resp=resp))  # Log the response
    return resp

def compress_history(purpose: str, task: str, history: List[Tuple[str, str]], directory: str) -> List[Tuple[str, str]]:
    """Compresses the history into a short summary, returned as a single observation turn."""
    resp = run_llm(
        COMPRESS_HISTORY_PROMPT,
        stop_tokens=["observation:", "task:", "action:", "thought:"],
        purpose=purpose,
        task=task,
        history="\n".join(f"[INST] {user_prompt} [/INST] {bot_response}" for user_prompt, bot_response in history),
    )
    # Return a list so callers can keep appending (observation, "") turns to it
    return [(f"observation: {resp}", "")]
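
# Illustrative effect (assumed summary text): a long history collapses to one turn, e.g.
#   [("observation: <short summary of the prior turns>", "")]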

def call_search(purpose: str, task: str, history: List[Tuple[str, str]], directory: str, action_input: str) -> Tuple[str, str, List[Tuple[str, str]], str]:
    """Performs a search based on the action input."""
    logging.info(f"CALLING SEARCH: {action_input}")
    try:
        if "http" in action_input:
            # Strip any angle brackets the model may have wrapped around the URL
            action_input = action_input.strip("<>")
            response = i_s(action_input)
            logging.info(f"Search Result: {response}")
            history.append((f"observation: search result is: {response}", ""))
        else:
            history.append(("observation: I need to provide a valid URL to 'action: SEARCH action_input=https://URL'\n", ""))
    except Exception as e:
        history.append((f"observation: {e}\n", ""))
    return "MAIN", None, history, task

def call_main(purpose: str, task: str, history: List[Tuple[str, str]], directory: str, action_input: str) -> Tuple[str, str, List[Tuple[str, str]], str]:
    """Handles the main agent interaction loop."""
    logging.info(f"CALLING MAIN: {action_input}")
    resp = run_llm(
        ACTION_PROMPT,
        stop_tokens=["observation:", "task:", "action:", "thought:"],
        purpose=purpose,
        task=task,
        history="\n".join(f"[INST] {user_prompt} [/] {bot_response}" for user_prompt, bot_response in history),
    )
    lines = resp.strip().split("\n")
    for line in lines:
        if line == "":
            continue
        if line.startswith("thought: "):
            history.append((line, ""))
            logging.info(f"Thought: {line}")
        elif line.startswith("action: "):
            action_name, action_input = parse_action(line)
            logging.info(f"Action: {action_name} - {action_input}")
            history.append((line, ""))
            if "COMPLETE" in action_name or "COMPLETE" in action_input:
                task = "END"
                return action_name, action_input, history, task
            else:
                return action_name, action_input, history, task
        else:
            history.append((line, ""))
            logging.info(f"Other Output: {line}")
    return "MAIN", None, history, task

def call_set_task(purpose: str, task: str, history: List[Tuple[str, str]], directory: str, action_input: str) -> Tuple[str, str, List[Tuple[str, str]], str]:
    """Sets a new task for the agent."""
    logging.info(f"CALLING SET_TASK: {action_input}")
    task = run_llm(
        TASK_PROMPT,
        stop_tokens=[],
        purpose=purpose,
        task=task,
        history="\n".join(f"[INST] {user_prompt} [/] {bot_response}" for user_prompt, bot_response in history),
    ).strip("\n")
    history.append(("observation: task has been updated to: {}".format(task), ""))
    return "MAIN", None, history, task

def end_fn(purpose: str, task: str, history: List[Tuple[str, str]], directory: str, action_input: str) -> Tuple[str, str, List[Tuple[str, str]], str]:
    """Ends the agent interaction."""
    logging.info(f"CALLING END_FN: {action_input}")
    task = "END"
    return "COMPLETE", "COMPLETE", history, task

NAME_TO_FUNC: Dict[str, Callable] = {
    "MAIN": call_main,
    "UPDATE-TASK": call_set_task,
    "SEARCH": call_search,
    "COMPLETE": end_fn,
}
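
# Example dispatch (illustrative): a model line
#   "action: SEARCH action_input=https://example.com"
# parses to ("SEARCH", "https://example.com") and is routed as
#   NAME_TO_FUNC["SEARCH"](purpose, task, history, directory, "https://example.com")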

def run_action(purpose: str, task: str, history: List[Tuple[str, str]], directory: str, action_name: str, action_input: str) -> Tuple[str, str, List[Tuple[str, str]], str]:
    """Executes the specified action."""
    logging.info(f"RUNNING ACTION: {action_name} - {action_input}")
    try:
        if "RESPONSE" in action_name or "COMPLETE" in action_name:
            action_name = "COMPLETE"
            task = "END"
            return action_name, "COMPLETE", history, task
    
        # compress the history when it is long
        if len(history) > MAX_HISTORY:
            logging.info("COMPRESSING HISTORY")
            history = compress_history(purpose, task, history, directory)
        if not action_name or action_name not in NAME_TO_FUNC:
            action_name = "MAIN"
    
        logging.info(f"RUN: {action_name} - {action_input}")
        return NAME_TO_FUNC[action_name](purpose, task, history, directory, action_input)
    except Exception as e:
        history.append(("observation: the previous command did not produce any useful output, I need to check the commands syntax, or use a different command\n", ""))
        logging.error(f"Error in run_action: {e}")
        return "MAIN", None, history, task

def run(purpose: str, history: List[Tuple[str, str]]) -> Iterator[List[Tuple[str, str]]]:
    """Main agent interaction loop; yields the updated history after each action."""
    task = None
    directory = "./"
    # Gradio may pass None for a fresh session; normalize to an empty list
    if not history:
        history = []
    
    action_name = "UPDATE-TASK" if task is None else "MAIN"
    action_input = None
    while True:
        logging.info(f"---")
        logging.info(f"Purpose: {purpose}")
        logging.info(f"Task: {task}")
        logging.info(f"---")
        logging.info(f"History: {history}")
        logging.info(f"---")

        action_name, action_input, history, task = run_action(
            purpose,
            task,
            history,
            directory,
            action_name,
            action_input,
        )
        yield history
        if task == "END":
            return
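
# Usage sketch (illustrative): `run` is a generator, so a caller can stream turns:
#   for history in run("Build a TODO app", []):
#       print(history[-1])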

################################################

def parse_action(line: str) -> Tuple[str, str]:
    """Parses an 'action:' line into the action name and its input.

    Expected shapes: "action: NAME action_input=VALUE" or "action: NAME".
    """
    line = line.split(":", 1)[1].strip() if ":" in line else line.strip()
    if "action_input=" in line:
        action_name, action_input = line.split("action_input=", 1)
        return action_name.strip(), action_input.strip()
    return line, ""
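
# Examples (matching the "action: NAME action_input=VALUE" convention used above):
#   parse_action("action: SEARCH action_input=https://example.com")
#   -> ("SEARCH", "https://example.com")
#   parse_action("action: COMPLETE")
#   -> ("COMPLETE", "")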

def main():
    """Main function to run the Gradio interface."""
    global client
    if not API_KEY:
        print("HUGGINGFACE_API_KEY is not set. Export it before running.")
        return
    # Initialize the LLM client with the API key from the environment
    try:
        client = InferenceClient(
            MODEL_NAME,
            token=API_KEY,
        )
    except Exception as e:
        logging.error(f"Error initializing LLM client: {e}")
        print("Error initializing LLM client. Please check your API key.")
        return

    with gr.Blocks() as demo:
        gr.Markdown("## FragMixt: The No-Code Development Powerhouse")
        gr.Markdown("###  Your AI-Powered Development Companion")

        # Chat Interface
        chatbot = gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel")
        
        # Input Components
        message = gr.Textbox(label="Enter your message", placeholder="Ask me anything!")
        purpose = gr.Textbox(label="Purpose", placeholder="What is the purpose of this interaction?")
        agent_name = gr.Dropdown(label="Agents", choices=agents, value=agents[0], interactive=True)
        sys_prompt = gr.Textbox(label="System Prompt", max_lines=1, interactive=True)
        temperature = gr.Slider(label="Temperature", value=TEMPERATURE, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs")
        max_new_tokens = gr.Slider(label="Max new tokens", value=MAX_TOKENS, minimum=0, maximum=1024*10, step=64, interactive=True, info="The maximum number of new tokens")
        top_p = gr.Slider(label="Top-p (nucleus sampling)", value=TOP_P, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens")
        repetition_penalty = gr.Slider(label="Repetition penalty", value=REPETITION_PENALTY, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens")

        # Button to submit the message
        submit_button = gr.Button(value="Send")

        # Project Explorer Tab
        with gr.Tab("Project Explorer"):
            project_path = gr.Textbox(label="Project Path", placeholder="/home/user/app/current_project")
            explore_button = gr.Button(value="Explore")
            project_output = gr.Textbox(label="File Tree", lines=20)

        # Chat App Logic Tab
        with gr.Tab("Chat App"):
            history = gr.State([])
            examples = [
                ["What is the purpose of this AI agent?", "I am designed to assist with no-code development tasks."],
                ["Can you help me generate a Python function to calculate the factorial of a number?", "Sure! Here is a Python function to calculate the factorial of a number:"],
                ["Generate a simple HTML page with a heading and a paragraph.", "```html\n<!DOCTYPE html>\n<html>\n<head>\n<title>My Simple Page</title>\n</head>\n<body>\n<h1>Welcome to my page!</h1>\n<p>This is a simple paragraph.</p>\n</body>\n</html>\n```"],
                ["Create a basic SQL query to select all data from a table named 'users'.", "```sql\nSELECT * FROM users;\n```"],
                ["Design a user interface for a mobile app that allows users to track their daily expenses.", "Here's a basic UI design for a mobile expense tracker app:\n\n**Screen 1: Home**\n- Top: App Name and Balance Display\n- Middle: List of Recent Transactions (Date, Description, Amount)\n- Bottom: Buttons for Add Expense, Add Income, View Categories\n\n**Screen 2: Add Expense**\n- Input fields for Date, Category, Description, Amount\n- Buttons for Save, Cancel\n\n**Screen 3: Expense Categories**\n- List of expense categories (e.g., Food, Transportation, Entertainment)\n- Option to add/edit categories\n\n**Screen 4: Reports**\n- Charts and graphs to visualize spending by category, date range, etc.\n- Filters to customize the reports"],
            ]

        def chat(purpose: str, message: str, agent_name: str, sys_prompt: str, temperature: float, max_new_tokens: int, top_p: float, repetition_penalty: float, history: List[Tuple[str, str]]) -> Tuple[List[Tuple[str, str]], List[Tuple[str, str]]]:
            """Handles the chat interaction."""
            prompt = format_prompt(message, history)
            response = generate(prompt, history, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty)
            history.append((message, response))
            return history, history

        submit_button.click(chat, inputs=[purpose, message, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty, history], outputs=[chatbot, history])

        # Project Explorer Logic
        def explore_project(project_path: str) -> str:
            """Explores the project directory and returns a file tree."""
            try:
                tree = subprocess.check_output(["tree", project_path]).decode("utf-8")
                return tree
            except Exception as e:
                return f"Error exploring project: {e}"

        explore_button.click(explore_project, inputs=[project_path], outputs=[project_output])

    demo.launch()

if __name__ == "__main__":
    main()


# --- Optional: minimal Flask service exposing a /terminal endpoint ---
# Intended to run standalone, not alongside the Gradio app above (main() blocks on
# demo.launch()). WARNING: it executes arbitrary shell commands; keep it on localhost.
from flask import Flask, request, jsonify

app = Flask(__name__)

@app.route('/terminal', methods=['POST'])
def terminal():
    command = request.json.get('command')
    if not command:
        return jsonify({'error': 'No command provided'}), 400

    try:
        result = subprocess.run(command, shell=True, capture_output=True, text=True)
        return jsonify({'output': result.stdout, 'error': result.stderr})
    except Exception as e:
        return jsonify({'error': str(e)}), 500

if __name__ == '__main__':
    app.run(port=5000)
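
# Example request (illustrative; assumes the service is running on localhost:5000):
#   curl -X POST http://localhost:5000/terminal \
#        -H "Content-Type: application/json" \
#        -d '{"command": "ls -la"}'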