import os
import subprocess
import random
from huggingface_hub import InferenceClient
import gradio as gr
from safe_search import safe_search
from i_search import google
from i_search import i_search as i_s
from datetime import datetime
import logging
import json

now = datetime.now()
date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")

client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
# --- Set up logging ---
logging.basicConfig(
    filename="app.log",  # Name of the log file
    level=logging.INFO,  # Set the logging level (INFO, DEBUG, etc.)
    format="%(asctime)s - %(levelname)s - %(message)s",
)
agents = [
    "WEB_DEV",
    "AI_SYSTEM_PROMPT",
    "PYTHON_CODE_DEV",
]

VERBOSE = True
MAX_HISTORY = 5
PREFIX = """ | |
{date_time_str} | |
Purpose: {purpose} | |
Safe Search: {safe_search} | |
""" | |
LOG_PROMPT = """ | |
PROMPT: {content} | |
""" | |
LOG_RESPONSE = """ | |
RESPONSE: {resp} | |
""" | |
COMPRESS_HISTORY_PROMPT = """ | |
You are a helpful AI assistant. Your task is to compress the following history into a summary that is no longer than 512 tokens. | |
History: | |
{history} | |
""" | |
ACTION_PROMPT = """ | |
You are a helpful AI assistant. You are working on the task: {task} | |
Your current history is: | |
{history} | |
What is your next thought? | |
thought: | |
What is your next action? | |
action: | |
""" | |
TASK_PROMPT = """ | |
You are a helpful AI assistant. Your current history is: | |
{history} | |
What is the next task? | |
task: | |
""" | |
UNDERSTAND_TEST_RESULTS_PROMPT = """ | |
You are a helpful AI assistant. The test results are: | |
{test_results} | |
What do you want to know about the test results? | |
thought: | |
""" | |
def format_prompt(message, history, max_history_turns=2):
    prompt = " "
    # Keep only the last 'max_history_turns' turns
    for user_prompt, bot_response in history[-max_history_turns:]:
        prompt += f"[INST] {user_prompt} [/INST] {bot_response} "
    prompt += f"[INST] {message} [/INST] "
    return prompt
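
# Example (illustrative):
#   format_prompt("Hi", [("Hello", "Hi there!")])
#   -> " [INST] Hello [/INST] Hi there! [INST] Hi [/INST] "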
def run_gpt(
    prompt_template,
    stop_tokens,
    max_tokens,
    purpose,
    **prompt_kwargs,
):
    seed = random.randint(1, 1111111111111111)
    logging.info(f"Seed: {seed}")  # Log the seed
    content = PREFIX.format(
        date_time_str=date_time_str,
        purpose=purpose,
        safe_search=safe_search,  # interpolates the imported safe_search helper into the prompt header
    ) + prompt_template.format(**prompt_kwargs)
    if VERBOSE:
        logging.info(LOG_PROMPT.format(content=content))  # Log the prompt
    resp = client.text_generation(
        content,
        max_new_tokens=max_tokens,
        stop_sequences=stop_tokens,
        temperature=0.7,
        top_p=0.8,
        repetition_penalty=1.5,
    )
    if VERBOSE:
        logging.info(LOG_RESPONSE.format(resp=resp))  # Log the response
    return resp
def generate(
    prompt,
    history,
    agent_name=agents[0],
    sys_prompt="",
    temperature=0.7,
    max_new_tokens=2048,
    top_p=0.8,
    repetition_penalty=1.5,
    purpose="general assistance",  # 'purpose' was used but never defined here; added as a defaulted parameter
):
    content = PREFIX.format(
        date_time_str=date_time_str,
        purpose=purpose,
        safe_search=safe_search,
    ) + prompt
    if VERBOSE:
        logging.info(LOG_PROMPT.format(content=content))  # Log the prompt
    stream = client.text_generation(
        content,
        stream=True,
        details=True,
        return_full_text=False,
        temperature=temperature,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        max_new_tokens=max_new_tokens,
    )
    resp = ""
    for response in stream:
        resp += response.token.text
    if VERBOSE:
        logging.info(LOG_RESPONSE.format(resp=resp))  # Log the response
    return resp
def compress_history(purpose, task, history, directory):
    resp = run_gpt(
        COMPRESS_HISTORY_PROMPT,
        stop_tokens=["observation:", "task:", "action:", "thought:"],
        max_tokens=512,
        purpose=purpose,
        task=task,
        history=history,
    )
    history = "observation: {}\n".format(resp)
    return history
def call_search(purpose, task, history, directory, action_input):
    logging.info(f"CALLING SEARCH: {action_input}")
    try:
        if "http" in action_input:
            action_input = action_input.strip("<>").strip()
            response = i_s(action_input)
            logging.info(f"Search Result: {response}")
            history += "observation: search result is: {}\n".format(response)
        else:
            history += "observation: I need to provide a valid URL to 'action: SEARCH action_input=https://URL'\n"
    except Exception as e:
        history += "observation: {}\n".format(e)
    return "MAIN", None, history, task
def call_main(purpose, task, history, directory, action_input):
    logging.info(f"CALLING MAIN: {action_input}")
    resp = run_gpt(
        ACTION_PROMPT,
        stop_tokens=["observation:", "task:", "action:", "thought:"],
        max_tokens=32000,
        purpose=purpose,
        task=task,
        history=history,
    )
    lines = resp.strip().split("\n")
    for line in lines:
        if line == "":
            continue
        if line.startswith("thought: "):
            history += "{}\n".format(line)
            logging.info(f"Thought: {line}")
        elif line.startswith("action: "):
            action_name, action_input = parse_action(line)
            logging.info(f"Action: {action_name} - {action_input}")
            history += "{}\n".format(line)
            if "COMPLETE" in action_name or "COMPLETE" in action_input:
                task = "END"
            return action_name, action_input, history, task
        else:
            history += "{}\n".format(line)
            logging.info(f"Other Output: {line}")
    return "MAIN", None, history, task
def call_set_task(purpose, task, history, directory, action_input):
    logging.info(f"CALLING SET_TASK: {action_input}")
    task = run_gpt(
        TASK_PROMPT,
        stop_tokens=[],
        max_tokens=64,
        purpose=purpose,
        task=task,
        history=history,
    ).strip("\n")
    history += "observation: task has been updated to: {}\n".format(task)
    return "MAIN", None, history, task
def end_fn(purpose, task, history, directory, action_input):
    logging.info(f"CALLING END_FN: {action_input}")
    task = "END"
    return "COMPLETE", "COMPLETE", history, task

NAME_TO_FUNC = {
    "MAIN": call_main,
    "UPDATE-TASK": call_set_task,
    "SEARCH": call_search,
    "COMPLETE": end_fn,
}
def run_action(purpose, task, history, directory, action_name, action_input):
    logging.info(f"RUNNING ACTION: {action_name} - {action_input}")
    try:
        if "RESPONSE" in action_name or "COMPLETE" in action_name:
            action_name = "COMPLETE"
            task = "END"
            return action_name, "COMPLETE", history, task
        # Compress the history when it grows long
        if len(history.split("\n")) > MAX_HISTORY:
            logging.info("COMPRESSING HISTORY")
            history = compress_history(purpose, task, history, directory)
        # Fall back to MAIN for empty or unknown action names
        if not action_name or action_name not in NAME_TO_FUNC:
            action_name = "MAIN"
        assert action_name in NAME_TO_FUNC
        logging.info(f"RUN: {action_name} - {action_input}")
        return NAME_TO_FUNC[action_name](purpose, task, history, directory, action_input)
    except Exception as e:
        history += "observation: the previous command did not produce any useful output, I need to check the command's syntax, or use a different command\n"
        logging.error(f"Error in run_action: {e}")
        return "MAIN", None, history, task
def run(purpose, history):
    task = None
    directory = "./"
    if history:
        history = str(history).strip("[]")
    if not history:
        history = ""
    action_name = "UPDATE-TASK" if task is None else "MAIN"
    action_input = None
    while True:
        logging.info("---")
        logging.info(f"Purpose: {purpose}")
        logging.info(f"Task: {task}")
        logging.info("---")
        logging.info(f"History: {history}")
        logging.info("---")
        action_name, action_input, history, task = run_action(
            purpose,
            task,
            history,
            directory,
            action_name,
            action_input,
        )
        yield history
        if task == "END":
            return history
def parse_action(line):
    """Parse an 'action:' line into (action_name, action_input)."""
    # Expected formats: "action: NAME action_input=VALUE" or "action: NAME"
    remainder = line.split(":", 1)[1].strip() if ":" in line else line.strip()
    if "action_input=" in remainder:
        action_name, action_input = remainder.split("action_input=", 1)
        return action_name.strip(), action_input.strip()
    return remainder.strip(), ""
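
# Example (illustrative):
#   parse_action("action: SEARCH action_input=https://example.com")
#   -> ("SEARCH", "https://example.com")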
def main():
    with gr.Blocks() as demo:
        gr.Markdown("## FragMixt")
        gr.Markdown("### Agents w/ Agents")

        # Chat Interface
        chatbot = gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel")

        # Input Components
        message = gr.Textbox(label="Enter your message", placeholder="Ask me anything!")
        purpose = gr.Textbox(label="Purpose", placeholder="What is the purpose of this interaction?")
        agent_name = gr.Dropdown(label="Agents", choices=[s for s in agents], value=agents[0], interactive=True)
        sys_prompt = gr.Textbox(label="System Prompt", max_lines=1, interactive=True)
        temperature = gr.Slider(label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs")
        max_new_tokens = gr.Slider(label="Max new tokens", value=1048*10, minimum=0, maximum=1048*10, step=64, interactive=True, info="The maximum number of new tokens")
        top_p = gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values sample more low-probability tokens")
        repetition_penalty = gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens")

        # Button to submit the message
        submit_button = gr.Button(value="Send")

        # Project Explorer Tab
        with gr.Tab("Project Explorer"):
            project_path = gr.Textbox(label="Project Path", placeholder="/home/user/app/current_project")
            explore_button = gr.Button(value="Explore")
            project_output = gr.Textbox(label="File Tree", lines=20)
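
            # The Explore button has no handler in the original; this is a minimal
            # sketch, assuming a plain os.walk listing of the given path is the intent.
            def explore(path):
                if not path or not os.path.isdir(path):
                    return f"Not a directory: {path!r}"
                lines = []
                for root, dirs, files in os.walk(path):
                    depth = root[len(path):].count(os.sep)
                    indent = "  " * depth
                    lines.append(f"{indent}{os.path.basename(root) or root}/")
                    for name in sorted(files):
                        lines.append(f"{indent}  {name}")
                return "\n".join(lines)

            explore_button.click(explore, inputs=[project_path], outputs=[project_output])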
        # Chat App Logic Tab
        with gr.Tab("Chat App"):
            history = gr.State([])
            examples = [
                ["What is the purpose of this AI agent?", "I am designed to assist with no-code development tasks."],
                ["Can you help me generate a Python function to calculate the factorial of a number?", "Sure! Here is a Python function to calculate the factorial of a number:"],
            ]

            def chat(purpose, message, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty, history):
                prompt = format_prompt(message, history)
                response = generate(prompt, history, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty, purpose=purpose)
                history.append((message, response))
                return history, history

            submit_button.click(
                chat,
                inputs=[purpose, message, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty, history],
                outputs=[chatbot, history],
            )

    demo.launch()


if __name__ == "__main__":
    main()