from __future__ import annotations

import os
import subprocess
import time
from typing import List, Optional, Union

import streamlit as st
from langchain_core.prompts import PromptTemplate
from langchain_community.llms import HuggingFaceEndpoint

# Assumption: prompt constants (PREFIX, VERBOSE, MAX_HISTORY, LOG_PROMPT, LOG_RESPONSE,
# ACTION_PROMPT, MODIFY_PROMPT, COMPRESS_HISTORY_PROMPT) and the parse_action helper are
# defined elsewhere in the project (e.g. a local prompts module) and imported here.

# Load LLM
llm = HuggingFaceEndpoint(repo_id="tiiuae/falcon-7b-instruct", model_kwargs={"temperature": 0.1, "max_new_tokens": 500})
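# Note: HuggingFaceEndpoint authenticates against the Hugging Face Inference API,
# typically via the HUGGINGFACEHUB_API_TOKEN environment variable (or an explicit
# huggingfacehub_api_token argument); set it before running the app.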
class Agent:
    def __init__(self, name: str, agent_type: str, complexity: int):
        self.name: str = name
        self.agent_type: str = agent_type
        self.complexity: int = complexity
        self.tools: List[Tool] = []

    def add_tool(self, tool: Tool):
        self.tools.append(tool)

    def __str__(self):
        return f"{self.name} ({self.agent_type}) - Complexity: {self.complexity}"


class Tool:
    def __init__(self, name: str, tool_type: str):
        self.name: str = name
        self.tool_type: str = tool_type

    def __str__(self):
        return f"{self.name} ({self.tool_type})"
class Pypelyne:
    def __init__(self):
        self.agents: List[Agent] = []
        self.tools: List[Tool] = []
        self.history: str = ""
        self.task: str = ""
        self.purpose: str = ""
        self.directory: str = ""
        self.task_queue: List[str] = []

    def add_agent(self, agent: Agent):
        self.agents.append(agent)

    def add_tool(self, tool: Tool):
        self.tools.append(tool)

    def generate_chat_app(self) -> str:
        time.sleep(2)
        return f"Chat app generated with {len(self.agents)} agents and {len(self.tools)} tools."
    def run_gpt(
        self,
        prompt_template: PromptTemplate,
        stop_tokens: List[str],
        max_tokens: int,
        **prompt_kwargs,
    ) -> str:
        # max_tokens is kept for interface compatibility; the generation length is
        # configured on the endpoint via max_new_tokens.
        content = f"""{PREFIX}
{prompt_template.format(**prompt_kwargs)}"""
        if VERBOSE:
            print(LOG_PROMPT.format(content))
        try:
            resp = llm.invoke(content, stop=stop_tokens)
        except Exception as e:
            print(f"Error in run_gpt: {e}")
            resp = f"Error: {e}"
        if VERBOSE:
            print(LOG_RESPONSE.format(resp))
        return resp
    def compress_history(self):
        resp = self.run_gpt(
            COMPRESS_HISTORY_PROMPT,
            stop_tokens=["observation:", "task:", "action:", "thought:"],
            max_tokens=512,
            task=self.task,
            history=self.history,
        )
        self.history = f"observation: {resp}\n"
    def run_action(self, action_name: str, action_input: Union[str, List[str]]) -> str:
        if action_name == "COMPLETE":
            return "Task completed."
        if len(self.history.split("\n")) > MAX_HISTORY:
            self.compress_history()
        # Dispatch to the matching call_<action> method.
        task_function = getattr(self, f"call_{action_name.lower()}", None)
        if task_function is None:
            return f"Unknown action: {action_name}"
        if action_name not in self.task_queue:
            self.task_queue.append(action_name)
        result = task_function(action_input)
        self.task_queue.pop(0)
        return result
    def call_main(self, action_input: List[str]) -> str:
        resp = self.run_gpt(
            ACTION_PROMPT,
            stop_tokens=["observation:", "task:"],
            max_tokens=256,
            task=self.task,
            history=self.history,
            actions=action_input,
        )
        for line in resp.strip().split("\n"):
            if line == "":
                continue
            if line.startswith("thought: "):
                self.history += f"{line}\n"
            elif line.startswith("action: "):
                action_name, action_input = parse_action(line)
                return self.run_action(action_name, action_input)
        return "No valid action found."
    def call_set_task(self, action_input: str) -> str:
        self.task = action_input
        return f"Task updated: {self.task}"
    def call_modify(self, action_input: str, agent: Optional[Agent] = None) -> str:
        with open(action_input, "r") as file:
            file_content = file.read()
        resp = self.run_gpt(
            MODIFY_PROMPT,
            stop_tokens=["action:", "thought:", "observation:"],
            max_tokens=2048,
            task=self.task,
            history=self.history,
            file_path=action_input,
            file_contents=file_content,
            agent=agent,
        )
        new_contents = resp.strip()
        with open(action_input, "w") as file:
            file.write(new_contents)
        self.history += "observation: file successfully modified\n"
        return f"File modified: {action_input}"
    def call_read(self, action_input: str) -> str:
        with open(action_input, "r") as file:
            file_content = file.read()
        self.history += f"observation: {file_content}\n"
        return file_content

    def call_add(self, action_input: str) -> str:
        if not os.path.exists(self.directory):
            os.makedirs(self.directory)
        with open(os.path.join(self.directory, action_input), "w") as file:
            file.write("")
        self.history += f"observation: file created: {action_input}\n"
        return f"File created: {action_input}"

    def call_test(self, action_input: str) -> str:
        result = subprocess.run(
            ["python", os.path.join(self.directory, action_input)],
            capture_output=True,
            text=True,
        )
        error_message = result.stderr.strip()
        self.history += f"observation: tests {'passed' if error_message == '' else 'failed'}\n"
        return f"Tests {'passed' if error_message == '' else 'failed'}: {error_message}"
# Global Pypelyne Instance
# Streamlit re-executes this script on every interaction, so the instance is
# persisted in st.session_state instead of being recreated on each rerun.
if "pypelyne" not in st.session_state:
    st.session_state.pypelyne = Pypelyne()
pypelyne: Pypelyne = st.session_state.pypelyne


# Helper Functions
def create_agent(name: str, agent_type: str, complexity: int) -> Agent:
    agent = Agent(name, agent_type, complexity)
    pypelyne.add_agent(agent)
    return agent


def create_tool(name: str, tool_type: str) -> Tool:
    tool = Tool(name, tool_type)
    pypelyne.add_tool(tool)
    return tool
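# Example usage of the helpers (hypothetical values, not part of the original app):
#   coder = create_agent("Coder", "Task Executor", complexity=3)
#   scraper = create_tool("Scraper", "Web Scraper")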
# Streamlit App Code
def main():
    st.title("🧠 Pypelyne: Your AI-Powered Coding Assistant")

    # Settings
    st.sidebar.title("⚙️ Settings")
    directory = st.sidebar.text_input(
        "Project Directory:", value=pypelyne.directory, help="Path to your coding project"
    )
    pypelyne.directory = directory
    purpose = st.sidebar.text_area(
        "Project Purpose:",
        value=pypelyne.purpose,
        help="Describe the purpose of your coding project.",
    )
    pypelyne.purpose = purpose
    # Agent and Tool Management
    # Inputs are rendered before their buttons so they are always visible and
    # their values are available on the rerun triggered by the click.
    st.sidebar.header("🤖 Agents")
    for agent in pypelyne.agents:
        st.sidebar.write(f"- {agent}")
    agent_name = st.sidebar.text_input("Agent Name:")
    agent_type = st.sidebar.selectbox(
        "Agent Type:",
        ["Task Executor", "Information Retriever", "Decision Maker", "Data Analyzer"],
    )
    agent_complexity = st.sidebar.slider("Complexity (1-5):", 1, 5, 3)
    if st.sidebar.button("Create New Agent") and agent_name:
        create_agent(agent_name, agent_type, agent_complexity)

    st.sidebar.header("🛠️ Tools")
    for tool in pypelyne.tools:
        st.sidebar.write(f"- {tool}")
    tool_name = st.sidebar.text_input("Tool Name:")
    tool_type = st.sidebar.selectbox(
        "Tool Type:",
        ["Web Scraper", "Database Connector", "API Caller", "File Handler", "Text Processor"],
    )
    if st.sidebar.button("Create New Tool") and tool_name:
        create_tool(tool_name, tool_type)
    # Main Content Area
    st.header("💻 Code Interaction")
    task = st.text_area(
        "🎯 Task:",
        value=pypelyne.task,
        help="Describe the coding task you want to perform.",
    )
    if task:
        pypelyne.task = task
    user_input = st.text_input("💬 Your Input:")
    if st.button("Execute"):
        if user_input:
            response = pypelyne.run_action("main", [user_input])
            st.write("Pypelyne Says: ", response)


if __name__ == "__main__":
    main()
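# To run locally (assuming this file is saved as app.py):
#   streamlit run app.py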