File size: 7,263 Bytes
fcbf0a3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f47e966
45c01a3
 
 
5a63667
447844a
f47e966
fcbf0a3
5a63667
fcbf0a3
 
 
 
 
 
 
 
 
f47e966
 
fcbf0a3
 
 
 
 
9c08ccc
fcbf0a3
e0763be
45c01a3
fb8eac0
fcbf0a3
b27f6f9
fcbf0a3
 
a974aa2
 
fcbf0a3
 
 
 
 
 
a974aa2
fcbf0a3
 
e0763be
 
2f9be62
e0763be
 
 
 
 
 
 
 
 
 
 
fcbf0a3
e0763be
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f5726b3
 
37ef0d4
 
f5726b3
9bba688
f5726b3
 
fcbf0a3
9bba688
 
fcbf0a3
91bdf41
45c01a3
fb8eac0
122b2c0
f5726b3
27ca0a0
45c01a3
 
fcbf0a3
 
45c01a3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
fcbf0a3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e6ba032
ef408d7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b062dd3
e0763be
 
5dc7f33
ef408d7
 
 
 
fcbf0a3
 
 
 
 
 
69c11c4
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
import uuid
from fastapi import FastAPI
from fastapi.responses import StreamingResponse
from fastapi.middleware.cors import CORSMiddleware
from langchain_core.messages import BaseMessage, HumanMessage, trim_messages
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.prebuilt import create_react_agent
from pydantic import BaseModel
from typing import Optional
import json
from sse_starlette.sse import EventSourceResponse
import io
import sys
from contextlib import redirect_stdout, redirect_stderr
from langchain_core.runnables import RunnableConfig
import requests
import uvicorn
import re
from fastapi.staticfiles import StaticFiles
from langchain_core.runnables import RunnableConfig
from langchain_core.prompts import ChatPromptTemplate
from datetime import datetime
from presentation_api import router as presentation_router
from yf_docs import yf_docs

app = FastAPI()
app.include_router(presentation_router)

app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

app.mount("/chatui", StaticFiles(directory="static/chatui", html=True), name="index")

class CodeExecutionResult:
    """Value object holding the result of a remote code execution.

    Attributes:
        output: Captured text output of the execution.
        error: Error message if the execution failed, otherwise None.
    """

    def __init__(self, output: str, error: Optional[str] = None):
        # Fix: the default was annotated `str = None`; `None` is a valid
        # value, so the annotation must be Optional[str].
        self.output = output
        self.error = error

# Base URL of the remote sandbox service that executes user-submitted Python.
API_URL = "https://vps-91587096.vps.ovh.us"

@tool(response_format="content_and_artifact")
def execute_python(code: str, config: RunnableConfig):
    """Execute Python code in a jupyter notebook and return the output. The returned artifacts (if present) are automatically rendered in the UI and visible to the user. Available Libraries: plotly (default charting library),pandas,yfinance,numpy,geopandas,folium
    Args:
        code: Valid Python code with correct indentation and syntax including necessary imports.
    """
    # The LangGraph thread_id doubles as the sandbox session token, so each
    # chat thread gets its own isolated execution session on the remote service.
    thread_config = config.get("configurable", {})
    session_token = thread_config.get("thread_id", "test")

    headers = {
        'accept': 'application/json',
        'Content-Type': 'application/json'
    }
    data = {
        "session_token": session_token,
        "code": code
    }
    try:
        # Fix: the original request had no timeout, so a hung sandbox would
        # block the agent forever. 10s to connect, 120s for execution.
        response = requests.post(
            f'{API_URL}/v0/execute',
            headers=headers,
            json=data,
            timeout=(10, 120),
        )

        if response.status_code != 200:
            return (
                f"Error: Request failed with status code {response.status_code}. Response: {response.text}",
                None
            )

        # Get the response JSON
        response_json = response.json()

        # Artifacts (rendered charts etc.) travel separately from the textual
        # result so the UI can display them without the LLM seeing raw blobs.
        artifacts_data = response_json.get("artifacts_data", {})

        # Create a clean response without artifacts
        execution_response = {
            "status": response_json.get("status"),
            "text": response_json.get("text"),
            "error_message": response_json.get("error_message"),
            "artifacts": response_json.get("artifacts")
        }

        return (
            f"Execution completed successfully: {json.dumps(execution_response)}",
            {"artifacts_data": artifacts_data} if artifacts_data else None
        )

    except Exception as e:
        # Tools must return a string rather than raise so the agent can
        # observe the failure and recover.
        return (f"Error executing code: {str(e)}", None)

# Example snippet injected into the system prompt below: shows the model how
# to persist yfinance downloads across tool calls (variables do not survive
# between executions, but files in the working directory do).
save_yf_prompt = """
# Save downloaded data
tickers = yf.Tickers('MSFT AAPL')
data = tickers.history(period='6mo')
data.to_pickle('stock_data.pkl')

# Load it back later
data = pd.read_pickle('stock_data.pkl')

"""

# Conversation state is checkpointed in memory, keyed by thread_id.
memory = MemorySaver()
model = ChatOpenAI(model="gpt-4o", streaming=True)

# System prompt: persona, sandbox constraints (Plotly-only charts, no
# variable persistence between tool calls), today's date, the session's
# file listing placeholder and the yfinance docs.
# Fix: corrected typos from the original prompt ("taks", "Beutifully",
# "Format you responses", "an jupyter") — prompt text is model-facing.
prompt = ChatPromptTemplate.from_messages([
    ("system", f"You are a Data Visualization assistant. You have access to a Jupyter notebook with access to internet for python code execution.\
    Your task is to assist users with your data analysis and visualization expertise. Use only Plotly for creating visualizations and charts (Matplotlib is not available). Generated artifacts\
    are automatically rendered in the UI. Variables do not persist across tool calls, hence save any data to current directory you want to use in the next tool call to a file.(use file_name and do not add file path, as you have only permission to edit current folder) {save_yf_prompt} Today's date is \
    {datetime.now().strftime('%Y-%m-%d')}. Format your responses beautifully using markdown tables, paragraphs, lists etc. Saved files are not accessible to user. The current folder contains the following files: {{collection_files}} {yf_docs}"),
    ("placeholder", "{messages}"),
])

def state_modifier(state) -> list[BaseMessage]:
    """Build the trimmed message list the model sees on each agent step.

    Formats the system prompt (with the session's file listing) around the
    conversation history, then trims it to fit the context budget. Falls
    back to the raw message list on any error so a malformed state never
    kills the agent loop.
    """
    # TODO(review): hard-coded to "None" — should come from the sandbox's
    # actual file listing for this session.
    collection_files = "None"
    try:
        formatted_prompt = prompt.invoke({
            "collection_files": collection_files,
            "messages": state["messages"]
        })
        # Fix: removed leftover debug print(state["messages"]) that dumped
        # the full conversation to stdout on every step.
        # NOTE(review): token_counter=len counts *messages*, not tokens, so
        # max_tokens=16000 is effectively a message-count ceiling here.
        return trim_messages(
            formatted_prompt,
            token_counter=len,
            max_tokens=16000,
            strategy="last",
            start_on="human",
            include_system=True,
            allow_partial=False,
        )

    except Exception as e:
        # Best-effort fallback: log and return the untrimmed history.
        print(f"Error in state modifier: {str(e)}")
        return state["messages"]

# Create the agent with the Python execution tool
# ReAct-style loop: the model reasons, optionally calls execute_python, and
# iterates until it produces a final answer; state_modifier shapes the
# prompt each step and memory checkpoints the conversation per thread.
agent = create_react_agent(
    model,
    tools=[execute_python],
    checkpointer=memory,
    state_modifier=state_modifier,
)

class ChatInput(BaseModel):
    """Request body for POST /chat."""
    # The user's message text.
    message: str
    # Existing conversation thread to continue; a new one is created if omitted.
    thread_id: Optional[str] = None

@app.post("/chat")
async def chat(input_data: ChatInput):
    """Stream the agent's response to one user message over SSE.

    Emits one JSON object per event line:
      token       — incremental model text
      tool_start  — a tool invocation began, with its input
      tool_end    — a tool finished, with its text output and any artifacts
    """
    # Reuse the caller's thread for conversation continuity, else start fresh.
    thread_id = input_data.thread_id or str(uuid.uuid4())

    config = {
        "configurable": {
            "thread_id": thread_id
        }
    }

    input_message = HumanMessage(content=input_data.message)

    async def generate():
        async for event in agent.astream_events(
            {"messages": [input_message]},
            config,
            version="v2"
        ):
            kind = event["event"]

            if kind == "on_chat_model_stream":
                content = event["data"]["chunk"].content
                if content:
                    yield f"{json.dumps({'type': 'token', 'content': content})}\n"

            elif kind == "on_tool_start":
                tool_input = event['data'].get('input', '')
                yield f"{json.dumps({'type': 'tool_start', 'tool': event['name'], 'input': tool_input})}\n"

            elif kind == "on_tool_end":
                # Fix: the original read event['data'].get('output', '')
                # three times and would crash with AttributeError ('' has
                # no .content) if the output were missing. Read it once and
                # guard the attribute accesses instead.
                output = event['data'].get('output')
                tool_output = getattr(output, 'content', '')
                artifact = getattr(output, 'artifact', None)
                artifact_output = artifact.get('artifacts_data') if artifact else None
                yield f"{json.dumps({'type': 'tool_end', 'tool': event['name'], 'output': tool_output, 'artifacts_data': artifact_output})}\n"

    return EventSourceResponse(
        generate(),
        media_type="text/event-stream"
    )

@app.get("/health")
async def health_check():
    """Liveness probe: always reports the service as healthy."""
    return dict(status="healthy")

# Development entry point: serve the API on all interfaces, port 7860.
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)