# code-chat-api — main.py
# FastAPI service exposing a LangGraph ReAct agent whose single tool runs
# Python code in a remote sandbox (Hugging Face Space) and streams results
# back to the chat UI as server-sent events.
import uuid
from fastapi import FastAPI
from fastapi.responses import StreamingResponse
from fastapi.middleware.cors import CORSMiddleware
from langchain_core.messages import BaseMessage, HumanMessage, trim_messages
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.prebuilt import create_react_agent
from pydantic import BaseModel
from typing import Optional
import json
from sse_starlette.sse import EventSourceResponse
import io
import sys
from contextlib import redirect_stdout, redirect_stderr
from langchain_core.runnables import RunnableConfig
import requests
import uvicorn
import re
from fastapi.staticfiles import StaticFiles
# Application instance; all routes below are registered on it.
app = FastAPI()
# Wide-open CORS: any origin, any method, any header. Acceptable for a demo
# Space; tighten allow_origins before exposing real credentials.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Serve the bundled chat front-end at /chatui (html=True serves index.html).
app.mount("/chatui", StaticFiles(directory="static/chatui", html=True), name="index")
class CodeExecutionResult:
    """Simple container for the outcome of a remote code execution.

    Holds the captured output and, when execution failed, an error message.
    """

    def __init__(self, output: str, error: Optional[str] = None):
        # Captured stdout / rendered result of the executed code.
        self.output = output
        # Error message when execution failed; None on success.
        self.error = error

    def __repr__(self) -> str:
        return f"{type(self).__name__}(output={self.output!r}, error={self.error!r})"
# Base URL of the remote sandboxed code-execution service used by execute_python.
API_URL = "https://pvanand-code-execution-files-v5.hf.space"
@tool
def execute_python(code: str) -> str:
    """Execute Python code in an IPython interactive shell and return the output.
    The returned artifacts (if present) are automatically rendered in the UI and visible to the user.

    Args:
        code: Valid Python code with correct indentation and syntax including necessary imports.

    Available Libraries:
        # Use Plotly for creating visualizations
        plotly
        pandas
        groq
        yfinance
        numpy
        scikit-learn
        statsmodels
        geopandas
        folium
        fpdf
        kaleido
        scipy
        geopy
        mapbox
    """
    headers = {
        'accept': 'application/json',
        'Content-Type': 'application/json',
    }
    payload = {
        # TODO: derive from config.configurable["thread_id"] so each chat
        # thread gets its own sandbox session instead of a shared one.
        "session_token": "test12345",
        "code": code,
    }
    try:
        # json= handles serialization; the timeout keeps the agent from
        # hanging forever when the sandbox service is unreachable.
        response = requests.post(
            f'{API_URL}/v0/execute',
            headers=headers,
            json=payload,
            timeout=60,
        )
    except requests.RequestException as exc:
        # Return the failure as text so the agent can report it to the user
        # instead of crashing the event stream.
        return f"Error: Request failed: {exc}"
    if response.status_code != 200:
        return f"Error: Request failed with status code {response.status_code}. Response: {response.text}"
    response_json = response.json()
    # The "data: ... \ndata:" framing is what the chat UI parses to render
    # returned artifacts; preserved verbatim.
    return f"data: {json.dumps(response_json)} \ndata:"
# Configure the memory and model.
# In-memory checkpointer: per-thread conversation state is lost on restart.
memory = MemorySaver()
# streaming=True so token chunks can be forwarded to clients as they arrive.
model = ChatOpenAI(model="gpt-4o-mini", streaming=True)
def state_modifier(state) -> list[BaseMessage]:
    """Return the trimmed message window passed to the model.

    Keeps the most recent messages (always including a leading system
    message when present), never splitting an individual message.
    NOTE(review): token_counter=len counts each message as one unit, so
    max_tokens=16000 caps the *message count*, not LLM tokens — confirm
    this is intended.
    """
    history = state["messages"]
    trimmed_window = trim_messages(
        history,
        strategy="last",
        start_on="human",
        include_system=True,
        allow_partial=False,
        token_counter=len,
        max_tokens=16000,
    )
    return trimmed_window
# Create the agent with the Python execution tool.
# ReAct loop: the model may call execute_python repeatedly before answering.
agent = create_react_agent(
    model,
    tools=[execute_python],
    checkpointer=memory,          # per-thread memory keyed by thread_id
    state_modifier=state_modifier,  # trims history before each model call
)
class ChatInput(BaseModel):
    """Request body for POST /chat."""
    # The user's message for this turn.
    message: str
    # Conversation id; when omitted, the endpoint generates a fresh UUID.
    thread_id: Optional[str] = None
@app.post("/chat")
async def chat(input_data: ChatInput):
    """Stream the agent's response to input_data.message as SSE events.

    Each event's data is one JSON object:
      {"type": "token", "content": ...}                 model token chunk
      {"type": "tool_start", "tool": ..., "input": ...} tool call began
      {"type": "tool_end", "tool": ..., "output": ...}  tool call finished
    """
    # Reuse the caller's thread for multi-turn memory, or start a new one.
    thread_id = input_data.thread_id or str(uuid.uuid4())
    config = {
        "configurable": {
            "thread_id": thread_id
        }
    }
    input_message = HumanMessage(content=input_data.message)

    async def generate():
        async for event in agent.astream_events(
            {"messages": [input_message]},
            config,
            version="v2"
        ):
            kind = event["event"]
            if kind == "on_chat_model_stream":
                content = event["data"]["chunk"].content
                if content:
                    yield f"{json.dumps({'type': 'token', 'content': content})}\n"
            elif kind == "on_tool_start":
                tool_input = event['data'].get('input', '')
                # default=str: tool inputs may contain non-JSON-serializable
                # LangChain objects; stringify rather than crash the stream.
                yield f"{json.dumps({'type': 'tool_start', 'tool': event['name'], 'input': tool_input}, default=str)}\n"
            elif kind == "on_tool_end":
                raw_output = event['data'].get('output', '')
                # Bug fix: previous code did .content on the '' default, which
                # raises AttributeError when 'output' is absent or is a plain
                # string. Fall back to the raw value when there is no .content.
                tool_output = getattr(raw_output, 'content', raw_output)
                yield f"{json.dumps({'type': 'tool_end', 'tool': event['name'], 'output': tool_output}, default=str)}\n"

    return EventSourceResponse(
        generate(),
        media_type="text/event-stream"
    )
@app.get("/health")
async def health_check():
    """Liveness probe: always reports the service as healthy."""
    status_report = {"status": "healthy"}
    return status_report
if __name__ == "__main__":
    # Bind all interfaces on port 7860 (the conventional Hugging Face
    # Spaces port — consistent with the hf.space API_URL above).
    uvicorn.run(app, host="0.0.0.0", port=7860)