# EstateGuru/backup/test2.py
import os
import uuid
import threading
import asyncio
import json
import re
import random
import time
import pickle
import numpy as np
import requests # For llama.cpp server calls
from datetime import datetime
from fastapi import FastAPI, WebSocket, WebSocketDisconnect
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from langgraph.graph import StateGraph, START, END
import faiss
from sentence_transformers import SentenceTransformer
from tools import extract_json_from_response, apply_filters_partial, rule_based_extract, format_property_data, estateKeywords
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.tools import tool
from langchain_core.callbacks import StreamingStdOutCallbackHandler, CallbackManager
from langchain_core.callbacks.base import BaseCallbackHandler
# ------------------------ Model Inference Wrapper ------------------------
class ChatQwen:
"""
A chat wrapper for Qwen using llama.cpp.
This class can work in two modes:
- Local: Using a llama-cpp-python binding (gguf model file loaded locally).
- Server: Calling a remote llama.cpp server endpoint.
"""
def __init__(
self,
temperature=0.3,
streaming=False,
max_new_tokens=512,
callbacks=None,
use_server=False,
model_path: str = None,
server_url: str = None
):
self.temperature = temperature
self.streaming = streaming
self.max_new_tokens = max_new_tokens
self.callbacks = callbacks
self.use_server = use_server
if self.use_server:
# Use remote llama.cpp server – provide its URL.
self.server_url = server_url or "http://localhost:8000"
else:
# For local inference, a model_path must be provided.
if not model_path:
raise ValueError("Local mode requires a valid model_path to the gguf file.")
from llama_cpp import Llama # assumes llama-cpp-python is installed
self.model = Llama(
model_path=model_path,
temperature=self.temperature,
                n_ctx=512,  # NOTE: small context window; long prompts plus max_new_tokens can exceed it
n_threads=4 # Adjust as needed
)
def build_prompt(self, messages: list) -> str:
"""Build Qwen-compatible prompt with special tokens."""
prompt = ""
for msg in messages:
role = msg["role"]
content = msg["content"]
if role == "system":
prompt += f"<|im_start|>system\n{content}<|im_end|>\n"
elif role == "user":
prompt += f"<|im_start|>user\n{content}<|im_end|>\n"
elif role == "assistant":
prompt += f"<|im_start|>assistant\n{content}<|im_end|>\n"
prompt += "<|im_start|>assistant\n"
return prompt
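    # Illustrative sketch of the prompt this produces (ChatML framing used by Qwen),
    # assuming messages = [{"role": "user", "content": "Hi"}]:
    #
    #   <|im_start|>user
    #   Hi<|im_end|>
    #   <|im_start|>assistant
    #
    # The trailing open assistant turn cues the model to generate its reply.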
def generate_text(self, messages: list) -> str:
prompt = self.build_prompt(messages)
stop_tokens = ["<|im_end|>", "\n"] # Qwen's stop sequences
if self.use_server:
payload = {
"prompt": prompt,
"max_tokens": self.max_new_tokens,
"temperature": self.temperature,
"stream": self.streaming,
"stop": stop_tokens # Add stop tokens to server request
}
if self.streaming:
response = requests.post(f"{self.server_url}/generate", json=payload, stream=True)
generated_text = ""
for line in response.iter_lines():
if line:
token = line.decode("utf-8")
# Check for stop tokens in stream
if any(stop in token for stop in stop_tokens):
break
generated_text += token
if self.callbacks:
for callback in self.callbacks:
callback.on_llm_new_token(token)
return generated_text
else:
response = requests.post(f"{self.server_url}/generate", json=payload)
return response.json().get("generated_text", "")
else:
# Local llama.cpp inference
if self.streaming:
stream = self.model.create_completion(
prompt=prompt,
max_tokens=self.max_new_tokens,
temperature=self.temperature,
stream=True,
stop=stop_tokens
)
generated_text = ""
for token_chunk in stream:
token_text = token_chunk["choices"][0]["text"]
# Stop early if we detect end token
if any(stop in token_text for stop in stop_tokens):
break
generated_text += token_text
if self.callbacks:
for callback in self.callbacks:
callback.on_llm_new_token(token_text)
return generated_text
else:
result = self.model.create_completion(
prompt=prompt,
max_tokens=self.max_new_tokens,
temperature=self.temperature,
stop=stop_tokens
)
return result["choices"][0]["text"]
def invoke(self, messages: list, config: dict = None) -> AIMessage:
config = config or {}
callbacks = config.get("callbacks", self.callbacks)
original_callbacks = self.callbacks
self.callbacks = callbacks
output_text = self.generate_text(messages)
self.callbacks = original_callbacks
# In streaming mode we return an empty content as tokens are being sent via callbacks.
if self.streaming:
return AIMessage(content="")
else:
return AIMessage(content=output_text)
def __call__(self, messages: list) -> AIMessage:
return self.invoke(messages)
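# Minimal usage sketch (assumes the gguf file exists locally; not executed here):
#
#   chat = ChatQwen(streaming=False, model_path="qwen2.5-1.5b-instruct-q4_k_m.gguf")
#   reply = chat.invoke([{"role": "user", "content": "What is a freehold property?"}])
#   print(reply.content)
#
# In streaming mode, invoke() returns an empty AIMessage and the tokens arrive
# through the configured callbacks instead.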
# ------------------------ Callback for WebSocket Streaming ------------------------
class WebSocketStreamingCallbackHandler(BaseCallbackHandler):
def __init__(self, connection_id: str, loop):
self.connection_id = connection_id
self.loop = loop
def on_llm_new_token(self, token: str, **kwargs):
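        # Tokens are produced on a worker thread (see stream_query below), so we
        # cannot await here; run_coroutine_threadsafe hands the send off to the
        # server's event loop instead.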
asyncio.run_coroutine_threadsafe(
manager_socket.send_message(self.connection_id, token),
self.loop
)
# ------------------------ Instantiate the LLM ------------------------
# Choose one mode: local (set use_server=False) or server (set use_server=True).
model_path="qwen2.5-1.5b-instruct-q4_k_m.gguf"
llm = ChatQwen(
temperature=0.3,
streaming=True,
max_new_tokens=512,
use_server=False,
model_path=model_path,
# server_url="http://localhost:8000" # Uncomment and set if using server mode.
)
# ------------------------ FAISS and Sentence Transformer Setup ------------------------
index = faiss.read_index("./faiss.index")
with open("./metadata.pkl", "rb") as f:
docs = pickle.load(f)
st_model = SentenceTransformer('all-MiniLM-L6-v2')
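# Sketch of the retrieval round-trip these objects support (mirrors search_faiss below):
#
#   emb = st_model.encode(["2bhk near dubai mall"]).astype(np.float32)
#   _, idx = index.search(emb, 5)                      # top-5 nearest property docs
#   hits = [docs[i] for i in idx[0] if i < len(docs)]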
def make_system_prompt(suffix: str) -> str:
return (
"You are EstateGuru, a real estate expert developed by Abhishek Pathak at SwavishTek. "
"Your role is to help customers buy properties using only the provided data—do not invent any details. "
"The default currency is AED; if a query mentions another currency, convert the amount to AED "
"(for example, convert $10k to 36726.50 AED and $1 to 3.67 AED). "
"If a customer is interested in a property or needs to contact an agent, instruct them to call +91 8766268285. "
"Keep your answers short, clear, and concise."
f"\n{suffix}"
)
general_query_prompt = make_system_prompt(
"You are EstateGuru, a helpful real estate assistant. "
"Please respond only in English. "
"Convert any prices to USD before answering. "
"Provide a brief, direct answer without extra details."
)
# ------------------------ Tool Definitions ------------------------
@tool
def extract_filters(query: str) -> dict:
"""Extract filters from the query."""
llm_local = ChatQwen(temperature=0.3, streaming=False, use_server=False, model_path=model_path)
system = (
"You are an expert in extracting filters from property-related queries. Your task is to extract and return only the keys explicitly mentioned in the query as a valid JSON object (starting with '{' and ending with '}'). Include only those keys that are directly present in the query.\n\n"
"The possible keys are:\n"
" - 'projectName': The name of the project.\n"
" - 'developerName': The developer's name.\n"
" - 'relationshipManager': The relationship manager.\n"
" - 'propertyAddress': The property address.\n"
" - 'surroundingArea': The area or nearby landmarks.\n"
" - 'propertyType': The type or configuration of the property.\n"
" - 'amenities': Any amenities mentioned.\n"
" - 'coveredParking': Parking availability.\n"
" - 'petRules': Pet policies.\n"
" - 'security': Security details.\n"
" - 'occupancyRate': Occupancy information.\n"
" - 'constructionImpact': Construction or its impact.\n"
" - 'propertySize': Size of the property.\n"
" - 'propertyView': View details.\n"
" - 'propertyCondition': Condition of the property.\n"
" - 'serviceCharges': Service or maintenance charges.\n"
" - 'ownershipType': Ownership type.\n"
" - 'totalCosts': A cost threshold or cost amount.\n"
" - 'paymentPlans': Payment or financing plans.\n"
" - 'expectedRentalYield': Expected rental yield.\n"
" - 'rentalHistory': Rental history.\n"
" - 'shortTermRentals': Short-term rental information.\n"
" - 'resalePotential': Resale potential.\n"
" - 'uniqueId': A unique identifier.\n\n"
"Important instructions regarding cost thresholds:\n"
" - If the query contains phrases like 'under 10k', 'below 2m', or 'less than 5k', interpret these as cost thresholds.\n"
" - Convert any shorthand cost values to pure numbers (for example, '10k' becomes 10000, '2m' becomes 2000000) and assign them to the key 'totalCosts'.\n"
" - Do not use 'propertySize' for cost thresholds.\n\n"
" - Default currency is AED, if user query have different currency symbol then convert to equivalent AED amount (eg. $10k becomes 36726.50, $1 becomes 3.67).\n\n"
"Example:\n"
" For the query: \"properties near dubai mall under 43k\"\n"
" The expected output should be:\n"
" { \"surroundingArea\": \"dubai mall\", \"totalCosts\": 43000 }\n\n"
"Return ONLY a valid JSON object with the extracted keys and their corresponding values, with no additional text."
)
human_str = f"Here is the query:\n{query}"
filter_prompt = [
{"role": "system", "content": system},
{"role": "user", "content": human_str},
]
response = llm_local.invoke(messages=filter_prompt)
response_text = response.content if isinstance(response, AIMessage) else str(response)
try:
model_filters = extract_json_from_response(response_text)
except Exception as e:
print(f"JSON parsing error: {e}")
model_filters = {}
rule_filters = rule_based_extract(query)
print("Rule-based extraction:", rule_filters)
final_filters = {**model_filters, **rule_filters}
print("Final extraction:", final_filters)
return {"filters": final_filters}
@tool
def determine_route(query: str) -> dict:
"""Determine the route (search, suggest, detail, general, out_of_domain) for the query."""
real_estate_keywords = estateKeywords
pattern = re.compile("|".join(re.escape(keyword) for keyword in real_estate_keywords), re.IGNORECASE)
positive_signal = bool(pattern.search(query))
llm_local = ChatQwen(temperature=0.3, streaming=False, use_server=False, model_path=model_path)
    transform_suggest_to_list = query.lower().replace("suggest ", "list ")
system = """
Classify the user query as:
- **"search"**: if it requests property listings with specific filters (e.g., location, price, property type like "2bhk", service charges, pet policies, etc.).
- **"suggest"**: if it asks for property suggestions without filters.
- **"detail"**: if it is asking for more information about a previously provided property (for example, "tell me more about property 5" or "I want more information regarding 4BHK").
- **"general"**: for all other real estate-related questions.
- **"out_of_domain"**: if the query is not related to real estate (for example, tourist attractions, restaurants, etc.).
Keep in mind that queries mentioning terms like "service charge", "allow pets", "pet rules", etc., are considered real estate queries.
Return only the keyword: search, suggest, detail, general, or out_of_domain.
"""
human_str = f"Here is the query:\n{transform_suggest_to_list}"
router_prompt = [
{"role": "system", "content": system},
{"role": "user", "content": human_str},
]
response = llm_local.invoke(messages=router_prompt)
response_text = response.content if isinstance(response, AIMessage) else str(response)
route_value = str(response_text).strip().lower()
# Fallback override if query appears detailed.
detail_phrases = [
"more information", "tell me more", "more details", "give me more details",
"i need more details", "can you provide more details", "additional details",
"further information", "expand on that", "explain further", "elaborate more",
"more specifics", "i want to know more", "could you elaborate", "need more info",
"provide more details", "detail it further", "in-depth information", "break it down further",
"further explanation", "property 1", "property1", "first property", "about the 2nd", "regarding number 3"
]
if any(phrase in query.lower() for phrase in detail_phrases):
route_value = "detail"
if route_value not in {"search", "suggest", "detail", "general", "out_of_domain"}:
route_value = "general"
if route_value == "out_of_domain" and positive_signal:
route_value = "general"
if route_value == "out_of_domain":
route_value = "general" if positive_signal else "out_of_domain"
return {"route": route_value}
# ------------------------ Workflow Setup ------------------------
workflow = StateGraph(state_schema=dict)
def route_query(state: dict) -> dict:
new_state = state.copy()
try:
new_state["route"] = determine_route.invoke(new_state.get("query", "")).get("route", "general")
print(new_state["route"])
except Exception as e:
print(f"Routing error: {e}")
new_state["route"] = "general"
return new_state
def hybrid_extract(state: dict) -> dict:
new_state = state.copy()
new_state["filters"] = extract_filters.invoke(new_state.get("query", "")).get("filters", {})
return new_state
def apply_filters(state: dict) -> dict:
new_state = state.copy()
new_state["final_results"] = apply_filters_partial(state["faiss_results"], state.get("filters", {}))
return new_state
def handle_out_of_domain(state: dict) -> dict:
new_state = state.copy()
new_state["response"] = "I only handle real estate inquiries. Please ask a question related to properties."
return new_state
def search_faiss(state: dict) -> dict:
new_state = state.copy()
# Keep existing properties unless explicitly changed
new_state.setdefault("current_properties", state.get("current_properties", []))
query_embedding = st_model.encode([state["query"]])
_, indices = index.search(query_embedding.astype(np.float32), 5)
new_state["faiss_results"] = [docs[idx] for idx in indices[0] if idx < len(docs)]
return new_state
def suggest_properties(state: dict) -> dict:
new_state = state.copy()
new_state["suggestions"] = random.sample(docs, 5)
new_state["current_properties"] = new_state["suggestions"] # Explicitly set
return new_state
def generate_response(state: dict) -> dict:
new_state = state.copy()
messages = []
# Add the general query prompt.
messages.append({"role": "system", "content": general_query_prompt})
# For detail queries, add extra instructions.
if new_state.get("route", "general") == "detail":
messages.append({
"role": "system",
"content": (
"The user is asking about a specific property from the numbered list below. "
"Properties are listed as 1, 2, 3, etc. Use ONLY the corresponding property details. "
"Example: If they ask 'property 2', use the second entry in the list. Never invent data."
)
})
if new_state.get("current_properties"):
# Format properties with indices starting at 1
property_context = format_property_data_with_indices(new_state["current_properties"])
messages.append({"role": "system", "content": "Available Properties:\n" + property_context})
messages.append({"role": "system", "content": "When responding, use only the provided property details."})
# Add conversation history
for msg in state.get("messages", []):
messages.append({"role": msg["role"], "content": msg["content"]})
connection_id = state.get("connection_id")
loop = state.get("loop")
if connection_id and loop:
print("Yes")
callback_manager = [WebSocketStreamingCallbackHandler(connection_id, loop)]
_ = llm.invoke(
messages,
config={"callbacks": callback_manager}
)
new_state["response"] = ""
else:
callback_manager = [StreamingStdOutCallbackHandler()]
response = llm.invoke(
messages,
config={"callbacks": callback_manager}
)
new_state["response"] = response.content if isinstance(response, AIMessage) else str(response)
return new_state
def format_property_data_with_indices(properties: list) -> str:
formatted = []
for idx, prop in enumerate(properties, 1):
cost = prop.get("totalCosts", "N/A")
cost_str = f"{cost:,}" if isinstance(cost, (int, float)) else cost
formatted.append(
f"{idx}. Type: {prop['propertyType']}, Cost: AED {cost_str}, "
f"Size: {prop.get('propertySize', 'N/A')}, Amenities: {', '.join(prop.get('amenities', []))}, "
f"Rental Yield: {prop.get('expectedRentalYield', 'N/A')}, "
f"Ownership: {prop.get('ownershipType', 'N/A')}"
)
return "\n".join(formatted)
def format_final_response(state: dict) -> dict:
    new_state = state.copy()
    if state.get("route", "general") != "detail":
        if state.get("route") in ["search", "suggest"]:
            if "final_results" in state:
                new_state["current_properties"] = state["final_results"]
            elif "suggestions" in state:
                new_state["current_properties"] = state["suggestions"]
        # Format the current list with the same 1-based indices used in
        # format_property_data_with_indices, so detail follow-ups line up.
if new_state.get("current_properties"):
formatted = []
for idx, prop in enumerate(new_state["current_properties"], 1):
cost = prop.get("totalCosts", "N/A")
cost_str = f"{cost:,}" if isinstance(cost, (int, float)) else cost
formatted.append(
f"{idx}. Type: {prop['propertyType']}, Cost: AED {cost_str}, "
f"Size: {prop.get('propertySize', 'N/A')}, Amenities: {', '.join(map(str, prop.get('amenities', []))) if prop.get('amenities') else 'N/A'}, "
f"Rental Yield: {prop.get('expectedRentalYield', 'N/A')}, "
f"Ownership: {prop.get('ownershipType', 'N/A')}\n"
)
aggregated_response = "Here are the property details:\n" + "\n".join(formatted)
connection_id = state.get("connection_id")
loop = state.get("loop")
            if connection_id and loop:
                tokens = aggregated_response.split(" ")
for token in tokens:
asyncio.run_coroutine_threadsafe(
manager_socket.send_message(connection_id, token + " "),
loop
)
time.sleep(0.05)
new_state["response"] = ""
else:
new_state["response"] = aggregated_response
elif "response" in new_state:
new_state["response"] = str(new_state["response"])
return new_state
nodes = [
("route_query", route_query),
("hybrid_extract", hybrid_extract),
("faiss_search", search_faiss),
("apply_filters", apply_filters),
("suggest_properties", suggest_properties),
("handle_out_of_domain", handle_out_of_domain),
("generate_response", generate_response),
("format_response", format_final_response)
]
for name, node in nodes:
workflow.add_node(name, node)
workflow.add_edge(START, "route_query")
workflow.add_conditional_edges(
"route_query",
lambda state: state.get("route", "general"),
{
"search": "hybrid_extract",
"suggest": "suggest_properties",
"detail": "generate_response",
"general": "generate_response",
"out_of_domain": "handle_out_of_domain"
}
)
workflow.add_edge("hybrid_extract", "faiss_search")
workflow.add_edge("faiss_search", "apply_filters")
workflow.add_edge("apply_filters", "format_response")
workflow.add_edge("suggest_properties", "format_response")
workflow.add_edge("generate_response", "format_response")
workflow.add_edge("handle_out_of_domain", "format_response")
workflow.add_edge("format_response", END)
workflow_app = workflow.compile()
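# Resulting graph, for reference:
#
#   START -> route_query -+-> hybrid_extract -> faiss_search -> apply_filters -+
#                         +-> suggest_properties ------------------------------+-> format_response -> END
#                         +-> generate_response  ------------------------------+
#                         +-> handle_out_of_domain ----------------------------+
#
# ("detail" and "general" both route to generate_response.)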
# ------------------------ Conversation Manager ------------------------
class ConversationManager:
def __init__(self):
self.conversation_history = []
self.current_properties = []
def _add_message(self, role: str, content: str):
self.conversation_history.append({
"role": role,
"content": content,
"timestamp": datetime.now().isoformat()
})
def process_query(self, query: str) -> str:
if query.strip().lower() in {"hi", "hello", "hey"}:
self.conversation_history = []
self.current_properties = []
greeting_response = "Hello! How can I assist you today with your real estate inquiries?"
self._add_message("assistant", greeting_response)
return greeting_response
try:
self._add_message("user", query)
initial_state = {
"messages": self.conversation_history.copy(),
"query": query,
"route": "general",
"filters": {},
"current_properties": self.current_properties
}
for event in workflow_app.stream(initial_state, stream_mode="values"):
final_state = event
if 'final_results' in final_state:
self.current_properties = final_state['final_results']
elif 'suggestions' in final_state:
self.current_properties = final_state['suggestions']
            response_text = final_state.get("response", "I couldn't process that request.")
            self._add_message("assistant", response_text)
            return response_text
except Exception as e:
print(f"Processing error: {e}")
return "Sorry, I encountered an error processing your request."
conversation_managers = {}
# ------------------------ FastAPI Backend with WebSockets ------------------------
app = FastAPI()
class ConnectionManager:
def __init__(self):
self.active_connections = {}
async def connect(self, websocket: WebSocket):
await websocket.accept()
connection_id = str(uuid.uuid4())
self.active_connections[connection_id] = websocket
print(f"New connection: {connection_id}")
return connection_id
def disconnect(self, connection_id: str):
if connection_id in self.active_connections:
del self.active_connections[connection_id]
print(f"Disconnected: {connection_id}")
async def send_message(self, connection_id: str, message: str):
websocket = self.active_connections.get(connection_id)
if websocket:
await websocket.send_text(message)
manager_socket = ConnectionManager()
def stream_query(query: str, connection_id: str, loop):
conv_manager = conversation_managers.get(connection_id)
if conv_manager is None:
print(f"No conversation manager found for connection {connection_id}")
return
if query.strip().lower() in {"hi", "hello", "hey"}:
conv_manager.conversation_history = []
conv_manager.current_properties = []
greeting_response = "Hello! How can I assist you today with your real estate inquiries?"
conv_manager._add_message("assistant", greeting_response)
asyncio.run_coroutine_threadsafe(
manager_socket.send_message(connection_id, greeting_response),
loop
)
return
conv_manager._add_message("user", query)
initial_state = {
"messages": conv_manager.conversation_history.copy(),
"query": query,
"route": "general",
"filters": {},
"current_properties": conv_manager.current_properties,
"connection_id": connection_id,
"loop": loop
}
try:
workflow_app.invoke(initial_state)
except Exception as e:
error_msg = f"Error processing query: {str(e)}"
asyncio.run_coroutine_threadsafe(
manager_socket.send_message(connection_id, error_msg),
loop
)
@app.websocket("/ws")
async def websocket_endpoint(websocket: WebSocket):
connection_id = await manager_socket.connect(websocket)
conversation_managers[connection_id] = ConversationManager()
try:
while True:
query = await websocket.receive_text()
            loop = asyncio.get_running_loop()  # loop owning this connection; handed to the worker thread
threading.Thread(
target=stream_query,
args=(query, connection_id, loop),
daemon=True
).start()
except WebSocketDisconnect:
conv_manager = conversation_managers.get(connection_id)
if conv_manager:
filename = f"conversations/conversation_{connection_id}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
with open(filename, "w") as f:
json.dump(conv_manager.conversation_history, f, indent=4)
del conversation_managers[connection_id]
manager_socket.disconnect(connection_id)
@app.post("/query")
async def post_query(query: str):
conv_manager = ConversationManager()
response = conv_manager.process_query(query)
return {"response": response}