File size: 15,599 Bytes
ebd1bb7
34d17cd
 
 
 
ab7e592
34d17cd
 
 
 
 
 
 
0d47f17
34d17cd
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4c39c75
9c7c267
accbd8f
 
f72d8ff
54f6686
943da33
 
34d17cd
 
b129b7d
accbd8f
78c0ec0
34d17cd
 
 
14b02fd
 
522caaa
f648e72
 
 
 
 
c48ef60
ac2e91b
 
 
f648e72
522caaa
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c48ef60
 
f648e72
c48ef60
 
 
 
 
 
f648e72
c48ef60
 
 
 
f648e72
ad4f466
 
da1e0d4
 
 
 
 
ad4f466
 
ac2e91b
ad4f466
 
 
 
 
 
 
 
7f7f33b
ad4f466
7f7f33b
ad4f466
 
f648e72
 
 
 
 
 
 
6e0b574
 
 
 
 
 
 
7f7f33b
6e0b574
 
 
7f7f33b
6e0b574
f648e72
241e962
f648e72
6e0b574
 
 
 
 
 
 
c48ef60
6e0b574
7f7f33b
da1e0d4
6e0b574
 
c48ef60
ac2e91b
6e0b574
a73d493
 
6e0b574
 
 
c48ef60
 
6e0b574
 
7f7f33b
6e0b574
 
2a25020
f648e72
 
c48ef60
f648e72
8c472a8
34d17cd
accbd8f
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
from flask import Flask, render_template, request, redirect, url_for, send_from_directory
from flask_socketio import SocketIO
import threading
import os
from dotenv import load_dotenv
import sqlite3  

# LangChain and agent imports
from langchain_community.chat_models.huggingface import ChatHuggingFace  # if needed later
from langchain.agents import Tool
from langchain.agents.format_scratchpad import format_log_to_str
from langchain.agents.output_parsers import ReActJsonSingleInputOutputParser
from langchain_core.callbacks import CallbackManager, BaseCallbackHandler
from langchain_community.agent_toolkits.load_tools import load_tools 
from langchain_core.tools import tool
from langchain_community.agent_toolkits import PowerBIToolkit
from langchain.chains import LLMMathChain
from langchain import hub
from langchain_community.tools import DuckDuckGoSearchRun

# Agent requirements and type hints
from typing import Annotated, Literal, Sequence, TypedDict, Any
from langchain_core.messages import AIMessage, ToolMessage
from pydantic import BaseModel, Field
from typing_extensions import TypedDict
from langgraph.graph import END, StateGraph, START
from langgraph.graph.message import AnyMessage, add_messages
from langchain_core.runnables import RunnableLambda, RunnableWithFallbacks
from langgraph.prebuilt import ToolNode

# Load environment variables from a local .env file (if present).
load_dotenv()

# Relative default for uploads; create_app() computes the absolute path it uses.
UPLOAD_FOLDER = "uploads/"

# In your .env file, ensure you have:
# DATABASE_URI=sqlite:///employee.db
# Default database URI; create_agent_app() overwrites DATABASE_URI on upload.
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
DATABASE_URI = f"sqlite:///{os.path.join(BASE_DIR, 'data', 'mydb.db')}"
print("DATABASE URI:", DATABASE_URI)


# API keys for the hosted LLM providers. os.environ values must be strings,
# so only propagate keys that are actually set — the unconditional assignment
# previously raised TypeError at import time when a key was missing.
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
MISTRAL_API_KEY = os.getenv("MISTRAL_API_KEY")
if GROQ_API_KEY:
    os.environ["GROQ_API_KEY"] = GROQ_API_KEY
if MISTRAL_API_KEY:
    os.environ["MISTRAL_API_KEY"] = MISTRAL_API_KEY

# Use ChatGroq LLM (which does not require a Hugging Face API token)
from langchain_groq import ChatGroq
from langchain_mistralai.chat_models import ChatMistralAI

###############################################################################
# Application Factory: create_app()
#
# This function sets up the Flask application, SocketIO, routes, and initializes
# the global agent_app using the default DATABASE_URI. It returns the Flask app.
###############################################################################
# --- Application Factory ---
# Module-level shared state, mutated by the /upload route inside create_app():
#   abs_file_path: absolute path of the most recently uploaded .db file.
#   agent_app: compiled LangGraph agent for that database (None until a
#   database has been uploaded).
abs_file_path = None
agent_app= None

def create_app():
    """Application factory.

    Sets up the Flask application, the SocketIO server and all HTTP routes,
    and defines ``create_agent_app`` — the builder for the LangGraph SQL
    agent that answers natural-language questions against an uploaded
    SQLite database.

    Returns:
        tuple: ``(flask_app, socketio)`` — the configured Flask app and the
        SocketIO server bound to it.
    """

    def create_agent_app(db_path: str):
        """Build and compile the SQL-agent workflow for the SQLite file at
        ``db_path``.

        Graph flow: forced list-tables call -> model chooses schema to
        fetch -> fetch schema -> generate query -> (double-check query ->
        execute) loop -> submit the final answer via SubmitFinalAnswer.

        Args:
            db_path: Path to the SQLite database file; made absolute here
                so SQLAlchemy can locate it.

        Returns:
            The compiled LangGraph application (call ``.invoke(...)`` on it).
        """
        # Alternative provider, kept for easy switching:
        #llm = ChatMistralAI(model="mistral-large-latest")
        llm = ChatGroq(model="llama3-70b-8192")

        @tool
        def db_query_tool(query: str) -> str:
            """
            Execute a SQL query against the database and return the result.
            If the query is invalid or returns no result, an error message will be returned.
            In case of an error, the user is advised to rewrite the query and try again.
            """
            result = db_instance.run_no_throw(query)
            if not result:
                return "Error: Query failed. Please rewrite your query and try again."
            return result

        # Pydantic schema the LLM invokes (as a tool call) to deliver its answer.
        class SubmitFinalAnswer(BaseModel):
            """Submit the final answer to the user based on the query results."""
            final_answer: str = Field(..., description="The final answer to the user")

        # LangGraph state: a message list merged with the add_messages reducer.
        class State(TypedDict):
            messages: Annotated[list[AnyMessage], add_messages]

        # Prompt templates for query checking and query generation.
        from langchain_core.prompts import ChatPromptTemplate

        query_check_system = """You are a SQL expert with a strong attention to detail.
        Double check the SQLite query for common mistakes, including:
        - Using NOT IN with NULL values
        - Using UNION when UNION ALL should have been used
        - Using BETWEEN for exclusive ranges
        - Data type mismatch in predicates
        - Properly quoting identifiers
        - Using the correct number of arguments for functions
        - Casting to the correct data type
        - Using the proper columns for joins
        
        If there are any of the above mistakes, rewrite the query. If there are no mistakes, just reproduce the original query.
        
        You will call the appropriate tool to execute the query after running this check."""
        query_check_prompt = ChatPromptTemplate.from_messages([("system", query_check_system), ("placeholder", "{messages}")])
        query_check = query_check_prompt | llm.bind_tools([db_query_tool])

        query_gen_system = """You are a SQL expert with a strong attention to detail.
        
        Given an input question, output a syntactically correct SQLite query to run, then look at the results of the query and return the answer.
        
        DO NOT call any tool besides SubmitFinalAnswer to submit the final answer.
        
        When generating the query:
        
        Output the SQL query that answers the input question without a tool call.
        
        Unless the user specifies a specific number of examples they wish to obtain, always limit your query to at most 5 results.
        You can order the results by a relevant column to return the most interesting examples in the database.
        Never query for all the columns from a specific table, only ask for the relevant columns given the question.
        
        If you get an error while executing a query, rewrite the query and try again.
        
        If you get an empty result set, you should try to rewrite the query to get a non-empty result set.
        NEVER make stuff up if you don't have enough information to answer the query... just say you don't have enough information.
        
        If you have enough information to answer the input question, simply invoke the appropriate tool to submit the final answer to the user.
        
        DO NOT make any DML statements (INSERT, UPDATE, DELETE, DROP etc.) to the database. Do not return any sql query except answer."""
        query_gen_prompt = ChatPromptTemplate.from_messages([("system", query_gen_system), ("placeholder", "{messages}")])
        query_gen = query_gen_prompt | llm.bind_tools([SubmitFinalAnswer])

        # Resolve the database location and remember it at module level.
        # NOTE(review): this stores the bare file path in DATABASE_URI while
        # the module default stores a sqlite:/// URI — confirm nothing else
        # reads DATABASE_URI expecting a URI.
        abs_db_path = os.path.abspath(db_path)
        global DATABASE_URI
        DATABASE_URI = abs_db_path
        db_uri = f"sqlite:///{abs_db_path}"
        print("db_uri",db_uri)

        # Create new SQLDatabase connection using the constructed URI.
        from langchain_community.utilities import SQLDatabase
        db_instance = SQLDatabase.from_uri(db_uri)
        print("db_instance----->",db_instance)
        print("db_uri----->",db_uri)

        # Create SQL toolkit and get the tools.
        from langchain_community.agent_toolkits import SQLDatabaseToolkit
        toolkit_instance = SQLDatabaseToolkit(db=db_instance, llm=llm)
        tools_instance = toolkit_instance.get_tools()

        # --- Workflow nodes and fallback helpers ---

        def first_tool_call(state: State) -> dict[str, list[AIMessage]]:
            # Seed the conversation with a forced sql_db_list_tables call.
            return {"messages": [AIMessage(content="", tool_calls=[{"name": "sql_db_list_tables", "args": {}, "id": "tool_abcd123"}])]}

        def handle_tool_error(state: State) -> dict:
            # Surface a tool failure back to the model as ToolMessages so it
            # can repair its own mistake instead of crashing the graph.
            error = state.get("error")
            tool_calls = state["messages"][-1].tool_calls
            return {
                "messages": [
                    ToolMessage(content=f"Error: {repr(error)}\n please fix your mistakes.", tool_call_id=tc["id"])
                    for tc in tool_calls
                ]
            }

        def create_tool_node_with_fallback(tools_list: list) -> RunnableWithFallbacks[Any, dict]:
            # Wrap a ToolNode so exceptions route through handle_tool_error.
            return ToolNode(tools_list).with_fallbacks([RunnableLambda(handle_tool_error)], exception_key="error")

        def query_gen_node(state: State):
            message = query_gen.invoke(state)
            # The generator may only call SubmitFinalAnswer; flag anything else.
            tool_messages = []
            if message.tool_calls:
                for tc in message.tool_calls:
                    if tc["name"] != "SubmitFinalAnswer":
                        tool_messages.append(
                            ToolMessage(
                                content=f"Error: The wrong tool was called: {tc['name']}. Please fix your mistakes. Remember to only call SubmitFinalAnswer to submit the final answer. Generated queries should be outputted WITHOUT a tool call.",
                                tool_call_id=tc["id"],
                            )
                        )
            return {"messages": [message] + tool_messages}

        def should_continue(state: State) -> Literal[END, "correct_query", "query_gen"]:
            # A tool call on the last message means the model submitted its
            # final answer -> stop; an "Error:" message -> regenerate.
            messages = state["messages"]
            last_message = messages[-1]
            if getattr(last_message, "tool_calls", None):
                return END
            if last_message.content.startswith("Error:"):
                return "query_gen"
            else:
                return "correct_query"

        def model_check_query(state: State) -> dict[str, list[AIMessage]]:
            """Double-check if the query is correct before executing it."""
            return {"messages": [query_check.invoke({"messages": [state["messages"][-1]]})]}

        # Get tools for listing tables and fetching schema.
        list_tables_tool = next((tool for tool in tools_instance if tool.name == "sql_db_list_tables"), None)
        get_schema_tool = next((tool for tool in tools_instance if tool.name == "sql_db_schema"), None)

        # --- Assemble the state graph ---
        workflow = StateGraph(State)
        workflow.add_node("first_tool_call", first_tool_call)
        workflow.add_node("list_tables_tool", create_tool_node_with_fallback([list_tables_tool]))
        workflow.add_node("get_schema_tool", create_tool_node_with_fallback([get_schema_tool]))
        model_get_schema = llm.bind_tools([get_schema_tool])
        workflow.add_node("model_get_schema", lambda state: {"messages": [model_get_schema.invoke(state["messages"])],})
        workflow.add_node("query_gen", query_gen_node)
        workflow.add_node("correct_query", model_check_query)
        workflow.add_node("execute_query", create_tool_node_with_fallback([db_query_tool]))

        workflow.add_edge(START, "first_tool_call")
        workflow.add_edge("first_tool_call", "list_tables_tool")
        workflow.add_edge("list_tables_tool", "model_get_schema")
        workflow.add_edge("model_get_schema", "get_schema_tool")
        workflow.add_edge("get_schema_tool", "query_gen")
        workflow.add_conditional_edges("query_gen", should_continue)
        workflow.add_edge("correct_query", "execute_query")
        workflow.add_edge("execute_query", "query_gen")

        # Compile and return the agent application workflow.
        return workflow.compile()

    # Serve the uploads folder as static files as well.
    flask_app = Flask(__name__, static_url_path='/uploads', static_folder='uploads')
    socketio = SocketIO(flask_app, cors_allowed_origins="*")

    # Ensure the uploads folder exists and record it in app config.
    UPLOAD_FOLDER_LOCAL = os.path.join(os.getcwd(), "uploads")
    if not os.path.exists(UPLOAD_FOLDER_LOCAL):
        os.makedirs(UPLOAD_FOLDER_LOCAL)
    flask_app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER_LOCAL

    # Custom route serving uploaded files by name.
    @flask_app.route("/files/<path:filename>")
    def uploaded_file(filename):
        return send_from_directory(flask_app.config['UPLOAD_FOLDER'], filename)

    # Worker-thread helper: feed the prompt to the global agent and stream
    # "log"/"final" events back over SocketIO.
    def run_agent(prompt, socketio):
        if agent_app is None:
            socketio.emit("log", {"message": "[ERROR]: No database has been uploaded. Please upload a database file first."})
            socketio.emit("final", {"message": "No database available. Upload a database and try again."})
            return
        try:
            query = {"messages": [("user", prompt)]}
            # Reuse the agent compiled at upload time. Rebuilding it here on
            # every request (as before) was redundant: /upload already
            # compiled the agent for the same database file.
            result = agent_app.invoke(query)
            try:
                result = result["messages"][-1].tool_calls[0]["args"]["final_answer"]
            except Exception:
                result = "Query failed or no valid answer found."

            print("final_answer------>", result)
            socketio.emit("final", {"message": f"{result}"})

        except Exception as e:
            print(f"[ERROR]: {str(e)}")
            socketio.emit("log", {"message": f"[ERROR]: {str(e)}"})
            socketio.emit("final", {"message": "Generation failed."})

    @flask_app.route("/")
    def index():
        return render_template("index.html")

    @flask_app.route("/generate", methods=["POST"])
    def generate():
        try:
            socketio.emit("log", {"message": "[STEP]: Entering query_gen..."})
            data = request.json
            prompt = data.get("prompt", "")
            socketio.emit("log", {"message": f"[INFO]: Received prompt: {prompt}\n"})
            # Run the agent in a separate thread so the request returns fast.
            thread = threading.Thread(target=run_agent, args=(prompt, socketio))
            socketio.emit("log", {"message": f"[INFO]: thread info: {thread}\n"})
            thread.start()
            return "OK", 200
        except Exception as e:
            print(f"[ERROR]: {str(e)}")
            socketio.emit("log", {"message": f"[ERROR]: {str(e)}"})
            # The original fell off the end here (returned None), which makes
            # Flask raise "view function did not return a valid response".
            return "Internal Server Error", 500

    @flask_app.route("/upload", methods=["POST", "GET"])
    def upload():
        try:
            if request.method == 'POST':
                file = request.files.get('file')
                if not file:
                    print("No file uploaded")
                    return "No file uploaded", 400
                if file and file.filename.endswith('.db'):
                    # Save file using flask_app.config
                    db_path = os.path.join(flask_app.config['UPLOAD_FOLDER'], 'uploaded.db')
                    socketio.emit("log", {"message": f"[INFO]: Saving file to: {db_path}\n"})
                    print("Saving file to:", db_path)
                    file.save(db_path)

                    # Reinitialize the agent_app with the new database file
                    global abs_file_path
                    abs_file_path = os.path.abspath(db_path)
                    global agent_app
                    agent_app = create_agent_app(abs_file_path)

                    print(f"[INFO_PRINT]: Database file '{file.filename}' uploaded and loaded.")
                    socketio.emit("log", {"message": f"[INFO]: Database file '{file.filename}' uploaded and loaded."})
                    return redirect(url_for("index"))
                # Non-.db uploads fall through to the upload form below.
            # For GET (or an invalid POST), render the upload form:
            return render_template("upload.html")
        except Exception as e:
            print(f"[ERROR]: {str(e)}")
            socketio.emit("log", {"message": f"[ERROR]: {str(e)}"})
            return render_template("upload.html")

    return flask_app, socketio

# Create the app for Gunicorn compatibility.
# Gunicorn imports this module and picks up `app`; do not rename it.
app, socketio_instance = create_app()

if __name__ == "__main__":
    # Development server only — SocketIO wraps the WSGI app, so start it via
    # socketio_instance.run rather than app.run.
    socketio_instance.run(app, debug=True)