from typing import List
from haystack import component
import pandas as pd
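# Render full DataFrames when stringified so query results handed to the LLM
# are not truncated by pandas' default display limits.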
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)
import sqlite3
import psycopg2
from pymongo import MongoClient
import pymongoarrow.monkey
from utils import TEMP_DIR
import ast

@component
class SQLiteQuery:
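    """Haystack component that runs SQL queries against a session-scoped SQLite database file."""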

    def __init__(self, sql_database: str):
        # check_same_thread=False lets the pipeline use this connection from a
        # different thread than the one that created it.
        self.connection = sqlite3.connect(sql_database, check_same_thread=False)

    @component.output_types(results=List[str], queries=List[str])
    def run(self, queries: List[str], session_hash):
        print("ATTEMPTING TO RUN SQLITE QUERY")
        dir_path = TEMP_DIR / str(session_hash)
        results = []
        for query in queries:
            result = pd.read_sql(query, self.connection)
            # Persist the full result set; note each query overwrites query.csv,
            # so only the last query's output is kept on disk.
            result.to_csv(f'{dir_path}/file_upload/query.csv', index=False)
            results.append(f"{result}")
        self.connection.close()
        return {"results": results, "queries": queries}
    


def sqlite_query_func(queries: List[str], session_hash, **kwargs):
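    """Tool wrapper: runs SQL against the session's uploaded SQLite file and returns an LLM-sized reply."""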
    dir_path = TEMP_DIR / str(session_hash)
    sql_query = SQLiteQuery(f'{dir_path}/file_upload/data_source.db')
    try:
        result = sql_query.run(queries, session_hash)
        if len(result["results"][0]) > 1000:
            print("QUERY TOO LARGE")
            return {"reply": "The query result is too large to be processed by the LLM, but it has been saved to our query.csv file. If you need to display the results directly, consider using the table_generation_func function."}
        else:
            return {"reply": result["results"][0]}

    except Exception as e:
        reply = f"""There was an error running the SQL query: {queries}

The error is: {e}

You should probably try again.
"""
        return {"reply": reply}
    
@component
class PostgreSQLQuery:
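    """Haystack component that runs SQL queries against a user-supplied PostgreSQL database."""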

    def __init__(self, url: str, sql_port: int, sql_user: str, sql_pass: str, sql_db_name: str):
        self.connection = psycopg2.connect(
            database=sql_db_name,
            user=sql_user,
            password=sql_pass,
            host=url,  # e.g., "localhost" or an IP address
            port=sql_port  # default is 5432
        )

    @component.output_types(results=List[str], queries=List[str])
    def run(self, queries: List[str], session_hash):
        print("ATTEMPTING TO RUN POSTGRESQL QUERY")
        dir_path = TEMP_DIR / str(session_hash)
        results = []
        for query in queries:
            print(query)
            result = pd.read_sql_query(query, self.connection)
            result.to_csv(f'{dir_path}/sql/query.csv', index=False)
            results.append(f"{result}")
        self.connection.close()
        return {"results": results, "queries": queries}
    


def sql_query_func(queries: List[str], session_hash, db_url, db_port, db_user, db_pass, db_name, **kwargs):
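    """Tool wrapper: runs SQL against the connected PostgreSQL database and returns an LLM-sized reply."""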
    sql_query = PostgreSQLQuery(db_url, db_port, db_user, db_pass, db_name)
    try:
        result = sql_query.run(queries, session_hash)
        print("RESULT")
        print(result)
        if len(result["results"][0]) > 1000:
            print("QUERY TOO LARGE")
            return {"reply": "The query result is too large to be processed by the LLM, but it has been saved to our query.csv file. If you need to display the results directly, consider using the table_generation_func function."}
        else:
            return {"reply": result["results"][0]}

    except Exception as e:
        reply = f"""There was an error running the SQL query: {queries}

The error is: {e}

You should probably try again.
"""
        print(reply)
        return {"reply": reply}

@component
class DocDBQuery:
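    """Haystack component that runs MongoDB aggregation pipelines and loads the results into pandas."""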

    def __init__(self, connection_string: str, doc_db_name: str):
        client = MongoClient(connection_string)

        self.client = client
        self.connection = client[doc_db_name]

    @component.output_types(results=List[str], queries=List[str])
    def run(self, aggregation_pipeline: str, db_collection, session_hash):
        # patch_all() adds pymongoarrow helpers such as aggregate_pandas_all
        # to pymongo's Collection objects.
        pymongoarrow.monkey.patch_all()
        print("ATTEMPTING TO RUN MONGODB QUERY")
        dir_path = TEMP_DIR / str(session_hash)
        results = []
        print(aggregation_pipeline)

        # The pipeline arrives as JSON-ish text, which ast.literal_eval cannot
        # parse directly: coerce JSON's lowercase booleans (and null) to their
        # Python equivalents before evaluating. Whitespace is left intact so
        # string values containing spaces are not corrupted.
        json_to_python = [
            (': false', ': False'), (':false', ':False'),
            (': true', ': True'), (':true', ':True'),
            (': null', ': None'), (':null', ':None'),
        ]
        for json_token, python_token in json_to_python:
            aggregation_pipeline = aggregation_pipeline.replace(json_token, python_token)

        query_list = ast.literal_eval(aggregation_pipeline)

        print("QUERY List")
        print(query_list)
        print(db_collection)

        db = self.connection
        collection = db[db_collection]

        print(collection)
        docs = collection.aggregate_pandas_all(query_list)
        print("DATA FRAME COMPLETE")
        docs.to_csv(f'{dir_path}/doc_db/query.csv', index=False)
        print("CSV COMPLETE")
        results.append(f"{docs}")
        self.client.close()
        # Wrap the pipeline string in a list to match the declared List[str] output type.
        return {"results": results, "queries": [aggregation_pipeline]}
    


def doc_db_query_func(aggregation_pipeline: str, db_collection: str, session_hash, connection_string, doc_db_name, **kwargs):
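    """Tool wrapper: runs a MongoDB aggregation pipeline and returns an LLM-sized reply."""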
    doc_db_query = DocDBQuery(connection_string, doc_db_name)
    try:
        result = doc_db_query.run(aggregation_pipeline, db_collection, session_hash)
        print("RESULT")
        if len(result["results"][0]) > 1000:
            print("QUERY TOO LARGE")
            return {"reply": "The query result is too large to be processed by the LLM, but it has been saved to our query.csv file. If you need to display the results directly, consider using the table_generation_func function."}
        else:
            return {"reply": result["results"][0]}

    except Exception as e:
        reply = f"""There was an error running the NoSQL (Mongo) query: {aggregation_pipeline}

The error is: {e}

You should probably try again.
"""
        print(reply)
        return {"reply": reply}