import json
import openai
import gradio as gr
import duckdb
from functools import lru_cache
import pandas as pd
import plotly.express as px
import os
# =========================
# Configuration and Setup
# =========================
# Set OpenAI API key
openai.api_key = os.getenv("OPENAI_API_KEY")
# Load the Parquet dataset path
dataset_path = 'sample_contract_df.parquet' # Update with your Parquet file path
# Provided schema
schema = [
{"column_name": "department_ind_agency", "column_type": "VARCHAR"},
{"column_name": "cgac", "column_type": "BIGINT"},
{"column_name": "sub_tier", "column_type": "VARCHAR"},
{"column_name": "fpds_code", "column_type": "VARCHAR"},
{"column_name": "office", "column_type": "VARCHAR"},
{"column_name": "aac_code", "column_type": "VARCHAR"},
{"column_name": "posteddate", "column_type": "VARCHAR"},
{"column_name": "type", "column_type": "VARCHAR"},
{"column_name": "basetype", "column_type": "VARCHAR"},
{"column_name": "popstreetaddress", "column_type": "VARCHAR"},
{"column_name": "popcity", "column_type": "VARCHAR"},
{"column_name": "popstate", "column_type": "VARCHAR"},
{"column_name": "popzip", "column_type": "VARCHAR"},
{"column_name": "popcountry", "column_type": "VARCHAR"},
{"column_name": "active", "column_type": "VARCHAR"},
{"column_name": "awardnumber", "column_type": "VARCHAR"},
{"column_name": "awarddate", "column_type": "VARCHAR"},
{"column_name": "award", "column_type": "DOUBLE"},
{"column_name": "awardee", "column_type": "VARCHAR"},
{"column_name": "state", "column_type": "VARCHAR"},
{"column_name": "city", "column_type": "VARCHAR"},
{"column_name": "zipcode", "column_type": "VARCHAR"},
{"column_name": "countrycode", "column_type": "VARCHAR"}
]
@lru_cache(maxsize=1)
def get_schema():
    return schema
COLUMN_TYPES = {col['column_name']: col['column_type'] for col in get_schema()}
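# Illustrative lookups against the mapping above (values follow directly from the
# schema list; nothing here is executed by the app):
#   COLUMN_TYPES["award"]      -> "DOUBLE"
#   COLUMN_TYPES["posteddate"] -> "VARCHAR"
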
# =========================
# Database Interaction
# =========================
def load_dataset_schema():
"""
Loads the dataset schema into DuckDB by creating a view.
"""
con = duckdb.connect()
try:
con.execute("DROP VIEW IF EXISTS contract_data")
con.execute(f"CREATE VIEW contract_data AS SELECT * FROM '{dataset_path}'")
return True
except Exception as e:
print(f"Error loading dataset schema: {e}")
return False
finally:
con.close()
# Load the dataset schema at startup
load_dataset_schema()
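# Note: the connection opened in load_dataset_schema() is closed immediately, so the
# view created there does not persist; execute_query() below recreates the view on a
# fresh connection for every query. This startup call mainly serves as a sanity check
# that the Parquet file is readable.
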
# =========================
# OpenAI API Integration
# =========================
def parse_query(nl_query):
"""
Converts a natural language query into a SQL query using OpenAI's API.
"""
messages = [
{"role": "system", "content": "You are an assistant that converts natural language queries into SQL queries for the 'contract_data' table."},
{"role": "user", "content": f"Schema:\n{json.dumps(schema, indent=2)}\n\nQuery:\n\"{nl_query}\"\n\nSQL:"}
]
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
temperature=0,
max_tokens=150,
)
sql_query = response.choices[0].message.content.strip()
return sql_query
except Exception as e:
return f"Error generating SQL query: {e}"
# =========================
# Plotting Utilities
# =========================
def detect_plot_intent(nl_query):
"""
Detects if the user's query involves plotting.
"""
plot_keywords = ['plot', 'graph', 'chart', 'distribution', 'visualize', 'trend', 'histogram', 'bar', 'line', 'scatter', 'pie']
return any(keyword in nl_query.lower() for keyword in plot_keywords)
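# Illustrative behaviour of the keyword heuristic:
#   detect_plot_intent("Plot total awards by state")  -> True  (matches "plot")
#   detect_plot_intent("Show the ten largest awards") -> False (no plot keyword)
# Matching is substring-based, so e.g. "airline" would also trigger via "line".
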
def generate_plot(nl_query, result_df):
"""
Generates a Plotly figure based on the result DataFrame and the user's intent.
"""
if not detect_plot_intent(nl_query):
return None, ""
columns = result_df.columns.tolist()
if len(columns) < 2:
return None, "Not enough data to generate a plot."
# Simple heuristic to choose plot type based on keywords
if 'bar' in nl_query.lower():
fig = px.bar(result_df, x=columns[0], y=columns[1], title='Bar Chart')
elif 'line' in nl_query.lower():
fig = px.line(result_df, x=columns[0], y=columns[1], title='Line Chart')
elif 'scatter' in nl_query.lower():
fig = px.scatter(result_df, x=columns[0], y=columns[1], title='Scatter Plot')
elif 'pie' in nl_query.lower():
fig = px.pie(result_df, names=columns[0], values=columns[1], title='Pie Chart')
else:
# Default to bar chart
fig = px.bar(result_df, x=columns[0], y=columns[1], title='Bar Chart')
fig.update_layout(title_x=0.5)
return fig, ""
# =========================
# Gradio Application UI
# =========================
with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue")) as demo:
gr.Markdown("""
<h1 style="text-align: center; font-size: 2.5em; color: #333333;">Parquet Data Explorer</h1>
<p style="text-align: center; color: #666666;">Query and visualize your data effortlessly.</p>
""", elem_id="main-title")
with gr.Row():
with gr.Column(scale=1):
query = gr.Textbox(
label="Your Query",
placeholder='e.g., "What are the total awards over 1M in California?"',
lines=1
)
# Hidden schema display that appears on focus
schema_display = gr.JSON(
label="Dataset Schema",
value=get_schema(),
interactive=False,
visible=False
)
error_out = gr.Markdown(
value="",
visible=False
)
with gr.Column(scale=2):
results_out = gr.DataFrame(
label="Results",
interactive=False
)
plot_out = gr.Plot(
label="Visualization"
)
gr.Markdown("""
<style>
/* Center the content */
.gradio-container {
max-width: 1000px;
margin: auto;
}
/* Style the main title */
#main-title h1 {
font-weight: bold;
}
/* Style the error alert */
.gradio-container .alert-error {
background-color: #ffe6e6;
color: #cc0000;
border: 1px solid #cc0000;
}
</style>
""")
    # =========================
    # Click Event Handlers
    # =========================
    def on_query_submit(nl_query):
        """
        Handles the submission of a natural language query.
        """
        if not nl_query.strip():
            return gr.update(visible=True, value="Please enter a query."), None, None
        sql_query = parse_query(nl_query)
        if sql_query.startswith("Error"):
            return gr.update(visible=True, value=sql_query), None, None
        result_df, error_msg = execute_query(sql_query)
        if error_msg:
            return gr.update(visible=True, value=error_msg), None, None
        fig, plot_error = generate_plot(nl_query, result_df)
        if plot_error:
            return gr.update(visible=True, value=plot_error), None, None
        return gr.update(visible=False, value=""), result_df, fig

    def on_input_focus():
        """
        Shows the dataset schema when the input box is focused.
        """
        return gr.update(visible=True)

    # =========================
    # Assign Event Handlers
    # =========================
    # Event listeners are attached inside the Blocks context so Gradio can bind
    # them to the components defined above.
    query.submit(
        fn=on_query_submit,
        inputs=query,
        outputs=[error_out, results_out, plot_out]
    )
    query.focus(
        fn=on_input_focus,
        inputs=None,
        outputs=schema_display
    )
# =========================
# Helper Functions
# =========================
def execute_query(sql_query):
"""
Executes the SQL query and returns the results.
"""
try:
con = duckdb.connect()
con.execute("PRAGMA threads=4") # Optimize for performance
con.execute("DROP VIEW IF EXISTS contract_data")
con.execute(f"CREATE VIEW contract_data AS SELECT * FROM '{dataset_path}'")
result_df = con.execute(sql_query).fetchdf()
con.close()
return result_df, ""
except Exception as e:
return None, f"Error executing query: {e}"
# =========================
# Launch the Gradio App
# =========================
if __name__ == "__main__":
    demo.launch()
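    # Optional: demo.launch(share=True) creates a temporary public URL, e.g. when
    # running locally outside Hugging Face Spaces.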