# NOTE: Hugging Face Spaces page chrome (status lines, file-size banner,
# commit hashes, and the line-number gutter) was removed from this scraped source.
import json
import openai
import gradio as gr
import duckdb
from functools import lru_cache
import pandas as pd
import plotly.express as px
import os
# Set OpenAI API key from the environment; None if OPENAI_API_KEY is unset,
# in which case parse_query() will fail at request time.
openai.api_key = os.getenv("OPENAI_API_KEY")
# =========================
# Configuration and Setup
# =========================
# Load the Parquet dataset path (relative to the working directory).
dataset_path = 'sample_contract_df.parquet'  # Update with your Parquet file path
# =========================
# Dataset schema
# =========================
# (column name, DuckDB type) pairs for every column in the Parquet dataset.
_SCHEMA_FIELDS = [
    ("department_ind_agency", "VARCHAR"),
    ("cgac", "BIGINT"),
    ("sub_tier", "VARCHAR"),
    ("fpds_code", "VARCHAR"),
    ("office", "VARCHAR"),
    ("aac_code", "VARCHAR"),
    ("posteddate", "VARCHAR"),
    ("type", "VARCHAR"),
    ("basetype", "VARCHAR"),
    ("popstreetaddress", "VARCHAR"),
    ("popcity", "VARCHAR"),
    ("popstate", "VARCHAR"),
    ("popzip", "VARCHAR"),
    ("popcountry", "VARCHAR"),
    ("active", "VARCHAR"),
    ("awardnumber", "VARCHAR"),
    ("awarddate", "VARCHAR"),
    ("award", "DOUBLE"),
    ("awardee", "VARCHAR"),
    ("state", "VARCHAR"),
    ("city", "VARCHAR"),
    ("zipcode", "VARCHAR"),
    ("countrycode", "VARCHAR"),
]

# Schema in the list-of-dicts shape consumed by the prompt builder and the UI.
schema = [
    {"column_name": name, "column_type": col_type}
    for name, col_type in _SCHEMA_FIELDS
]
@lru_cache(maxsize=1)
def get_schema():
    """Return the column schema for the contract dataset (memoized)."""
    return schema


# Fast column-name -> DuckDB-type lookup derived from the schema.
COLUMN_TYPES = dict(
    (entry["column_name"], entry["column_type"]) for entry in get_schema()
)
# =========================
# Database Interaction
# =========================
def load_dataset_schema():
    """
    (Re)create the 'contract_data' DuckDB view over the Parquet dataset.

    Returns:
        bool: True when the view was created, False (after printing a
        diagnostic) when DuckDB raised.
    """
    con = duckdb.connect()
    try:
        con.execute("DROP VIEW IF EXISTS contract_data")
        con.execute(f"CREATE VIEW contract_data AS SELECT * FROM '{dataset_path}'")
    except Exception as exc:
        print(f"Error loading dataset schema: {exc}")
        return False
    else:
        return True
    finally:
        # Connection is per-call; always release it.
        con.close()
# =========================
# OpenAI API Integration
# =========================
def parse_query(nl_query):
    """
    Convert a natural language query into a SQL query using OpenAI's API.

    Parameters:
        nl_query (str): The user's question about the dataset.

    Returns:
        str: A bare SQL string on success, or a message starting with
        "Error" on failure (callers check that prefix).
    """
    messages = [
        {"role": "system", "content": "Convert natural language queries to SQL queries for 'contract_data'."},
        {"role": "user", "content": f"Schema:\n{json.dumps(schema, indent=2)}\n\nQuery:\n\"{nl_query}\"\n\nSQL:"}
    ]
    try:
        response = openai.chat.completions.create(
            model="gpt-4o-mini",
            messages=messages,
            temperature=0,  # deterministic output for reproducible SQL
            max_tokens=150,
        )
        sql_query = response.choices[0].message.content.strip()
        # Chat models frequently wrap SQL in Markdown fences (```sql ... ```);
        # DuckDB would reject that verbatim, so strip the fencing.
        if sql_query.startswith("```"):
            sql_query = sql_query.strip("`").strip()
            if sql_query.lower().startswith("sql"):
                sql_query = sql_query[3:].strip()
        return sql_query
    except Exception as e:
        return f"Error generating SQL query: {e}"
# =========================
# Plotting Utilities
# =========================
def detect_plot_intent(nl_query):
    """
    Return True if the user's query appears to ask for a visualization.

    Matches case-insensitively against a fixed set of plotting keywords.
    """
    lowered = nl_query.lower()
    for keyword in ('plot', 'graph', 'chart', 'distribution', 'visualize',
                    'trend', 'histogram', 'bar', 'line'):
        if keyword in lowered:
            return True
    return False
def generate_plot_code(sql_query, result_df):
    """
    Build a bar chart for a plot-style query, when possible.

    Parameters:
        sql_query (str): Text checked for plotting intent. NOTE(review): the
            UI currently passes the original natural-language query here,
            not the generated SQL — keyword detection works on either.
        result_df (pandas.DataFrame | None): Query results to visualize.

    Returns:
        plotly Figure, or None when no plot was requested or cannot be drawn.
    """
    if not detect_plot_intent(sql_query):
        return None
    # Guard against missing or empty results: px.bar would raise otherwise
    # (the original crashed on result_df=None or an empty frame).
    if result_df is None or result_df.empty:
        return None
    columns = result_df.columns.tolist()
    if len(columns) < 2:
        # Need at least an x and a y column for a bar chart.
        return None
    fig = px.bar(result_df, x=columns[0], y=columns[1], title='Generated Plot')
    fig.update_layout(title_x=0.5)  # center the title
    return fig
# =========================
# Gradio Application UI
# =========================
# Top-level Gradio UI: one text input on the left, results table and plot
# on the right. Wiring depends on the installed gradio version — see the
# review notes below.
with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue")) as demo:
    gr.Markdown("""
    ## Parquet Data Explorer
    **Query and visualize data effortlessly.**
    """, elem_id="main-title")
    with gr.Row():
        with gr.Column(scale=1):
            query = gr.Textbox(
                label="Ask a question about the data",
                placeholder='e.g., "What are the total awards over 1M in California?"',
                lines=1
            )
            # Display schema next to the input
            # NOTE(review): the json.loads(json.dumps(...)) round-trip is a
            # no-op copy of the schema list — confirm it can be simplified.
            schema_display = gr.JSON(value=json.loads(json.dumps(get_schema(), indent=2)), visible=False)
            # NOTE(review): gr.Alert is not a component in many gradio
            # releases — confirm it exists in the pinned version, or this
            # line raises AttributeError at startup.
            error_out = gr.Alert(variant="error", visible=False)
        with gr.Column(scale=2):
            results_out = gr.DataFrame(label="Results")
            plot_out = gr.Plot()

    def on_query_submit(nl_query):
        # NL -> SQL -> DataFrame -> optional figure. Errors from either
        # stage surface through error_out; data/plot outputs are cleared.
        sql_query = parse_query(nl_query)
        if sql_query.startswith("Error"):
            return gr.update(visible=True, value=sql_query), None, None
        # execute_query is defined later in this module; resolved at call time.
        result_df, error_msg = execute_query(sql_query)
        if error_msg:
            return gr.update(visible=True, value=error_msg), None, None
        # NOTE(review): the natural-language query (not the SQL) is passed
        # for plot-intent detection — presumably intentional; confirm.
        fig = generate_plot_code(nl_query, result_df)
        return gr.update(visible=False), result_df, fig

    def on_focus():
        # Reveal the (initially hidden) schema viewer when the input gains focus.
        return gr.update(visible=True)

    query.submit(
        fn=on_query_submit,
        inputs=query,
        outputs=[error_out, results_out, plot_out]
    )
    # NOTE(review): Textbox.focus is only available in newer gradio
    # versions — verify against the pinned release.
    query.focus(
        fn=on_focus,
        outputs=schema_display
    )
# =========================
# Helper Functions
# =========================
def execute_query(sql_query):
    """
    Execute a SQL query against the contract_data view.

    Parameters:
        sql_query (str): SQL to run, or an "Error..." string from
            parse_query, which is passed through unchanged.

    Returns:
        tuple: (DataFrame, "") on success, (None, error_message) on failure.
    """
    if sql_query.startswith("Error"):
        # Upstream failure from parse_query; propagate without executing.
        return None, sql_query
    con = duckdb.connect()
    try:
        # Connections are per-call, so the view created by
        # load_dataset_schema() is not visible here; recreate it.
        con.execute(f"CREATE OR REPLACE VIEW contract_data AS SELECT * FROM '{dataset_path}'")
        result_df = con.execute(sql_query).fetchdf()
        return result_df, ""
    except Exception as e:
        return None, f"Error executing query: {e}"
    finally:
        # Always release the connection — the original leaked it when
        # con.execute raised (close() was inside the try body).
        con.close()
# =========================
# Launch the Gradio App
# =========================
# Blocks until the server stops; must run after all UI wiring above.
demo.launch()