import gradio as gr
import pandas as pd
import numpy as np
import plotly.express as px
import io
import json
import warnings
import google.generativeai as genai
import os
from contextlib import redirect_stdout  # used by _safe_exec to capture print output
from typing import List, Dict, Any, Tuple, Optional
import re

warnings.filterwarnings('ignore')

MAX_DASHBOARD_PLOTS = 10
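
# Runtime-dependency note (an assumption based on the calls below, not a pinned
# requirements list): pd.read_excel needs `openpyxl` for .xlsx (and `xlrd` for
# legacy .xls), px.scatter(trendline="ols") needs `statsmodels`, and the
# placeholder avatar created in the __main__ block needs `Pillow`.
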
CSS = """ |
|
/* --- Phoenix UI Professional Dark CSS --- */ |
|
#app-title { text-align: center; font-weight: 800; font-size: 2.5rem; color: #f9fafb; padding-top: 10px; } |
|
.stat-card { border-radius: 12px !important; padding: 20px !important; background: #1f2937 !important; border: 1px solid #374151 !important; text-align: center; transition: all 0.3s ease; } |
|
.stat-card:hover { transform: translateY(-5px); box-shadow: 0 10px 15px -3px rgba(0,0,0,0.1), 0 4px 6px -2px rgba(0,0,0,0.05); } |
|
.stat-card-title { font-size: 16px; font-weight: 500; color: #9ca3af !important; margin-bottom: 8px; } |
|
.stat-card-value { font-size: 32px; font-weight: 700; color: #f9fafb !important; } |
|
.sidebar { background-color: #111827 !important; padding: 15px; border-right: 1px solid #374151 !important; min-height: 100vh; } |
|
.sidebar .gr-button { width: 100%; text-align: left !important; background: none !important; border: none !important; box-shadow: none !important; color: #d1d5db !important; font-size: 16px !important; padding: 12px 10px !important; margin-bottom: 8px !important; border-radius: 8px !important; transition: background-color 0.2s ease; } |
|
.sidebar .gr-button:hover { background-color: #374151 !important; } |
|
.sidebar .gr-button.selected { background-color: #4f46e5 !important; font-weight: 600 !important; color: white !important; } |
|
.explanation-block { background-color: #1e3a8a !important; border-left: 4px solid #3b82f6 !important; padding: 12px; color: #e5e7eb !important; border-radius: 4px; } |
|
""" |
|
|
|
class DataExplorerApp:
    """A professional-grade, AI-powered data exploration application."""

    def __init__(self):
        self.demo = self._build_ui()

    def _build_ui(self) -> gr.Blocks:
        with gr.Blocks(theme=gr.themes.Glass(primary_hue="indigo", secondary_hue="blue"), css=CSS, title="AI Data Explorer Pro") as demo:
            state_var = gr.State({})

            # Components are created with render=False here and placed into the
            # layout below with explicit .render() calls.
            cockpit_btn = gr.Button("📊 Data Cockpit", elem_classes="selected", elem_id="cockpit", render=False)
            deep_dive_btn = gr.Button("🔍 Deep Dive Builder", elem_id="deep_dive", render=False)
            copilot_btn = gr.Button("🤖 Chief Data Scientist", elem_id="co-pilot", render=False)

            file_input = gr.File(
                label="📁 Upload Data File",
                file_types=[".csv", ".txt", ".xls", ".xlsx"],
                render=False,
            )
            status_output = gr.Markdown("Status: Awaiting data...", render=False)
            api_key_input = gr.Textbox(label="🔑 Gemini API Key", type="password", placeholder="Enter key to enable AI...", render=False)
            suggestion_btn = gr.Button("Get Smart Suggestions", variant="secondary", interactive=False, render=False)

            rows_stat = gr.Textbox("0", interactive=False, show_label=False, render=False)
            cols_stat = gr.Textbox("0", interactive=False, show_label=False, render=False)
            quality_stat = gr.Textbox("0%", interactive=False, show_label=False, render=False)
            time_cols_stat = gr.Textbox("0", interactive=False, show_label=False, render=False)
            suggestion_buttons = [gr.Button(visible=False, render=False) for _ in range(5)]

            plot_type_dd = gr.Dropdown(['histogram', 'bar', 'scatter', 'box'], label="Plot Type", value='histogram', render=False)
            x_col_dd = gr.Dropdown([], label="X-Axis / Column", interactive=False, render=False)
            y_col_dd = gr.Dropdown([], label="Y-Axis (for Scatter/Box)", visible=False, interactive=False, render=False)
            add_plot_btn = gr.Button("Add to Dashboard", variant="primary", interactive=False, render=False)
            clear_plots_btn = gr.Button("Clear Dashboard", interactive=False, render=False)
            dashboard_plots = [gr.Plot(visible=False, render=False) for _ in range(MAX_DASHBOARD_PLOTS)]

            chatbot = gr.Chatbot(height=500, label="Conversation", show_copy_button=True, avatar_images=(None, "bot.png"), render=False)
            copilot_explanation = gr.Markdown(visible=False, elem_classes="explanation-block", render=False)
            copilot_code = gr.Code(language="python", visible=False, label="Executed Code", render=False)
            copilot_plot = gr.Plot(visible=False, label="Generated Visualization", render=False)
            copilot_table = gr.Dataframe(visible=False, label="Generated Table", wrap=True, render=False)
            chat_input = gr.Textbox(label="Your Question", placeholder="e.g., 'What is the relationship between age and salary?'", scale=4, render=False)
            chat_submit_btn = gr.Button("Ask AI", variant="primary", interactive=False, render=False)

            with gr.Row():
                with gr.Column(scale=1, elem_classes="sidebar"):
                    gr.Markdown("## 🚀 AI Explorer Pro", elem_id="app-title")
                    cockpit_btn.render()
                    deep_dive_btn.render()
                    copilot_btn.render()
                    gr.Markdown("---")
                    file_input.render()
                    status_output.render()
                    gr.Markdown("---")
                    api_key_input.render()
                    suggestion_btn.render()
                with gr.Column(scale=4):
                    welcome_page, cockpit_page, deep_dive_page, copilot_page = [gr.Column(visible=i == 0) for i in range(4)]
                    with welcome_page:
                        gr.Markdown("# Welcome to the AI Data Explorer Pro\n> Please **upload a CSV, TXT, or Excel file** and **enter your Gemini API key** to begin your analysis.")
                    with cockpit_page:
                        gr.Markdown("## 📊 Data Cockpit: At-a-Glance Overview")
                        with gr.Row():
                            for title, stat_comp in [("Rows", rows_stat), ("Columns", cols_stat), ("Data Quality", quality_stat), ("Date/Time Cols", time_cols_stat)]:
                                with gr.Column(elem_classes="stat-card"):
                                    gr.Markdown(f"<div class='stat-card-title'>{title}</div>")
                                    stat_comp.render()
                        with gr.Accordion(label="✨ AI Smart Suggestions", open=True):
                            for btn in suggestion_buttons:
                                btn.render()
                    with deep_dive_page:
                        gr.Markdown("## 🔍 Deep Dive: Manual Dashboard Builder")
                        gr.Markdown("Construct visualizations to investigate specific relationships.")
                        with gr.Row():
                            plot_type_dd.render()
                            x_col_dd.render()
                            y_col_dd.render()
                        with gr.Row():
                            add_plot_btn.render()
                            clear_plots_btn.render()
                        with gr.Column():
                            for plot in dashboard_plots:
                                plot.render()
                    with copilot_page:
                        gr.Markdown("## 🤖 Chief Data Scientist: Your AI Partner")
                        chatbot.render()
                        with gr.Accordion("AI's Detailed Response", open=True):
                            copilot_explanation.render()
                            copilot_code.render()
                            copilot_plot.render()
                            copilot_table.render()
                        with gr.Row():
                            chat_input.render()
                            chat_submit_btn.render()

            pages = [welcome_page, cockpit_page, deep_dive_page, copilot_page]
            nav_buttons = [cockpit_btn, deep_dive_btn, copilot_btn]
            for i, btn in enumerate(nav_buttons):
                btn.click(lambda id=btn.elem_id: self._switch_page(id, pages), outputs=pages).then(
                    lambda i=i: [gr.update(elem_classes="selected" if j == i else "") for j in range(len(nav_buttons))], outputs=nav_buttons)

            file_input.upload(self.load_and_process_file, inputs=[file_input], outputs=[
                state_var, status_output, *pages, rows_stat, cols_stat, quality_stat, time_cols_stat, x_col_dd, y_col_dd, add_plot_btn])

            api_key_input.change(lambda x: gr.update(interactive=bool(x)), inputs=[api_key_input], outputs=[suggestion_btn])
            chat_input.change(lambda x: gr.update(interactive=bool(x.strip())), inputs=[chat_input], outputs=[chat_submit_btn])

            plot_type_dd.change(self._update_plot_controls, inputs=[plot_type_dd], outputs=[y_col_dd])
            add_plot_btn.click(self.add_plot_to_dashboard, inputs=[state_var, x_col_dd, y_col_dd, plot_type_dd], outputs=[state_var, clear_plots_btn, *dashboard_plots])
            clear_plots_btn.click(self.clear_dashboard, inputs=[state_var], outputs=[state_var, clear_plots_btn, *dashboard_plots])

            suggestion_btn.click(self.get_ai_suggestions, inputs=[state_var, api_key_input], outputs=suggestion_buttons)
            for btn in suggestion_buttons:
                btn.click(self.handle_suggestion_click, inputs=[btn], outputs=[*pages, chat_input])

            chat_submit_btn.click(self.respond_to_chat, [state_var, api_key_input, chat_input, chatbot], [chatbot, copilot_explanation, copilot_code, copilot_plot, copilot_table]).then(lambda: "", outputs=[chat_input])
            chat_input.submit(self.respond_to_chat, [state_var, api_key_input, chat_input, chatbot], [chatbot, copilot_explanation, copilot_code, copilot_plot, copilot_table]).then(lambda: "", outputs=[chat_input])
        return demo

    def launch(self):
        self.demo.launch(debug=True)

    def _switch_page(self, page_id: str, all_pages: List) -> List[gr.update]:
        visibility = {"welcome": 0, "cockpit": 1, "deep_dive": 2, "co-pilot": 3}
        return [gr.update(visible=i == visibility.get(page_id, 0)) for i in range(len(all_pages))]

    def _update_plot_controls(self, plot_type: str) -> gr.update:
        return gr.update(visible=plot_type in ['scatter', 'box'])

    def load_and_process_file(self, file_obj: Any) -> Tuple[Any, ...]:
        """Intelligently loads data from CSV, TXT, or Excel files."""
        try:
            filename = file_obj.name
            extension = os.path.splitext(filename)[1].lower()

            if extension == '.csv':
                df = pd.read_csv(filename)
            elif extension == '.txt':
                # Let pandas sniff the delimiter for plain-text files.
                df = pd.read_csv(filename, sep=None, engine='python')
            elif extension in ['.xls', '.xlsx']:
                df = pd.read_excel(filename)
            else:
                raise ValueError(f"Unsupported file type: {extension}")

            # Opportunistically convert object columns that parse cleanly as dates.
            for col in df.select_dtypes(include=['object']).columns:
                try:
                    df[col] = pd.to_datetime(df[col], errors='raise')
                except (ValueError, TypeError):
                    continue

            metadata = self._extract_dataset_metadata(df)
            state = {'df': df, 'metadata': metadata, 'dashboard_plots': []}
            rows, cols, quality = metadata['shape'][0], metadata['shape'][1], metadata['data_quality']
            page_updates = self._switch_page("cockpit", [0, 1, 2, 3])
            return (state, f"✅ **{os.path.basename(filename)}** loaded.", *page_updates, f"{rows:,}", f"{cols}", f"{quality}%", f"{len(metadata['datetime_cols'])}",
                    gr.update(choices=metadata['columns'], interactive=True), gr.update(choices=metadata['columns'], interactive=True), gr.update(interactive=True))
        except Exception as e:
            gr.Error(f"File Load Error: {e}")
            page_updates = self._switch_page("welcome", [0, 1, 2, 3])
            return {}, f"❌ Error: {e}", *page_updates, "0", "0", "0%", "0", gr.update(choices=[], interactive=False), gr.update(choices=[], interactive=False), gr.update(interactive=False)

    def _extract_dataset_metadata(self, df: pd.DataFrame) -> Dict[str, Any]:
        rows, cols = df.shape
        quality = round((df.notna().sum().sum() / df.size) * 100, 1) if df.size > 0 else 0
        return {'shape': (rows, cols), 'columns': df.columns.tolist(),
                'numeric_cols': df.select_dtypes(include=np.number).columns.tolist(),
                'categorical_cols': df.select_dtypes(include=['object', 'category']).columns.tolist(),
                'datetime_cols': df.select_dtypes(include=['datetime64', 'datetime64[ns]']).columns.tolist(),
                'dtypes_head': df.head(3).to_string(), 'data_quality': quality}

    def add_plot_to_dashboard(self, state: Dict, x_col: str, y_col: Optional[str], plot_type: str) -> List[Any]:
        dashboard_plots = state.get('dashboard_plots', [])
        if len(dashboard_plots) >= MAX_DASHBOARD_PLOTS:
            gr.Warning(f"Dashboard is full. Max {MAX_DASHBOARD_PLOTS} plots.")
            return [state, gr.update(interactive=True), *self._get_plot_updates(state)]
        if not x_col:
            gr.Warning("Please select an X-axis column.")
            return [state, gr.update(interactive=True), *self._get_plot_updates(state)]
        df = state.get('df')
        title = f"{plot_type.capitalize()}: {y_col} by {x_col}" if y_col and plot_type in ['box', 'scatter'] else f"Distribution of {x_col}"
        try:
            fig = None
            if plot_type == 'histogram':
                fig = px.histogram(df, x=x_col, title=title)
            elif plot_type == 'box':
                fig = px.box(df, x=x_col, y=y_col, title=title)
            elif plot_type == 'scatter':
                fig = px.scatter(df, x=x_col, y=y_col, title=title, trendline="ols")
            elif plot_type == 'bar':
                fig = px.bar(df[x_col].value_counts().nlargest(20), title=f"Top 20 for {x_col}")
            if fig:
                fig.update_layout(template="plotly_dark")
                dashboard_plots.append(fig)
                gr.Info(f"Added '{title}' to dashboard.")
            return [state, gr.update(interactive=True), *self._get_plot_updates(state)]
        except Exception as e:
            gr.Error(f"Plotting Error: {e}")
            return [state, gr.update(interactive=True), *self._get_plot_updates(state)]

    def _get_plot_updates(self, state: Dict) -> List[gr.update]:
        plots = state.get('dashboard_plots', [])
        return [gr.update(value=plots[i] if i < len(plots) else None, visible=i < len(plots)) for i in range(MAX_DASHBOARD_PLOTS)]

    def clear_dashboard(self, state: Dict) -> List[Any]:
        state['dashboard_plots'] = []
        gr.Info("Dashboard cleared.")
        return [state, gr.update(interactive=False), *self._get_plot_updates(state)]

    def get_ai_suggestions(self, state: Dict, api_key: str) -> List[gr.update]:
        if not api_key:
            gr.Warning("API Key is required.")
            return [gr.update(visible=False)] * 5
        if not state:
            gr.Warning("Please load data first.")
            return [gr.update(visible=False)] * 5
        columns = state.get('metadata', {}).get('columns', [])
        prompt = f"From columns {columns}, generate 4 impactful analytical questions. Return ONLY a JSON list of strings."
        try:
            genai.configure(api_key=api_key)
            response = genai.GenerativeModel('gemini-1.5-flash').generate_content(prompt)
            # Reuse the JSON sanitizer (handles fenced replies) and cap at the five buttons available.
            suggestions = self._sanitize_and_parse_json(response.text)[:5]
            return [gr.Button(s, visible=True) for s in suggestions] + [gr.Button(visible=False)] * (5 - len(suggestions))
        except Exception as e:
            gr.Error(f"AI Suggestion Error: {e}")
            return [gr.update(visible=False)] * 5

    def handle_suggestion_click(self, question: str) -> Tuple[gr.update, ...]:
        return *self._switch_page("co-pilot", [0, 1, 2, 3]), question

    def _sanitize_and_parse_json(self, raw_text: str) -> Dict:
        clean_text = re.sub(r'```json\n?|```', '', raw_text).strip()
        clean_text = re.sub(r'(?<!\\)\\(?!["\\/bfnrtu])', r'\\\\', clean_text)
        return json.loads(clean_text)
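    # Illustrative example of the cleanup performed above (actual model output
    # varies): a fenced reply such as
    #   ```json
    #   {"plan": "...", "code": "df['col'].str.contains('\d')", "insight": "..."}
    #   ```
    # is stripped down to the bare JSON object, and the lone backslash in '\d'
    # (not a valid JSON escape) is doubled to '\\d' so json.loads() accepts it.
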
    def respond_to_chat(self, state: Dict, api_key: str, user_message: str, history: List) -> Any:
        if not user_message.strip():
            yield history, *[gr.update()] * 4
            return
        if not api_key or not state:
            history.append((user_message, "I need a Gemini API key and a dataset to work."))
            yield history, *[gr.update(visible=False)] * 4
            return

        history.append((user_message, "Thinking... 🤔"))
        yield history, *[gr.update(visible=False)] * 4

        metadata = state.get('metadata', {})
        dtypes_head = metadata.get('dtypes_head', 'No metadata available.')
        prompt = f"""You are 'Chief Data Scientist', an expert AI analyst...
        Respond ONLY with a single JSON object with keys: "plan", "code", "insight".
        Metadata: {dtypes_head}
        User Question: "{user_message}"
        """
        try:
            genai.configure(api_key=api_key)
            response_json = self._sanitize_and_parse_json(genai.GenerativeModel('gemini-1.5-flash').generate_content(prompt).text)
            plan, code, insight = response_json.get("plan"), response_json.get("code"), response_json.get("insight")
            stdout, fig, df_result, error = self._safe_exec(code, {'df': state['df'], 'px': px, 'pd': pd})

            history[-1] = (user_message, f"**Plan:** {plan}")
            explanation = f"**Insight:** {insight}"
            if stdout:
                explanation += f"\n\n**Console Output:**\n```\n{stdout}\n```"
            if error:
                gr.Error(f"AI Code Execution Failed: {error}")

            yield (history, gr.update(visible=bool(explanation), value=explanation), gr.update(visible=bool(code), value=code),
                   gr.update(visible=bool(fig), value=fig), gr.update(visible=df_result is not None, value=df_result))
        except Exception as e:
            history[-1] = (user_message, f"I encountered an error processing the AI response. Please rephrase your question.\n\n**Details:** `{str(e)}`")
            yield history, *[gr.update(visible=False)] * 4

    def _safe_exec(self, code_string: str, local_vars: Dict) -> Tuple[Any, ...]:
        """Executes AI-generated code, capturing stdout and any `fig`/`result_df` it defines."""
        try:
            output_buffer = io.StringIO()
            with redirect_stdout(output_buffer):
                exec(code_string, globals(), local_vars)
            return output_buffer.getvalue(), local_vars.get('fig'), local_vars.get('result_df'), None
        except Exception as e:
            return None, None, None, str(e)

if __name__ == "__main__":
    # Create a 1x1 placeholder avatar for the chatbot if none exists.
    if not os.path.exists("bot.png"):
        try:
            from PIL import Image
            Image.new('RGB', (1, 1)).save('bot.png')
        except ImportError:
            print("Pillow not installed, cannot create dummy bot.png.")

    app = DataExplorerApp()
    app.launch()