"""Phoenix UI — an AI-assisted data-exploration Gradio app.

Pages: a stat-card "Data Cockpit", a "Deep Dive" dashboard builder, and an
AI "Co-pilot" chat that asks Gemini to write pandas/plotly code which is then
executed locally against the uploaded DataFrame.
"""

import io
import json
import os
import warnings
from contextlib import redirect_stdout

import gradio as gr
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots

import google.generativeai as genai

# --- Configuration ---
warnings.filterwarnings('ignore')

CSS = """
/* --- Phoenix UI Custom CSS --- */

/* Stat Card Styling */
.stat-card {
    border-radius: 12px !important;
    padding: 20px !important;
    background: #f7fafc; /* light gray background */
    border: 1px solid #e2e8f0;
    box-shadow: 0 4px 6px -1px rgba(0, 0, 0, 0.1), 0 2px 4px -1px rgba(0, 0, 0, 0.06);
    text-align: center;
}
.stat-card-title { font-size: 16px; font-weight: 500; color: #4a5568; margin-bottom: 8px; }
.stat-card-value { font-size: 32px; font-weight: 700; color: #2d3748; }

/* General Layout & Feel */
.gradio-container { font-family: 'Inter', sans-serif; }
.gr-button { box-shadow: 0 1px 2px 0 rgba(0,0,0,0.05); }

/* Sidebar Styling */
.sidebar {
    background-color: #f9fafb;
    padding: 15px;
    border-right: 1px solid #e5e7eb;
    min-height: 100vh;
}
.sidebar .gr-button {
    width: 100%;
    text-align: left !important;
    background: none !important;
    border: none !important;
    box-shadow: none !important;
    color: #374151 !important;
    font-size: 16px !important;
    padding: 12px 10px !important;
    margin-bottom: 8px !important;
    border-radius: 8px !important;
}
.sidebar .gr-button:hover { background-color: #e5e7eb !important; }
.sidebar .gr-button.selected { background-color: #d1d5db !important; font-weight: 600 !important; }

/* AI Co-pilot Styling */
.code-block { border: 1px solid #e5e7eb; border-radius: 8px; }
.explanation-block { background-color: #f0f9ff; border-left: 4px solid #3b82f6; padding: 12px; }
"""


# --- Helper Functions ---
def safe_exec(code_string: str, local_vars: dict):
    """Execute a string of Python code and capture its output.

    Despite the name, this is NOT a sandbox: `exec` runs the model-generated
    code with full module globals.  SECURITY NOTE(review): anything Gemini
    returns is executed on this machine — acceptable only for a trusted,
    local, single-user tool.  Consider an AST allow-list or a subprocess
    jail before deploying.

    Returns a 4-tuple `(stdout, fig, result_df, error)`; exactly one of
    `error` / the other three is meaningful.  The executed code communicates
    results by assigning to `fig` and/or `result_df` in `local_vars`.
    """
    output_buffer = io.StringIO()
    try:
        with redirect_stdout(output_buffer):
            exec(code_string, globals(), local_vars)
        stdout = output_buffer.getvalue()
        fig = local_vars.get('fig')
        result_df = local_vars.get('result_df')
        return stdout, fig, result_df, None
    except Exception as e:
        return None, None, None, f"Execution Error: {str(e)}"


# --- Core Data Processing & State Management ---
def load_and_process_file(file_obj, state_dict):
    """Load a CSV, process it, and update the entire UI state.

    Returns 13 outputs: (state, status message, 4 page-visibility updates,
    4 stat-card values, 3 dropdown-choice updates).  Every return path must
    emit the same arity or Gradio raises at runtime.
    """
    if file_obj is None:
        # BUGFIX: this branch previously returned only 6 outputs (4 trailing
        # updates) while the other paths return 13 — pad to match.
        return state_dict, "Please upload a file.", *[gr.update()] * 11
    try:
        df = pd.read_csv(file_obj.name, low_memory=False)
        # Opportunistically parse object columns as datetimes; columns that
        # fail to parse are simply left as-is.
        for col in df.select_dtypes(include=['object']).columns:
            try:
                df[col] = pd.to_datetime(df[col], errors='raise')
            except (ValueError, TypeError):
                continue
        metadata = extract_dataset_metadata(df)
        state_dict = {
            'df': df,
            'metadata': metadata,
            'filename': os.path.basename(file_obj.name),
            'dashboard_plots': []
        }
        status_msg = f"✅ **{state_dict['filename']}** loaded successfully."
        # Update UI elements with new data context
        cockpit_update = gr.update(visible=True)
        deep_dive_update = gr.update(visible=False)
        copilot_update = gr.update(visible=False)
        welcome_update = gr.update(visible=False)
        # Stat cards
        rows, cols = metadata['shape']
        quality = metadata['data_quality']
        return (state_dict, status_msg, welcome_update, cockpit_update,
                deep_dive_update, copilot_update,
                gr.update(value=f"{rows:,}"), gr.update(value=cols),
                gr.update(value=f"{quality}%"),
                gr.update(value=f"{len(metadata['datetime_cols'])}"),
                gr.update(choices=metadata['columns']),
                gr.update(choices=metadata['columns']),
                gr.update(choices=metadata['columns']))
    except Exception as e:
        return state_dict, f"❌ **Error:** {e}", *[gr.update()] * 11


def extract_dataset_metadata(df: pd.DataFrame):
    """Summarize a DataFrame into the metadata dict the UI and prompts use."""
    rows, cols = df.shape
    numeric_cols = df.select_dtypes(include=np.number).columns.tolist()
    categorical_cols = df.select_dtypes(include=['object', 'category']).columns.tolist()
    datetime_cols = df.select_dtypes(include=['datetime64', 'datetime64[ns]']).columns.tolist()
    # Percentage of non-null cells; guard against an empty frame (0 cells).
    data_quality = round((df.notna().sum().sum() / (rows * cols)) * 100, 1) if rows * cols > 0 else 0
    return {
        'shape': (rows, cols),
        'columns': df.columns.tolist(),
        'numeric_cols': numeric_cols,
        'categorical_cols': categorical_cols,
        'datetime_cols': datetime_cols,
        'dtypes': df.dtypes.to_string(),
        'head': df.head().to_string(),
        'data_quality': data_quality
    }


# --- Page Navigation ---
def switch_page(page_name):
    """Control visibility of the three main content pages.

    Returns visibility updates for (cockpit, deep_dive, co-pilot); unknown
    names fall back to showing the cockpit.
    """
    if page_name == "cockpit":
        return gr.update(visible=True), gr.update(visible=False), gr.update(visible=False)
    elif page_name == "deep_dive":
        return gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)
    elif page_name == "co-pilot":
        return gr.update(visible=False), gr.update(visible=False), gr.update(visible=True)
    return gr.update(visible=True), gr.update(visible=False), gr.update(visible=False)


# --- Page 1: Data Cockpit ---
def get_ai_suggestions(state_dict, api_key):
    """Generate proactive analytical suggestions from the AI.

    Returns 6 outputs: a status/markdown update plus exactly five suggestion
    buttons (hidden ones pad the list).
    """
    # BUGFIX: both early returns previously emitted 2 outputs while the
    # success/error paths emit 6 — pad to a consistent arity.
    if not api_key:
        return "Enter your Gemini API key to get suggestions.", *[gr.update(visible=False)] * 5
    if not state_dict:
        return "Upload data first.", *[gr.update(visible=False)] * 5
    metadata = state_dict['metadata']
    prompt = f"""
    Based on the following dataset metadata, generate 3 to 5 specific, actionable, and interesting analytical questions a user might want to ask. Frame them as questions.
    - **Columns:** {', '.join(metadata['columns'])}
    - **Numeric:** {', '.join(metadata['numeric_cols'])}
    - **Categorical:** {', '.join(metadata['categorical_cols'])}
    - **Datetime:** {', '.join(metadata['datetime_cols'])}
    Return ONLY a JSON list of strings. Example: ["What is the trend of sales over time?", "Which category has the highest average price?"]
    """
    try:
        genai.configure(api_key=api_key)
        model = genai.GenerativeModel('gemini-1.5-flash')
        response = model.generate_content(prompt)
        # BUGFIX: strip markdown code fences before parsing, matching the
        # handling in respond_to_chat — Gemini frequently wraps JSON in them.
        response_text = response.text.strip().replace("```json", "").replace("```", "")
        # BUGFIX: cap at 5 so we never emit more buttons than outputs exist.
        suggestions = json.loads(response_text)[:5]
        # Create a button for each suggestion
        buttons = [gr.Button(s, variant="secondary", visible=True) for s in suggestions]
        # Pad with hidden buttons to always have 5 outputs
        buttons += [gr.Button(visible=False)] * (5 - len(buttons))
        return gr.update(visible=False), *buttons
    except Exception as e:
        return f"Could not generate suggestions: {e}", *[gr.update(visible=False)] * 5


def handle_suggestion_click(question_text):
    """When a suggestion button is clicked, switch to the co-pilot page and populate the input."""
    return (
        gr.update(visible=False),  # Hide cockpit
        gr.update(visible=False),  # Hide deep dive
        gr.update(visible=True),   # Show co-pilot
        question_text              # Populate the chat input
    )


# --- Page 2: Deep Dive Dashboard ---
def add_plot_to_dashboard(state_dict, x_col, y_col, plot_type):
    """Generate a plot and add it to the state-managed dashboard.

    Returns (state, accordion update).  `y_col` is optional for histogram
    and bar plots.
    """
    if not x_col:
        return state_dict, gr.update()
    df = state_dict['df']
    title = f"{plot_type.capitalize()}: {y_col} by {x_col}" if y_col else f"Distribution of {x_col}"
    fig = None
    try:
        if plot_type == 'histogram':
            fig = px.histogram(df, x=x_col, title=title)
        elif plot_type == 'box':
            fig = px.box(df, x=x_col, y=y_col, title=title)
        elif plot_type == 'scatter':
            fig = px.scatter(df, x=x_col, y=y_col, title=title, trendline="ols")
        elif plot_type == 'bar':
            # Bar charts show only the 20 most frequent categories.
            counts = df[x_col].value_counts().nlargest(20)
            fig = px.bar(counts, x=counts.index, y=counts.values,
                         title=f"Top 20 Categories for {x_col}")
            fig.update_xaxes(title=x_col)
        if fig:
            state_dict['dashboard_plots'].append(fig)
        # Rebuild the accordion with all plots
        accordion_children = [gr.Plot(fig, visible=True) for fig in state_dict['dashboard_plots']]
        return state_dict, gr.Accordion(label="Your Dashboard Plots",
                                        children=accordion_children, open=True)
    except Exception as e:
        gr.Warning(f"Plotting Error: {e}")
        return state_dict, gr.update()


def clear_dashboard(state_dict):
    """Clear all plots from the dashboard."""
    state_dict['dashboard_plots'] = []
    return state_dict, gr.Accordion(label="Your Dashboard Plots", children=[])


# --- Page 3: AI Co-pilot ---
def respond_to_chat(user_message, history, state_dict, api_key):
    """Handle the advanced chat interaction with the AI Co-pilot.

    Asks Gemini for a JSON {thought, code, explanation} plan, executes the
    code via safe_exec, and returns (history, explanation update, code
    update, plot update, table update) — always 5 outputs.
    """
    if not api_key:
        history.append((user_message, "I need a Gemini API key to function. Please provide it in the sidebar."))
        return history, *[gr.update(visible=False)] * 4
    if not state_dict:
        history.append((user_message, "Please upload a dataset first."))
        return history, *[gr.update(visible=False)] * 4

    history.append((user_message, None))
    metadata = state_dict['metadata']
    prompt = f"""
    You are 'Phoenix Co-pilot', an expert AI data analyst. Your goal is to help a user analyze a pandas DataFrame named `df`.
    **Instructions:**
    1. Carefully understand the user's question.
    2. Formulate a plan (thought process).
    3. Write Python code to execute that plan.
    4. The code can use pandas (pd), numpy (np), and plotly.express (px).
    5. **For plots, assign the figure to a variable `fig` (e.g., `fig = px.histogram(...)`).**
    6. **For table-like results, assign the final DataFrame to a variable `result_df` (e.g., `result_df = df.describe()`).**
    7. Do not modify the original `df`. Use `df.copy()` if needed.
    8. Provide a brief, user-friendly explanation of the result.
    9. Respond **ONLY** with a single, raw JSON object with keys: "thought", "code", "explanation".
    **DataFrame Metadata:**
    - Columns and dtypes: {metadata['dtypes']}
    - First 5 rows: {metadata['head']}
    **User Question:** "{user_message}"
    **Your JSON Response:**
    """
    try:
        genai.configure(api_key=api_key)
        model = genai.GenerativeModel('gemini-1.5-flash')
        response = model.generate_content(prompt)
        # Strip markdown fences the model may wrap the JSON in.
        response_text = response.text.strip().replace("```json", "").replace("```", "")
        response_json = json.loads(response_text)
        thought = response_json.get("thought", "Thinking...")
        code_to_run = response_json.get("code", "")
        explanation = response_json.get("explanation", "Here is the result.")

        stdout, fig_result, df_result, error = safe_exec(
            code_to_run, {'df': state_dict['df'], 'px': px, 'pd': pd, 'np': np})

        bot_message = f"🤔 **Thought:** *{thought}*"
        history[-1] = (user_message, bot_message)

        # Prepare outputs, making them visible only if they contain content
        output_updates = [gr.update(visible=False, value=None)] * 4  # [explanation, code, plot, table]
        if explanation:
            output_updates[0] = gr.update(visible=True, value=f"**Phoenix Co-pilot:** {explanation}")
        if code_to_run:
            output_updates[1] = gr.update(visible=True, value=code_to_run)
        # BUGFIX: use an explicit None check (object truthiness of a plotly
        # figure is not a reliable presence test), mirroring the df check.
        if fig_result is not None:
            output_updates[2] = gr.update(visible=True, value=fig_result)
        if df_result is not None:
            output_updates[3] = gr.update(visible=True, value=df_result)
        if stdout:
            # Append stdout to explanation if it exists
            new_explanation = (output_updates[0]['value'] if output_updates[0]['visible'] else "") \
                + f"\n\n**Console Output:**\n```\n{stdout}\n```"
            output_updates[0] = gr.update(visible=True, value=new_explanation)
        if error:
            error_explanation = f"**Phoenix Co-pilot:** I encountered an error. Here's the details:\n\n`{error}`"
            output_updates[0] = gr.update(visible=True, value=error_explanation)

        return history, *output_updates
    except Exception as e:
        error_msg = f"A critical error occurred: {e}. The AI may have returned an invalid response. Please try rephrasing your question."
        history[-1] = (user_message, error_msg)
        return history, *[gr.update(visible=False)] * 4


# --- Gradio UI Definition ---
def create_gradio_interface():
    """Build the Blocks UI: sidebar + welcome/cockpit/deep-dive/co-pilot pages."""
    with gr.Blocks(theme=gr.themes.Monochrome(primary_hue="indigo", secondary_hue="blue"),
                   css=CSS, title="Phoenix AI Data Explorer") as demo:
        global_state = gr.State({})
        with gr.Row():
            # --- Sidebar ---
            with gr.Column(scale=1, elem_classes="sidebar"):
                gr.Markdown("# 🚀 Phoenix UI")
                gr.Markdown("AI Data Explorer")
                # Navigation buttons
                cockpit_btn = gr.Button("📊 Data Cockpit", elem_classes="selected")
                deep_dive_btn = gr.Button("🔍 Deep Dive Builder")
                copilot_btn = gr.Button("🤖 AI Co-pilot")
                gr.Markdown("---")
                file_input = gr.File(label="📁 Upload New CSV", file_types=[".csv"])
                status_output = gr.Markdown("Status: Awaiting data...")
                gr.Markdown("---")
                api_key_input = gr.Textbox(label="🔑 Gemini API Key", type="password",
                                           placeholder="Enter key here...")
                suggestion_btn = gr.Button("Get Smart Suggestions", variant="secondary")

            # --- Main Content Area ---
            with gr.Column(scale=4):
                # Welcome Page (Visible initially)
                with gr.Column(visible=True) as welcome_page:
                    gr.Markdown("# Welcome to the AI Data Explorer (Phoenix UI)", elem_id="welcome-header")
                    gr.Markdown("Please **upload a CSV file** and **enter your Gemini API key** in the sidebar to begin.")
                    # CORRECTED: Uses a local file, 'workflow.png', which must be in the same directory.
                    gr.Image(value="workflow.png", label="Workflow", show_label=False,
                             show_download_button=False, container=False)

                # Page 1: Data Cockpit (Hidden initially)
                with gr.Column(visible=False) as cockpit_page:
                    gr.Markdown("## 📊 Data Cockpit")
                    # NOTE(review): the source chunk is truncated here, mid-way
                    # through the first stat-card row.  The remainder of this
                    # function (stat cards, the deep-dive and co-pilot pages,
                    # and all event wiring) lies outside this view and must be
                    # restored from the full file — do not ship as-is.
    return demo  # presumably returned to the caller; confirm against full file