import streamlit as st
import pandas as pd
from typing import Dict, List, Optional, Any
from pydantic import BaseModel, Field
import base64
import io
import matplotlib.pyplot as plt
import seaborn as sns
from abc import ABC, abstractmethod
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.stattools import adfuller
from langchain.prompts import PromptTemplate
from groq import Groq
import os
import numpy as np
from scipy.stats import ttest_ind, f_oneway
import json

# Initialize Groq client (expects GROQ_API_KEY in the environment)
client = Groq(api_key=os.environ.get("GROQ_API_KEY"))

# ---------------------- Base Classes and Schemas ---------------------------
class ResearchInput(BaseModel):
    """Base schema for research tool inputs"""
    data_key: str = Field(..., description="Session state key containing DataFrame")
    columns: Optional[List[str]] = Field(None, description="List of columns to analyze")

class TemporalAnalysisInput(ResearchInput):
    """Schema for temporal analysis"""
    time_col: str = Field(..., description="Name of timestamp column")
    value_col: str = Field(..., description="Name of value column to analyze")

class HypothesisInput(ResearchInput):
    """Schema for hypothesis testing"""
    group_col: str = Field(..., description="Categorical column defining groups")
    value_col: str = Field(..., description="Numerical column to compare")

class ModelTrainingInput(ResearchInput):
    """Schema for model training"""
    target_col: str = Field(..., description="Name of target column")

class DataAnalyzer(ABC):
    """Abstract base class for data analysis modules"""
    @abstractmethod
    def invoke(self, **kwargs) -> Dict[str, Any]:
        ...

# ---------------------- Concrete Analyzer Implementations ---------------------------
class AdvancedEDA(DataAnalyzer):
    """Comprehensive Exploratory Data Analysis"""
    def invoke(self, data: pd.DataFrame, **kwargs) -> Dict[str, Any]:
        try:
            analysis = {
                "dimensionality": {
                    "rows": len(data),
                    "columns": list(data.columns),
                    "memory_usage": f"{data.memory_usage().sum() / 1e6:.2f} MB"
                },
                "statistical_profile": data.describe(percentiles=[.25, .5, .75]).to_dict(),
                "temporal_analysis": {
                    "date_ranges": {
                        col: {
                            # Cast to str so Timestamps stay JSON-serializable for st.json
                            "min": str(data[col].min()),
                            "max": str(data[col].max())
                        } for col in data.select_dtypes(include='datetime').columns
                    }
                },
                "data_quality": {
                    "missing_values": data.isnull().sum().to_dict(),
                    "duplicates": int(data.duplicated().sum()),
                    "cardinality": {
                        col: int(data[col].nunique()) for col in data.columns
                    }
                }
            }
            return analysis
        except Exception as e:
            return {"error": f"EDA Failed: {str(e)}"}

class DistributionVisualizer(DataAnalyzer):
    """Distribution visualizations"""
    def invoke(self, data: pd.DataFrame, columns: List[str], **kwargs) -> str:
        try:
            plt.figure(figsize=(12, 6))
            for i, col in enumerate(columns, 1):
                plt.subplot(1, len(columns), i)
                sns.histplot(data[col], kde=True, stat="density")
                plt.title(f'Distribution of {col}', fontsize=10)
                plt.xticks(fontsize=8)
                plt.yticks(fontsize=8)
            plt.tight_layout()
            buf = io.BytesIO()
            plt.savefig(buf, format='png', dpi=300, bbox_inches='tight')
            plt.close()
            return base64.b64encode(buf.getvalue()).decode()
        except Exception as e:
            return f"Visualization Error: {str(e)}"

class TemporalAnalyzer(DataAnalyzer):
    """Time series analysis"""
    def invoke(self, data: pd.DataFrame, time_col: str, value_col: str, **kwargs) -> Dict[str, Any]:
        try:
            ts_data = data.set_index(pd.to_datetime(data[time_col]))[value_col].dropna()
            # period=365 assumes daily data with yearly seasonality; the
            # decomposition needs at least two full periods of observations.
            decomposition = seasonal_decompose(ts_data, period=365)
            # Use the figure returned by plot() directly instead of opening a
            # second, empty figure that would never be closed.
            fig = decomposition.plot()
            fig.set_size_inches(12, 8)
            fig.tight_layout()
            buf = io.BytesIO()
            fig.savefig(buf, format='png')
            plt.close(fig)
            plot_data = base64.b64encode(buf.getvalue()).decode()
            return {
                "trend_statistics": {
                    # ADF test p-value: small values suggest stationarity
                    "stationarity": adfuller(ts_data)[1],
                    "seasonality_strength": float(decomposition.seasonal.max())
                },
                "visualization": plot_data
            }
        except Exception as e:
            return {"error": f"Temporal Analysis Failed: {str(e)}"}

class HypothesisTester(DataAnalyzer):
    """Statistical hypothesis testing"""
    def invoke(self, data: pd.DataFrame, group_col: str, value_col: str, **kwargs) -> Dict[str, Any]:
        try:
            groups = data[group_col].dropna().unique()
            if len(groups) < 2:
                return {"error": "Insufficient groups for comparison"}
            # Build the per-group samples once; drop NaNs so the tests do not
            # silently return nan statistics.
            group_data = [data.loc[data[group_col] == g, value_col].dropna() for g in groups]
            if len(groups) == 2:
                stat, p = ttest_ind(*group_data)
                test_type = "Independent t-test"
            else:
                stat, p = f_oneway(*group_data)
                test_type = "ANOVA"
            return {
                "test_type": test_type,
                "test_statistic": stat,
                "p_value": p,
                "effect_size": {
                    # Cohen's d with a simple pooled-variance denominator
                    "cohens_d": abs(group_data[0].mean() - group_data[1].mean()) / np.sqrt(
                        (group_data[0].var() + group_data[1].var()) / 2
                    ) if len(groups) == 2 else None
                },
                "interpretation": self.interpret_p_value(p)
            }
        except Exception as e:
            return {"error": f"Hypothesis Testing Failed: {str(e)}"}

    def interpret_p_value(self, p: float) -> str:
        if p < 0.001:
            return "Very strong evidence against H0"
        elif p < 0.01:
            return "Strong evidence against H0"
        elif p < 0.05:
            return "Evidence against H0"
        elif p < 0.1:
            return "Weak evidence against H0"
        else:
            return "No significant evidence against H0"

class LogisticRegressionTrainer(DataAnalyzer):
    """Logistic Regression Model Trainer"""
    def invoke(self, data: pd.DataFrame, target_col: str, columns: List[str], **kwargs) -> Dict[str, Any]:
        try:
            # Drop rows with missing values; LogisticRegression cannot handle NaNs
            clean = data[columns + [target_col]].dropna()
            X = clean[columns]
            y = clean[target_col]
            X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
            model = LogisticRegression(max_iter=1000)
            model.fit(X_train, y_train)
            y_pred = model.predict(X_test)
            accuracy = accuracy_score(y_test, y_pred)
            return {
                "model_type": "Logistic Regression",
                "accuracy": accuracy,
                "model_params": model.get_params()
            }
        except Exception as e:
            return {"error": f"Logistic Regression Model Error: {str(e)}"}

# ---------------------- Groq Research Agent ---------------------------
class GroqResearcher:
    """Advanced AI Research Engine using Groq"""
    def __init__(self, model_name="mixtral-8x7b-32768"):
        self.model_name = model_name
        self.system_template = """You are a senior data scientist at a research institution.
Analyze this dataset with rigorous statistical methods and provide academic-quality insights:
{dataset_info}

User Question: {query}

Required Format:
- Executive Summary (1 paragraph)
- Methodology (bullet points)
- Key Findings (numbered list)
- Limitations
- Recommended Next Steps"""

    def research(self, query: str, data: pd.DataFrame) -> str:
        """Conduct academic-level analysis using Groq"""
        try:
            dataset_info = f"""
            Dataset Dimensions: {data.shape}
            Variables: {', '.join(data.columns)}
            Temporal Coverage: {data.select_dtypes(include='datetime').columns.tolist()}
            Missing Values: {data.isnull().sum().to_dict()}
            """
            prompt = PromptTemplate.from_template(self.system_template).format(
                dataset_info=dataset_info,
                query=query
            )
            completion = client.chat.completions.create(
                messages=[
                    {"role": "system", "content": "You are a research AI assistant"},
                    {"role": "user", "content": prompt}
                ],
                model=self.model_name,
                temperature=0.2,
                max_tokens=4096,
                stream=False
            )
            return completion.choices[0].message.content
        except Exception as e:
            return f"Research Error: {str(e)}"

# ---------------------- Business Logic Layer ---------------------------
class BusinessRule(BaseModel):
    name: str
    condition: str
    action: str

class BusinessRulesEngine:
    def __init__(self):
        self.rules: Dict[str, BusinessRule] = {}

    def add_rule(self, rule: BusinessRule):
        self.rules[rule.name] = rule

    def execute_rules(self, data: pd.DataFrame):
        results = {}
        for rule_name, rule in self.rules.items():
            try:
                # WARNING: eval() executes arbitrary code; only evaluate
                # conditions supplied by trusted users.
                if eval(rule.condition, {}, {"df": data}):
                    results[rule_name] = {"rule_matched": True, "action": rule.action}
                else:
                    results[rule_name] = {"rule_matched": False, "action": None}
            except Exception as e:
                results[rule_name] = {"rule_matched": False, "error": str(e)}
        return results

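# Example sketch: rule conditions are plain Python expressions evaluated with
# the DataFrame bound to the name `df`; the 'sales' column below is
# hypothetical. Because eval() executes arbitrary code, rule strings must come
# from trusted users only.
#
#   engine = BusinessRulesEngine()
#   engine.add_rule(BusinessRule(
#       name="high_sales",
#       condition="df['sales'].sum() > 100",
#       action="Notify the sales team",
#   ))
#   engine.execute_rules(df)
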
class KPI(BaseModel):
    name: str
    calculation: str
    threshold: Optional[float] = None

class KPIMonitoring:
    def __init__(self):
        self.kpis: Dict[str, KPI] = {}

    def add_kpi(self, kpi: KPI):
        self.kpis[kpi.name] = kpi

    def calculate_kpis(self, data: pd.DataFrame):
        results = {}
        for kpi_name, kpi in self.kpis.items():
            try:
                # Same caveat as business rules: eval() only on trusted input.
                results[kpi_name] = eval(kpi.calculation, {}, {"df": data})
            except Exception as e:
                results[kpi_name] = {"error": str(e)}
        return results

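# Example sketch: KPI calculations follow the same eval-based convention, with
# the DataFrame bound to `df`; the 'revenue' column is hypothetical.
#
#   monitor = KPIMonitoring()
#   monitor.add_kpi(KPI(name="total_revenue",
#                       calculation="df['revenue'].sum()",
#                       threshold=10000.0))
#   monitor.calculate_kpis(df)
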
class ForecastingEngine(ABC):
    @abstractmethod
    def predict(self, data: pd.DataFrame, **kwargs) -> pd.DataFrame:
        ...

class SimpleForecasting(ForecastingEngine):
    def predict(self, data: pd.DataFrame, period: int = 7, **kwargs) -> pd.DataFrame:
        # Placeholder for an actual forecasting implementation
        return pd.DataFrame({"forecast": [f"Forecast for the next {period} days"]})

# ---------------------- Insights and Reporting Layer ---------------------------
class AutomatedInsights:
    def __init__(self):
        self.analyses: Dict[str, DataAnalyzer] = {
            "EDA": AdvancedEDA(),
            "temporal": TemporalAnalyzer(),
            "distribution": DistributionVisualizer(),
            "hypothesis": HypothesisTester(),
            "model": LogisticRegressionTrainer()
        }

    def generate_insights(self, data: pd.DataFrame, analysis_names: List[str], **kwargs):
        results = {}
        for name in analysis_names:
            if name in self.analyses:
                try:
                    results[name] = self.analyses[name].invoke(data=data, **kwargs)
                except TypeError as e:
                    # Analyzers such as "temporal" require extra kwargs
                    # (e.g. time_col, value_col) that may not be supplied here
                    results[name] = {"error": f"Missing required arguments: {e}"}
            else:
                results[name] = {"error": "Analysis not found"}
        return results

class Dashboard:
    def __init__(self):
        self.layout: Dict[str, str] = {}

    def add_visualisation(self, vis_name: str, vis_type: str):
        self.layout[vis_name] = vis_type

    def display_dashboard(self, data_dict: Dict[str, pd.DataFrame]):
        st.header("Dashboard")
        for vis_name, vis_type in self.layout.items():
            st.subheader(vis_name)
            if vis_type == "table":
                if vis_name in data_dict:
                    st.table(data_dict[vis_name])
                else:
                    st.write("Data not found")
            elif vis_type == "plot":
                if vis_name in data_dict:
                    df = data_dict[vis_name]
                    if len(df.columns) > 1:
                        fig = plt.figure()
                        sns.lineplot(data=df)
                        st.pyplot(fig)
                    else:
                        st.write("Line plots require more than one column")
                else:
                    st.write("Data not found")

class AutomatedReports:
    def __init__(self):
        self.report_definition: Dict[str, str] = {}

    def create_report_definition(self, report_name: str, definition: str):
        self.report_definition[report_name] = definition

    def generate_report(self, report_name: str, data: Dict[str, pd.DataFrame]):
        if report_name not in self.report_definition:
            return {"error": "Report name not found"}
        st.header(f"Report: {report_name}")
        st.write(f"Report Definition: {self.report_definition[report_name]}")
        for df_name, df in data.items():
            st.subheader(f"Data: {df_name}")
            st.table(df)

# ---------------------- Data Acquisition Layer ---------------------------
class DataSource(ABC):
    """Base class for data sources."""
    @abstractmethod
    def connect(self) -> None:
        """Connect to the data source."""
        ...

    @abstractmethod
    def fetch_data(self, query: str, **kwargs) -> pd.DataFrame:
        """Fetch the data based on a specific query."""
        ...

class CSVDataSource(DataSource):
    """Data source for CSV files."""
    def __init__(self, file_path: str):
        self.file_path = file_path
        self.data: Optional[pd.DataFrame] = None

    def connect(self):
        self.data = pd.read_csv(self.file_path)

    def fetch_data(self, query: Optional[str] = None, **kwargs) -> pd.DataFrame:
        if self.data is None:
            raise Exception("Not connected; call connect() first")
        return self.data

class DatabaseSource(DataSource):
    def __init__(self, connection_string: str, database_type: str):
        self.connection_string = connection_string
        self.database_type = database_type
        self.connection = None

    def connect(self):
        if self.database_type.lower() == "sql":
            # Placeholder for an actual database connection
            self.connection = "Connected to SQL Database"
        else:
            raise Exception(f"Database type '{self.database_type}' is not supported")

    def fetch_data(self, query: str, **kwargs) -> pd.DataFrame:
        if self.connection is None:
            raise Exception("Not connected; call connect() first")
        # Placeholder for the actual data fetching
        return pd.DataFrame({"result": [f"Fetched data based on query: {query}"]})

class DataIngestion:
    def __init__(self):
        self.sources: Dict[str, DataSource] = {}

    def add_source(self, source_name: str, source: DataSource):
        self.sources[source_name] = source

    def ingest_data(self, source_name: str, query: Optional[str] = None, **kwargs) -> pd.DataFrame:
        if source_name not in self.sources:
            raise Exception(f"Source '{source_name}' not found")
        source = self.sources[source_name]
        source.connect()
        return source.fetch_data(query, **kwargs)

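# Usage sketch: register a source, then ingest it by name ('sales.csv' is a
# hypothetical path):
#
#   ingestion = DataIngestion()
#   ingestion.add_source("sales", CSVDataSource(file_path="sales.csv"))
#   df = ingestion.ingest_data("sales")
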
class DataModel(BaseModel):
    name: str
    kpis: List[str] = Field(default_factory=list)
    dimensions: List[str] = Field(default_factory=list)
    custom_calculations: Optional[Dict[str, str]] = None
    relations: Optional[Dict[str, str]] = None  # Example: {table1: table2}

    def to_json(self):
        return json.dumps(self.dict())

    @staticmethod
    def from_json(json_str):
        return DataModel(**json.loads(json_str))

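# Round-trip sketch (field values are illustrative):
#
#   model = DataModel(name="sales_model", kpis=["total_revenue"], dimensions=["region"])
#   restored = DataModel.from_json(model.to_json())
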
class DataModelling:
    def __init__(self):
        self.models: Dict[str, DataModel] = {}

    def add_model(self, model: DataModel):
        self.models[model.name] = model

    def get_model(self, model_name: str) -> DataModel:
        if model_name not in self.models:
            raise Exception(f"Model '{model_name}' not found")
        return self.models[model_name]

# ---------------------- Main Streamlit Application ---------------------------
def main():
    st.set_page_config(page_title="AI BI Automation Platform", layout="wide")
    st.title("🚀 AI-Powered Business Intelligence Automation Platform")

    # Session State
    if 'data' not in st.session_state:
        st.session_state.data = {}  # stores DataFrames keyed by source name
    if 'data_ingestion' not in st.session_state:
        st.session_state.data_ingestion = DataIngestion()
    if 'data_modelling' not in st.session_state:
        st.session_state.data_modelling = DataModelling()
    if 'business_rules' not in st.session_state:
        st.session_state.business_rules = BusinessRulesEngine()
    if 'kpi_monitoring' not in st.session_state:
        st.session_state.kpi_monitoring = KPIMonitoring()
    if 'forecasting_engine' not in st.session_state:
        st.session_state.forecasting_engine = SimpleForecasting()
    if 'automated_insights' not in st.session_state:
        st.session_state.automated_insights = AutomatedInsights()
    if 'dashboard' not in st.session_state:
        st.session_state.dashboard = Dashboard()
    if 'automated_reports' not in st.session_state:
        st.session_state.automated_reports = AutomatedReports()
    if 'researcher' not in st.session_state:
        st.session_state.researcher = GroqResearcher()

    # Sidebar for Data Management
    with st.sidebar:
        st.header("⚙️ Data Management")
        data_source_selection = st.selectbox("Select Data Source Type", ["CSV", "SQL Database"])
        if data_source_selection == "CSV":
            uploaded_file = st.file_uploader("Upload research dataset (CSV)", type=["csv"])
            if uploaded_file:
                source_name = st.text_input("Data Source Name")
                if source_name:
                    try:
                        csv_source = CSVDataSource(file_path=uploaded_file)
                        st.session_state.data_ingestion.add_source(source_name, csv_source)
                        st.success(f"Uploaded {uploaded_file.name}")
                    except Exception as e:
                        st.error(f"Error loading dataset: {e}")
        elif data_source_selection == "SQL Database":
            conn_str = st.text_input("Enter connection string for SQL DB")
            if conn_str:
                source_name = st.text_input("Data Source Name")
                if source_name:
                    try:
                        sql_source = DatabaseSource(connection_string=conn_str, database_type="sql")
                        st.session_state.data_ingestion.add_source(source_name, sql_source)
                        st.success(f"Added SQL DB Source {source_name}")
                    except Exception as e:
                        st.error(f"Error loading database source: {e}")

        # Render the source selector and query box before the button: widgets
        # created inside an `if st.button(...)` block disappear on the next
        # rerun, so the original ordering could never complete an ingest.
        if st.session_state.data_ingestion.sources:
            source_name_to_fetch = st.selectbox("Select Data Source to Ingest",
                                                list(st.session_state.data_ingestion.sources.keys()))
            query = st.text_area("Optional Query to Fetch data")
            if st.button("Ingest Data") and source_name_to_fetch:
                with st.spinner("Ingesting data..."):
                    try:
                        data = st.session_state.data_ingestion.ingest_data(source_name_to_fetch, query)
                        st.session_state.data[source_name_to_fetch] = data
                        st.success(f"Ingested data from {source_name_to_fetch}")
                    except Exception as e:
                        st.error(f"Ingestion failed: {e}")
        else:
            st.info("No data source added; please add a data source")

    if st.session_state.data:
        col1, col2 = st.columns([1, 3])
        with col1:
            st.subheader("Dataset Metadata")
            data_source_keys = list(st.session_state.data.keys())
            selected_data_key = st.selectbox("Select Dataset", data_source_keys)
            if selected_data_key:
                data = st.session_state.data[selected_data_key]
                st.json({
                    "Variables": list(data.columns),
                    "Time Range": {
                        col: {
                            # Cast to str so Timestamps stay JSON-serializable
                            "min": str(data[col].min()),
                            "max": str(data[col].max())
                        } for col in data.select_dtypes(include='datetime').columns
                    },
                    "Size": f"{data.memory_usage().sum() / 1e6:.2f} MB"
                })
        with col2:
            analysis_tab, business_logic_tab, insights_tab, reports_tab, custom_research_tab = st.tabs([
                "Data Analysis",
                "Business Logic",
                "Insights",
                "Reports",
                "Custom Research"
            ])
            with analysis_tab:
                if selected_data_key:
                    analysis_type = st.selectbox("Select Analysis Mode", [
                        "Exploratory Data Analysis",
                        "Temporal Pattern Analysis",
                        "Comparative Statistics",
                        "Distribution Analysis",
                        "Train Logistic Regression Model"
                    ])
                    data = st.session_state.data[selected_data_key]

                    if analysis_type == "Exploratory Data Analysis":
                        analyzer = AdvancedEDA()
                        eda_result = analyzer.invoke(data=data)
                        st.subheader("Data Quality Report")
                        st.json(eda_result)

                    elif analysis_type == "Temporal Pattern Analysis":
                        time_col = st.selectbox("Temporal Variable",
                                                data.select_dtypes(include='datetime').columns)
                        value_col = st.selectbox("Analysis Variable",
                                                 data.select_dtypes(include=np.number).columns)
                        if time_col and value_col:
                            analyzer = TemporalAnalyzer()
                            result = analyzer.invoke(data=data, time_col=time_col, value_col=value_col)
                            if "visualization" in result:
                                st.image(f"data:image/png;base64,{result['visualization']}")
                            # Show the statistics without the large base64 payload
                            st.json({k: v for k, v in result.items() if k != "visualization"})
elif analysis_type == "Comparative Statistics": | |
group_col = st.selectbox("Grouping Variable", | |
data.select_dtypes(include='category').columns) | |
value_col = st.selectbox("Metric Variable", | |
data.select_dtypes(include=np.number).columns) | |
if group_col and value_col: | |
analyzer = HypothesisTester() | |
result = analyzer.invoke(data=data, group_col=group_col, value_col=value_col) | |
st.subheader("Statistical Test Results") | |
st.json(result) | |
elif analysis_type == "Distribution Analysis": | |
num_cols = data.select_dtypes(include=np.number).columns.tolist() | |
selected_cols = st.multiselect("Select Variables", num_cols) | |
if selected_cols: | |
analyzer = DistributionVisualizer() | |
img_data = analyzer.invoke(data=data, columns=selected_cols) | |
st.image(f"data:image/png;base64,{img_data}") | |
elif analysis_type == "Train Logistic Regression Model": | |
num_cols = data.select_dtypes(include=np.number).columns.tolist() | |
target_col = st.selectbox("Select Target Variable", | |
data.columns.tolist()) | |
selected_cols = st.multiselect("Select Feature Variables", num_cols) | |
if selected_cols and target_col: | |
analyzer = LogisticRegressionTrainer() | |
result = analyzer.invoke(data=data, target_col=target_col, columns=selected_cols) | |
st.subheader("Logistic Regression Model Results") | |
st.json(result) | |
            with business_logic_tab:
                st.header("Business Logic")

                st.subheader("Data Modelling")
                model_name = st.text_input("Enter the name of the model")
                if model_name:
                    kpis = st.text_input("Enter KPIs (comma-separated)")
                    dimensions = st.text_input("Enter Dimensions (comma-separated)")
                    custom_calculations = st.text_area("Custom calculations (JSON format), use {'df': DataFrame}")
                    relations = st.text_area("Relations (JSON format), use {'table1': 'table2'}")
                    if st.button("Add Data Model"):
                        try:
                            custom_calculations_dict = None if not custom_calculations else json.loads(custom_calculations)
                            relations_dict = None if not relations else json.loads(relations)
                            model = DataModel(name=model_name,
                                              kpis=[kpi.strip() for kpi in kpis.split(',')] if kpis else [],
                                              dimensions=[dim.strip() for dim in dimensions.split(',')] if dimensions else [],
                                              custom_calculations=custom_calculations_dict,
                                              relations=relations_dict)
                            st.session_state.data_modelling.add_model(model)
                            st.success(f"Added data model {model_name}")
                        except Exception as e:
                            st.error(f"Error creating data model: {e}")

                st.subheader("Business Rules")
                rule_name = st.text_input("Enter Rule Name")
                condition = st.text_area("Enter Rule Condition (use 'df' for the data frame), e.g. df['sales'] > 100")
                action = st.text_area("Enter Action to be Taken on Rule Match")
                if st.button("Add Business Rule"):
                    try:
                        rule = BusinessRule(name=rule_name, condition=condition, action=action)
                        st.session_state.business_rules.add_rule(rule)
                        st.success("Added Business Rule")
                    except Exception as e:
                        st.error(f"Error in rule definition: {e}")

                st.subheader("KPI Definition")
                kpi_name = st.text_input("Enter KPI name")
                kpi_calculation = st.text_area("Enter KPI calculation (use 'df' for the data frame), e.g. df['revenue'].sum()")
                threshold = st.text_input("Enter Threshold for KPI")
                if st.button("Add KPI"):
                    try:
                        threshold_value = float(threshold) if threshold else None
                        kpi = KPI(name=kpi_name, calculation=kpi_calculation, threshold=threshold_value)
                        st.session_state.kpi_monitoring.add_kpi(kpi)
                        st.success(f"Added KPI {kpi_name}")
                    except Exception as e:
                        st.error(f"Error creating KPI: {e}")
                if selected_data_key:
                    data = st.session_state.data[selected_data_key]
                    if st.button("Execute Business Rules"):
                        with st.spinner("Executing Business Rules..."):
                            result = st.session_state.business_rules.execute_rules(data)
                            st.json(result)
                    if st.button("Calculate KPIs"):
                        with st.spinner("Calculating KPIs..."):
                            result = st.session_state.kpi_monitoring.calculate_kpis(data)
                            st.json(result)
            with insights_tab:
                if selected_data_key:
                    data = st.session_state.data[selected_data_key]
                    available_analysis = ["EDA", "temporal", "distribution", "hypothesis", "model"]
                    selected_analysis = st.multiselect("Select Analysis", available_analysis)
                    if st.button("Generate Automated Insights"):
                        with st.spinner("Generating Insights..."):
                            results = st.session_state.automated_insights.generate_insights(
                                data, analysis_names=selected_analysis)
                            st.json(results)
            with reports_tab:
                st.header("Reports")
                report_name = st.text_input("Report Name")
                report_def = st.text_area("Report definition")
                if st.button("Create Report Definition"):
                    st.session_state.automated_reports.create_report_definition(report_name, report_def)
                    st.success("Report definition created")
                if selected_data_key:
                    data = st.session_state.data
                    if st.button("Generate Report"):
                        with st.spinner("Generating Report..."):
                            result = st.session_state.automated_reports.generate_report(report_name, data)
                            # generate_report renders to the page; it only returns
                            # a dict when the report name is unknown
                            if isinstance(result, dict) and "error" in result:
                                st.error(result["error"])
            with custom_research_tab:
                research_query = st.text_area("Enter Research Question:", height=150,
                                              placeholder="E.g., 'What factors are most predictive of X outcome?'")
                if st.button("Execute Custom Research"):
                    if selected_data_key and research_query:
                        with st.spinner("Conducting rigorous analysis..."):
                            data = st.session_state.data[selected_data_key]
                            result = st.session_state.researcher.research(research_query, data)
                            st.markdown("## Research Findings")
                            st.markdown(result)
                    else:
                        st.warning("Select a dataset and enter a research question first")

if __name__ == "__main__": | |
main() |