mgbam committed
Commit e4b2570 · verified · 1 Parent(s): a875537

Update app.py

Files changed (1):
  1. app.py +177 -140
app.py CHANGED
@@ -1,152 +1,189 @@
- """app.py — BizIntel AI Ultra (Gemini‑only, v4)
- Robust BI copilot featuring:
- • CSV / Excel / Parquet uploads + live SQL ingestion
- • Memory‑safe sampling for large files (≤ 5 M rows sample)
- • Gemini‑generated narrative insights & quick schema audit
- • Interactive EDA (histogram + box, correlation heatmap)
- • Tunable ARIMA forecasting (p,d,q & horizon)
- • One‑click strategy brief download
- """
- from __future__ import annotations
- import io, os, tempfile
- from pathlib import Path
- from typing import List
-
- import pandas as pd
- import plotly.express as px
  import streamlit as st
  from sqlalchemy import create_engine
  from statsmodels.tsa.arima.model import ARIMA
- import google.generativeai as genai

- # ───────────────────── 0 · PAGE CONFIG ──────────────────────
  st.set_page_config(
-     page_title="BizIntel AI Ultra",
-     page_icon="📊",
      layout="wide",
      initial_sidebar_state="expanded",
  )

- auth_key = None
- try:
-     auth_key = st.secrets["GEMINI_APIKEY"]  # type: ignore[attr-defined]
- except Exception:
-     auth_key = os.getenv("GEMINI_APIKEY")

- if not auth_key:
-     st.error(
-         "❌ **GEMINI_APIKEY** missing. Add it in *Settings → Secrets* or set env var before running."
      )
-     st.stop()
-
- genai.configure(api_key=auth_key)
- GEM_MODEL = "gemini-1.5-pro-latest"
- TMP = Path(tempfile.gettempdir())
-
- # ───────────────────── 1 · HELPERS ──────────────────────────
- @st.cache_data(show_spinner=False)
- def load_file(buf: io.BufferedReader, sample: bool = False) -> pd.DataFrame:
-     suf = Path(buf.name).suffix.lower()
-     if suf in {".xls", ".xlsx"}:
-         return pd.read_excel(buf, engine="openpyxl")
-     if suf == ".parquet":
-         return pd.read_parquet(buf)
-     return pd.read_csv(buf, nrows=5_000_000 if sample else None)
-
- @st.cache_data(show_spinner=False)
- def list_sql_tables(uri: str) -> List[str]:
-     return create_engine(uri).table_names()
-
- @st.cache_data(show_spinner=True)
- def fetch_sql_table(uri: str, tbl: str) -> pd.DataFrame:
-     return pd.read_sql_table(tbl, create_engine(uri))
-
- @st.cache_data(show_spinner=False)
- def gemini(prompt: str) -> str:
-     return genai.GenerativeModel(GEM_MODEL).generate_content(prompt).text.strip()
-
- # ───────────────────── 2 · DATA LOAD UI ─────────────────────
- st.title("📊 BizIntel AI Ultra Gemini 1.5 Pro BI Copilot")
- mode = st.sidebar.radio("Data source", ["Upload file", "SQL DB"], horizontal=True)
-
- df: pd.DataFrame = pd.DataFrame()
- if mode == "Upload file":
-     upl = st.sidebar.file_uploader("CSV / Excel / Parquet", ["csv", "xls", "xlsx", "parquet"], help="≤ 2 GB")
-     sample = st.sidebar.checkbox("Sample first 5 M rows only")
-     if upl:
-         df = load_file(upl, sample)
- else:
-     uri = st.sidebar.text_input("SQLAlchemy URI")
-     if uri:
-         tbl = st.sidebar.selectbox("Table", list_sql_tables(uri))
-         if tbl:
-             df = fetch_sql_table(uri, tbl)
-
- if df.empty:
-     st.info("⬅️ Load data to start analysis.")
-     st.stop()
-
- st.success("✅ Data loaded")
- st.dataframe(df.head(), use_container_width=True)
-
- # ───────────────────── 3 · OVERVIEW & GEMINI INSIGHT ─────────
- rows, cols = df.shape
- missing_pct = df.isna().sum().sum() / (rows * cols) * 100
- m1, m2, m3 = st.columns(3)
- m1.metric("Rows", f"{rows:,}")
- m2.metric("Columns", cols)
- m3.metric("Missing %", f"{missing_pct:.1f}")
-
- st.subheader("🧠 Gemini Insights")
- with st.spinner("Generating narrative…"):
-     summ_json = df.describe(include="all", datetime_is_numeric=True).round(2).to_json()
-     st.markdown(gemini(
-         "You are a senior BI analyst. Provide five bullet insights and three actions for this dataset:\n\n" + summ_json
-     ))
-
- # ───────────────────── 4 · TIME‑SERIES PREP ──────────────────
- # attempt datetime coercion
- for col in df.columns:
-     if not pd.api.types.is_datetime64_any_dtype(df[col]):
          try:
-             df[col] = pd.to_datetime(df[col])
-         except: pass
-
- DATE_COL = st.selectbox("Date column", [c for c in df.columns if pd.api.types.is_datetime64_any_dtype(df[c])])
- NUM_COL = st.selectbox("Numeric metric", [c for c in df.select_dtypes("number").columns])
-
- ts = df[[DATE_COL, NUM_COL]].dropna().groupby(DATE_COL)[NUM_COL].mean().sort_index()
- fig_tr = px.line(ts, title=f"{NUM_COL} Trend", labels={"index": "Date", NUM_COL: NUM_COL})
- st.plotly_chart(fig_tr, use_container_width=True)
-
- # ───────────────────── 5 · ARIMA FORECAST ───────────────────
- st.subheader("🔮 ARIMA Forecast")
- steps = st.slider("Horizon (days)", 3, 365, 90)
- p = st.number_input("p", 0, 5, 1); d = st.number_input("d", 0, 2, 1); q = st.number_input("q", 0, 5, 1)
- try:
-     model = ARIMA(ts, order=(p, d, q)).fit()
-     future_idx = pd.date_range(ts.index[-1], periods=steps + 1, freq=pd.infer_freq(ts.index) or "D")[1:]
-     forecast = pd.Series(model.forecast(steps), index=future_idx, name="Forecast")
-     fig_fc = px.line(pd.concat([ts, forecast], axis=1), title="Actual vs Forecast")
-     st.plotly_chart(fig_fc, use_container_width=True)
- except Exception as e:
-     st.error(f"ARIMA failed: {e}")
-
- # ───────────────────── 6 · EDA EXPANDERS ────────────────────
- st.subheader("🔍 EDA Dashboard")
- with st.expander("Histogram + Box"):
-     st.plotly_chart(px.histogram(df, x=NUM_COL, marginal="box", template="plotly_dark"), use_container_width=True)
- with st.expander("Correlation Heatmap"):
-     corr = df.select_dtypes("number").corr()
-     st.plotly_chart(px.imshow(corr, color_continuous_scale="RdBu", title="Correlation"), use_container_width=True)
-
- # ───────────────────── 7 · STRATEGY BRIEF ───────────────────
- brief = (
-     "# Strategy Brief\n"
-     "• Clean missing date values for stable models.\n"
-     "• Investigate high correlations for driver analysis.\n"
-     "• Use forecast for inventory & workforce planning.\n"
-     "• Monitor outliers (>3σ) weekly.\n"
-     "• Segment by region/product for targeted actions."
- )
- st.download_button("⬇️ Download Strategy (.md)", brief, "bizintel_brief.md", "text/markdown")
  import streamlit as st
+ import pandas as pd
+ import numpy as np
+ import tempfile
+ from io import BytesIO
  from sqlalchemy import create_engine
+ from sqlalchemy import inspect
+ import plotly.express as px
+ import matplotlib.pyplot as plt
  from statsmodels.tsa.arima.model import ARIMA

+ # ── Helpers to read CSV/Excel robustly ───────────────────────────────────────────
+ @st.cache_data
+ def load_file(uploaded):
+     """Read a CSV or Excel file into a DataFrame."""
+     try:
+         if uploaded.name.lower().endswith((".xls", ".xlsx")):
+             return pd.read_excel(uploaded, engine="openpyxl")
+         else:
+             return pd.read_csv(uploaded)
+     except Exception as e:
+         raise ValueError(f"Error parsing file: {e}") from e
+
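+ # Note: st.cache_data keys the cache on the uploaded file's contents, so the
+ # same upload should be parsed only once per session.
+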
+ # ── Helpers for SQL database ────────────────────────────────────────────────────
+ SUPPORTED_ENGINES = ["postgresql", "mysql", "mssql+pyodbc", "oracle+cx_oracle"]
+
+ @st.cache_data
+ def list_tables(connection_string):
+     engine = create_engine(connection_string)
+     return inspect(engine).get_table_names()
+
+ @st.cache_data
+ def fetch_table(connection_string, table_name):
+     engine = create_engine(connection_string)
+     return pd.read_sql_table(table_name, engine)
+
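+ # Each dialect above assumes its driver is installed (e.g. psycopg2 for
+ # postgresql, pyodbc for mssql, cx_Oracle for oracle).
+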
+ # ── Streamlit page setup ────────────────────────────────────────────────────
  st.set_page_config(
+     page_title="BizIntel AI Ultra",
      layout="wide",
      initial_sidebar_state="expanded",
  )
+ st.title("📊 BizIntel AI Ultra – Advanced Analytics + Gemini 1.5 Pro")

+ # ── Data source selection ───────────────────────────────────────────────────────
+ data_source = st.radio("Select data source", ["Upload CSV / Excel", "Connect to SQL Database"])

+ df = None
+ if data_source == "Upload CSV / Excel":
+     uploaded = st.file_uploader(
+         "Drag & drop file here (≤ 500 MB)",
+         type=["csv", "xls", "xlsx"],
+         accept_multiple_files=False,
      )
+     if uploaded:
+         with st.spinner("Loading file…"):
+             df = load_file(uploaded)
+         st.success("✅ File loaded into memory")
+ elif data_source == "Connect to SQL Database":
+     engine = st.selectbox("Select DB engine", SUPPORTED_ENGINES)
+     conn_str = st.text_input("Connection string (SQLAlchemy format)", placeholder="e.g. postgresql://user:pass@host:port/dbname")
+     if conn_str:
+         tables = list_tables(conn_str)
+         table = st.selectbox("Choose table", tables)
+         if table:
+             with st.spinner(f"Fetching `{table}`…"):
+                 df = fetch_table(conn_str, table)
+             st.success(f"✅ `{table}` loaded from database")
+
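+ # Note: the engine selectbox is informational; the connection string above
+ # determines the actual dialect and driver.
+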
+ # ── If DataFrame is ready, show overview and proceed ───────────────────────────
+ if df is not None:
+     st.markdown("### 🗂️ Preview")
+     st.dataframe(df.head(5), use_container_width=True)
+
+     # Dataset overview metrics
+     n_rows, n_cols = df.shape
+     missing_pct = (df.isna().sum().sum() / (n_rows * n_cols)) * 100
+     st.markdown("---")
+     c1, c2, c3 = st.columns(3)
+     c1.metric("Rows", f"{n_rows:,}")
+     c2.metric("Columns", f"{n_cols:,}")
+     c3.metric("Missing %", f"{missing_pct:.1f}%")
+
+     # Detailed stats
+     st.markdown("#### 📋 Detailed descriptive statistics")
+     st.dataframe(df.describe(include="all").transpose(), use_container_width=True)
+
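+     # describe(include="all") mixes numeric and categorical summaries, so NaN
+     # cells are expected where a statistic does not apply to a column.
+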
+     # Optional exploratory visuals
+     st.markdown("---")
+     st.markdown("#### 🔎 Optional Exploratory Visuals")
+     col1, col2, col3 = st.columns(3)
+     with col1:
+         if st.checkbox("Histogram"):
+             num_cols = df.select_dtypes(include="number").columns.tolist()
+             col = st.selectbox("Choose numeric column for histogram", num_cols, key="hist")
+             fig = px.histogram(df, x=col, nbins=30, title=f"Histogram of {col}")
+             st.plotly_chart(fig, use_container_width=True)
+     with col2:
+         if st.checkbox("Scatter matrix"):
+             num_cols = df.select_dtypes(include="number").columns.tolist()[:6]  # limit to first 6
+             fig = px.scatter_matrix(df[num_cols], dimensions=num_cols, title="Scatter Matrix")
+             st.plotly_chart(fig, use_container_width=True)
+     with col3:
+         if st.checkbox("Correlation heatmap"):
+             corr = df.select_dtypes(include="number").corr()
+             fig, ax = plt.subplots(figsize=(6, 5))
+             im = ax.imshow(corr, vmin=-1, vmax=1, cmap="RdBu")
+             plt.xticks(range(len(corr)), corr.columns, rotation=45, ha="right")
+             plt.yticks(range(len(corr)), corr.columns)
+             plt.colorbar(im, ax=ax)
+             st.pyplot(fig)
+
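+     # (px.imshow(corr, color_continuous_scale="RdBu", zmin=-1, zmax=1) would
+     # render the same heatmap without the matplotlib dependency.)
+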
+     # ── Trend & Forecast ──────────────────────────────────────────────────────
+     st.markdown("---")
+     st.markdown("### 📈 Trend & Forecast")
+     # pick date/time column
+     dt_cols = df.columns[df.dtypes.isin([np.dtype("datetime64[ns]"), np.dtype("object")])].tolist()
+     date_col = st.selectbox("Select date/time column", dt_cols)
+     df[date_col] = pd.to_datetime(df[date_col], errors="coerce")
+
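+     # Object columns are offered alongside datetime ones because dates often
+     # arrive as strings; errors="coerce" turns unparseable values into NaT.
+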
+     # pick numeric metric
+     num_cols = df.select_dtypes(include="number").columns.tolist()
+     metric_col = st.selectbox("Select numeric metric", num_cols)
+
+     # prepare time series
+     ts = df[[date_col, metric_col]].dropna()
+     ts = ts.set_index(date_col).sort_index()
+     ts = ts[~ts.index.duplicated(keep="first")]
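+     # ARIMA expects a unique, sorted index, so duplicate timestamps keep only
+     # their first observation.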
+
+     # Trend plot
+     fig_trend = px.line(ts, y=metric_col, title=f"{metric_col} over Time")
+     st.plotly_chart(fig_trend, use_container_width=True)
+
+     # Forecast next 90 days with ARIMA
+     with st.spinner("Running 90-day forecast…"):
          try:
+             model = ARIMA(ts, order=(1, 1, 1)).fit()
+             fcast = model.get_forecast(90)
+             idx = pd.date_range(ts.index.max(), periods=91, freq="D")[1:]
+             df_f = pd.DataFrame({"forecast": fcast.predicted_mean.to_numpy()}, index=idx)
+
+             fig_fc = px.line(
+                 pd.concat([ts, df_f], axis=1),
+                 labels={metric_col: metric_col, "forecast": "Forecast"},
+                 title=f"{metric_col} & 90-Day Forecast",
+             )
+             st.plotly_chart(fig_fc, use_container_width=True)
+         except Exception as e:
+             st.error(f"Forecast failed: {e}")
+
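+     # Note: order=(1, 1, 1) is a fixed default and the forecast index assumes
+     # daily frequency; both are simplifications worth tuning per dataset.
+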
+     # ── Strategy Recommendations ─────────────────────────────────────────────
+     st.markdown("---")
+     st.markdown("### 🚀 Strategy Recommendations")
+     st.markdown(
+         """
+ 1. **Data Quality First**
+    Address any missing or malformed dates before further time-series analysis.
+
+ 2. **Trend & Seasonality**
+    Investigate any upward/downward trends and repeating seasonal patterns.
+
+ 3. **Outlier Management**
+    Identify extreme highs/lows in your metric—could be bulk orders or data errors.
+
+ 4. **Segment-Level Analysis**
+    Drill into key dimensions (e.g. region, product) to tailor growth strategies.
+
+ 5. **Predict & Act**
+    Use your 90-day forecasts to guide inventory, staffing, and marketing decisions.
+         """
+     )
+
+     # downloadable strategy as markdown
+     strategy_md = st.session_state.setdefault(
+         "strategy_md",
+         """
+ # BizIntel AI Ultra – Strategy Recommendations
+
+ 1. Data Quality First: …
+ 2. Trend & Seasonality: …
+ 3. Outlier Management: …
+ 4. Segment-Level Analysis: …
+ 5. Predict & Act: …
+ """,
+     )
+
+     st.download_button(
+         "📥 Download Strategy (.md)",
+         data=strategy_md,
+         file_name="strategy.md",
+         mime="text/markdown",
+     )