import gradio as gr
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib
matplotlib.use("Agg")  # non-interactive backend for server-side rendering
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.impute import SimpleImputer
from io import BytesIO
from PIL import Image
import warnings

warnings.filterwarnings("ignore")
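
# Assumed runtime dependencies (a sketch of the Space's requirements.txt, which
# is not shown here): gradio, pandas, numpy, seaborn, matplotlib, scikit-learn,
# Pillow, and openpyxl (the engine pandas uses to read .xlsx files).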

# Read an uploaded CSV or Excel file into a DataFrame
def read_file(file):
    try:
        if file.name.endswith(".csv"):
            df = pd.read_csv(file.name)
        elif file.name.endswith(".xlsx"):
            df = pd.read_excel(file.name)
        else:
            raise ValueError("Unsupported file format. Please upload a CSV or Excel file.")
        # Ensure the file has columns
        if df.empty or df.columns.size == 0:
            raise ValueError("The file has no data or valid columns to parse.")
        return df
    except ValueError:
        # Propagate our own validation messages unchanged
        raise
    except Exception as e:
        raise ValueError(f"Error reading file: {str(e)}")

# Clean the data
def clean_data(df):
    # Drop duplicate rows
    df = df.drop_duplicates()
    # Fill missing values with each column's most frequent value
    imputer = SimpleImputer(strategy="most_frequent")
    df = pd.DataFrame(imputer.fit_transform(df), columns=df.columns)
    # fit_transform returns an object array, so restore numeric dtypes
    df = df.infer_objects()
    return df
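
# For example, on a tiny made-up frame (a sketch, not part of the app):
#
#     clean_data(pd.DataFrame({"a": [1, 1, None], "b": ["x", "x", "y"]}))
#     # the duplicate row drops first, then "a"'s NaN is filled with its mode (1)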

# Generate summary statistics
def generate_summary(df):
    # reset_index keeps the column names visible when rendered in gr.Dataframe
    return df.describe(include="all").transpose().reset_index().rename(columns={"index": "Column"})

# Correlation heatmap
def generate_correlation_heatmap(df):
    numeric_df = df.select_dtypes(include=[np.number])
    corr = numeric_df.corr()
    plt.figure(figsize=(10, 8))
    sns.heatmap(corr, annot=True, cmap="coolwarm", fmt=".2f")
    buf = BytesIO()
    plt.savefig(buf, format="png", bbox_inches="tight")
    buf.seek(0)
    plt.close()
    # gr.Image expects a PIL image, a NumPy array, or a file path, not a raw buffer
    return Image.open(buf)

# Feature importance using Random Forest
def feature_importance(df):
    # Encode categorical variables
    df_encoded = df.copy()
    label_encoders = {}
    for col in df_encoded.select_dtypes(include="object").columns:
        le = LabelEncoder()
        # Cast to str so mixed-type columns encode cleanly
        df_encoded[col] = le.fit_transform(df_encoded[col].astype(str))
        label_encoders[col] = le
    # The last column is assumed to be the target variable
    target_column = df_encoded.columns[-1]
    X = df_encoded.iloc[:, :-1]
    y = df_encoded[target_column]
    # Fit Random Forest
    model = RandomForestClassifier(random_state=42)
    model.fit(X, y)
    # Rank features by importance
    importance = pd.DataFrame({
        "Feature": X.columns,
        "Importance": model.feature_importances_,
    }).sort_values(by="Importance", ascending=False)
    return importance
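
# Usage sketch with made-up data: the rightmost column ("label" here) is treated
# as the classification target and everything else as features:
#
#     feature_importance(pd.DataFrame({
#         "age": [25, 32, 47, 51],
#         "city": ["NY", "LA", "NY", "SF"],
#         "label": [0, 1, 0, 1],
#     }))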

# Visualize feature importance
def plot_feature_importance(importance):
    plt.figure(figsize=(10, 6))
    sns.barplot(x="Importance", y="Feature", data=importance)
    plt.title("Feature Importance")
    buf = BytesIO()
    plt.savefig(buf, format="png", bbox_inches="tight")
    buf.seek(0)
    plt.close()
    return Image.open(buf)

def analyze_file(file):
    # Wrap an error message in a DataFrame so gr.Dataframe can display it
    def error_result(message):
        return pd.DataFrame({"Error": [message]}), None, None, None

    try:
        # Step 1: Read file
        df = read_file(file)
        if df.empty:
            return error_result("The uploaded file is empty or has no valid data.")
        # Step 2: Clean data
        df_cleaned = clean_data(df)
        if df_cleaned.empty:
            return error_result("The dataset contains no valid data after cleaning.")
        # Step 3: Generate summary statistics
        summary = generate_summary(df_cleaned)
        # Step 4: Generate correlation heatmap
        heatmap_img = generate_correlation_heatmap(df_cleaned)
        # Step 5: Feature importance analysis
        importance = feature_importance(df_cleaned)
        importance_plot_img = plot_feature_importance(importance)
        # Step 6: Return results
        return (
            summary,
            heatmap_img,
            importance.head(10),  # Top 10 most important features
            importance_plot_img,
        )
    except ValueError as ve:
        # File format or parsing errors
        return error_result(f"ValueError: {str(ve)}")
    except Exception as e:
        # Any other unforeseen issue
        return error_result(f"An unexpected error occurred: {str(e)}")

# Gradio Interface
def gradio_interface():
    with gr.Blocks() as interface:
        gr.Markdown("# AI Data Analytics Tool")
        gr.Markdown("Upload your dataset in CSV or Excel format to analyze and generate insights automatically.")
        with gr.Row():
            file_input = gr.File(label="Upload your CSV or Excel file")
            analyze_button = gr.Button("Analyze")
        with gr.Row():
            summary_output = gr.Dataframe(label="Summary Statistics")
            heatmap_output = gr.Image(label="Correlation Heatmap")
            importance_output = gr.Dataframe(label="Feature Importance")
            importance_plot_output = gr.Image(label="Feature Importance Plot")
        analyze_button.click(
            analyze_file,
            inputs=file_input,
            outputs=[summary_output, heatmap_output, importance_output, importance_plot_output],
        )
    return interface


if __name__ == "__main__":
    interface = gradio_interface()
    interface.launch(debug=True)
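
# A minimal sketch of exercising analyze_file outside the UI, assuming a local
# "sample.csv" exists (hypothetical file) and that Gradio passes an object whose
# .name attribute holds the upload's path, so SimpleNamespace can stand in:
#
#     from types import SimpleNamespace
#     summary, heatmap, importance, importance_plot = analyze_file(
#         SimpleNamespace(name="sample.csv")
#     )
#     print(summary)
#     print(importance)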