# Streamlit entry point for the Literacy Implementation Record analysis tool.
# NOTE: A previous, commented-out version of main() was removed from here.
# It is superseded by the live implementation below (which adds date-column
# detection and dynamic intervention-column lookup); see version control
# history for the old code.
import streamlit as st
import pandas as pd
from app_config import AppConfig # Import the configurations class
from data_processor import DataProcessor # Import the data analysis class
from visualization import Visualization # Import the data viz class
from ai_analysis import AIAnalysis # Import the ai analysis class
from sidebar import Sidebar # Import the Sidebar class
def _select_date_range():
    """Render the date-range controls and collect the user's choice.

    Returns:
        tuple: ``(date_option, start_date, end_date)`` where the dates are
        ``None`` when "All Data" is selected, or ``None`` (the bare value)
        when the user entered an invalid range (start after end) — an error
        is shown in that case and the caller should stop.
    """
    date_option = st.radio(
        "Select data range:",
        ("All Data", "Date Range")
    )
    start_date = None
    end_date = None
    if date_option == "Date Range":
        start_date = st.date_input("Start Date")
        end_date = st.date_input("End Date")
        # Reject an inverted range up front so no stale analysis is rendered.
        if start_date > end_date:
            st.error("Start date must be before end date.")
            return None
    return date_option, start_date, end_date


def _filter_by_date_range(df, start_date, end_date):
    """Return rows whose session date falls within [start_date, end_date].

    Looks for a "Date of Session" or "Date" column; shows an error and
    returns ``None`` when neither exists.
    """
    start_date = pd.to_datetime(start_date).date()
    end_date = pd.to_datetime(end_date).date()
    # The upload's date column name varies; accept either known variant.
    date_column = next((col for col in df.columns if col in ("Date of Session", "Date")), None)
    if date_column is None:
        st.error("Date column not found in the data.")
        return None
    return df[(df[date_column] >= start_date) & (df[date_column] <= end_date)]


def _render_intervention_dosage(data_processor, visualization, df):
    """Show the intervention dosage table, chart, headline %, and download."""
    intervention_stats = data_processor.compute_intervention_statistics(df)
    st.subheader("Intervention Dosage")
    st.write(intervention_stats)
    # Two-column layout: chart on the left, headline dosage figure on the right.
    col1, col2 = st.columns([3, 1])
    with col1:
        intervention_fig = visualization.plot_intervention_statistics(intervention_stats)
    with col2:
        intervention_frequency = intervention_stats['Intervention Dosage (%)'].values[0]
        st.markdown("<h3 style='color: #358E66;'>Intervention Dosage</h3>", unsafe_allow_html=True)
        st.markdown(f"<h1 style='color: #358E66;'>{intervention_frequency}%</h1>", unsafe_allow_html=True)
    visualization.download_chart(intervention_fig, "intervention_statistics_chart.png")


def _render_student_metrics(data_processor, visualization, df):
    """Show the per-student attendance/engagement table and chart.

    Returns:
        The student metrics DataFrame for downstream evaluation steps.
    """
    student_metrics_df = data_processor.compute_student_metrics(df)
    st.subheader("Student Attendance and Engagement")
    st.write(student_metrics_df)
    attendance_avg_stats, engagement_avg_stats = data_processor.compute_average_metrics(student_metrics_df)
    student_metrics_fig = visualization.plot_student_metrics(
        student_metrics_df, attendance_avg_stats, engagement_avg_stats
    )
    visualization.download_chart(student_metrics_fig, "student_metrics_chart.png")
    return student_metrics_df


def _render_evaluations(data_processor, visualization, student_metrics_df):
    """Evaluate each student and show an expandable decision tree per student."""
    student_metrics_df['Evaluation'] = student_metrics_df.apply(
        lambda row: data_processor.evaluate_student(row), axis=1
    )
    st.subheader("Student Evaluations")
    st.write(student_metrics_df[['Student', 'Evaluation']])
    for _, row in student_metrics_df.iterrows():
        tree_diagram = visualization.build_tree_diagram(row)
        # Collapse each diagram behind an expander labelled with the student.
        with st.expander(f"{row['Student']} Decision Tree", expanded=False):
            st.graphviz_chart(tree_diagram.source)


def _render_ai_analysis(ai_analysis, student_metrics_df):
    """Generate, display, and offer for download the LLM notes/recommendations."""
    llm_input = ai_analysis.prepare_llm_input(student_metrics_df)
    with st.spinner("Generating AI analysis..."):
        recommendations = ai_analysis.prompt_response_from_hf_llm(llm_input)
    st.subheader("AI Analysis")
    st.markdown(recommendations)
    ai_analysis.download_llm_output(recommendations, "llm_output.txt")


def main():
    """Run the Literacy Implementation Record analysis app.

    Workflow: configure the page and sidebar, collect an optional date
    range, accept an Excel upload, then render intervention dosage,
    attendance/engagement metrics, per-student evaluations, and an
    AI-generated summary. Any processing error is reported to the user
    via ``st.error`` rather than crashing the app.
    """
    # Constructed for their Streamlit side effects (page config, sidebar UI).
    app_config = AppConfig()  # noqa: F841 — kept for its page-setup side effects
    sidebar = Sidebar()
    sidebar.display()

    data_processor = DataProcessor()
    visualization = Visualization()
    ai_analysis = AIAnalysis(data_processor.client)

    st.title("Literacy Implementation Record Data Analysis")
    st.markdown("""
    This tool summarizes implementation record data for student attendance, engagement, and intervention dosage to address hypothesis #1: Have Students Received Adequate Instruction?
    """)

    date_selection = _select_date_range()
    if date_selection is None:
        # Invalid range; error already shown.
        return
    date_option, start_date, end_date = date_selection

    uploaded_file = st.file_uploader("Upload your Excel file", type=["xlsx"])
    if uploaded_file is None:
        return
    try:
        df = data_processor.read_excel(uploaded_file)
        df = data_processor.format_session_data(df)
        # Anonymize: replace full student names with initials before display.
        df = data_processor.replace_student_names_with_initials(df)
        if date_option == "Date Range":
            df = _filter_by_date_range(df, start_date, end_date)
            if df is None:
                # Missing date column; error already shown.
                return
        st.subheader("Uploaded Data")
        st.write(df)
        # Ensure the intervention column is present before computing stats.
        intervention_column = data_processor.get_intervention_column(df)
        if intervention_column not in df.columns:
            st.error(f"Expected column '{intervention_column}' not found.")
            return
        _render_intervention_dosage(data_processor, visualization, df)
        student_metrics_df = _render_student_metrics(data_processor, visualization, df)
        _render_evaluations(data_processor, visualization, student_metrics_df)
        _render_ai_analysis(ai_analysis, student_metrics_df)
    except Exception as e:
        # Top-level boundary: surface any processing failure to the user.
        st.error(f"Error processing the file: {str(e)}")
# Run the app only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()