import gradio as gr from huggingface_hub import hf_hub_download import pickle from gradio import Progress import numpy as np import subprocess import shutil import matplotlib.pyplot as plt from sklearn.metrics import roc_curve, auc import pandas as pd # Define the function to process the input file and model selection def process_file(model_name,inc_slider,progress=Progress(track_tqdm=True)): # progress = gr.Progress(track_tqdm=True) progress(0, desc="Starting the processing") # with open(file.name, 'r') as f: # content = f.read() # saved_test_dataset = "train.txt" # saved_test_label = "train_label.txt" # saved_train_info="train_info.txt" # Save the uploaded file content to a specified location # shutil.copyfile(file.name, saved_test_dataset) # shutil.copyfile(label.name, saved_test_label) # shutil.copyfile(info.name, saved_train_info) parent_location="ratio_proportion_change3_2223/sch_largest_100-coded/finetuning/" test_info_location=parent_location+"fullTest/test_info.txt" test_location=parent_location+"fullTest/test.txt" if(model_name=="ASTRA-FT-HGR"): finetune_task="highGRschool10" # test_info_location=parent_location+"fullTest/test_info.txt" # test_location=parent_location+"fullTest/test.txt" elif(model_name== "ASTRA-FT-LGR" ): finetune_task="lowGRschoolAll" # test_info_location=parent_location+"lowGRschoolAll/test_info.txt" # test_location=parent_location+"lowGRschoolAll/test.txt" elif(model_name=="ASTRA-FT-FULL"): # test_info_location=parent_location+"fullTest/test_info.txt" # test_location=parent_location+"fullTest/test.txt" finetune_task="highGRschool10" else: finetune_task=None # Load the test_info file and the graduation rate file test_info = pd.read_csv(test_info_location, sep=',', header=None, engine='python') grad_rate_data = pd.DataFrame(pd.read_pickle('school_grduation_rate.pkl'),columns=['school_number','grad_rate']) # Load the grad_rate data # Step 1: Extract unique school numbers from test_info unique_schools = test_info[0].unique() # Step 2: 
Filter the grad_rate_data using the unique school numbers schools = grad_rate_data[grad_rate_data['school_number'].isin(unique_schools)] # Define a threshold for high and low graduation rates (adjust as needed) grad_rate_threshold = 0.9 # Step 4: Divide schools into high and low graduation rate groups high_grad_schools = schools[schools['grad_rate'] >= grad_rate_threshold]['school_number'].unique() low_grad_schools = schools[schools['grad_rate'] < grad_rate_threshold]['school_number'].unique() # Step 5: Sample percentage of schools from each group high_sample = pd.Series(high_grad_schools).sample(frac=inc_slider/100, random_state=1).tolist() low_sample = pd.Series(low_grad_schools).sample(frac=inc_slider/100, random_state=1).tolist() # Step 6: Combine the sampled schools random_schools = high_sample + low_sample # Step 7: Get indices for the sampled schools indices = test_info[test_info[0].isin(random_schools)].index.tolist() # Load the test file and select rows based on indices test = pd.read_csv(test_location, sep=',', header=None, engine='python') selected_rows_df2 = test.loc[indices] # Save the selected rows to a file selected_rows_df2.to_csv('selected_rows.txt', sep='\t', index=False, header=False, quoting=3, escapechar=' ') # For demonstration purposes, we'll just return the content with the selected model name # print(checkpoint) progress(0.1, desc="Files created and saved") # if (inc_val<5): # model_name="highGRschool10" # elif(inc_val>=5 & inc_val<10): # model_name="highGRschool10" # else: # model_name="highGRschool10" # Function to analyze each row def analyze_row(row): # Split the row into fields fields = row.split("\t") # Define tasks for OptionalTask_1, OptionalTask_2, and FinalAnswer optional_task_1_subtasks = ["DenominatorFactor", "NumeratorFactor", "EquationAnswer"] optional_task_2_subtasks = [ "FirstRow2:1", "FirstRow2:2", "FirstRow1:1", "FirstRow1:2", "SecondRow", "ThirdRow" ] final_answer_tasks = ["FinalAnswer"] # Helper function to evaluate task 
attempts def evaluate_tasks(fields, tasks): task_status = {} for task in tasks: relevant_attempts = [f for f in fields if task in f] # print(relevant_attempts) if any("OK" in attempt for attempt in relevant_attempts): task_status[task] = "Attempted (Successful)" elif any("ERROR" in attempt for attempt in relevant_attempts): task_status[task] = "Attempted (Error)" elif any("JIT" in attempt for attempt in relevant_attempts): task_status[task] = "Attempted (JIT)" else: task_status[task] = "Unattempted" return task_status # Evaluate tasks for each category optional_task_1_status = evaluate_tasks(fields, optional_task_1_subtasks) optional_task_2_status = evaluate_tasks(fields, optional_task_2_subtasks) final_answer_status = evaluate_tasks(fields, final_answer_tasks) # Combine results result = { "OptionalTask_1": optional_task_1_status, "OptionalTask_2": optional_task_2_status, "FinalAnswer": final_answer_status, } return result # Read data from test_info.txt with open(test_info_location, "r") as file: data = file.readlines() results = [analyze_row(row.strip()) for row in data if row.strip()] status_counts = {} for result in results: for task_category, task_statuses in result.items(): for task, status in task_statuses.items(): if task not in status_counts: status_counts[task] = {"Attempted (Successful)": 0, "Attempted (Error)": 0, "Attempted (JIT)": 0, "Unattempted": 0} status_counts[task][status] += 1 # Create a string output for results output_summary = "Task Analysis Summary:\n" output_summary += "-----------------------\n" for task, statuses in status_counts.items(): output_summary += f"Task: {task}\n" for status, count in statuses.items(): output_summary += f" {status}: {count}\n" progress(0.2, desc="analysis done!! 
Executing models") subprocess.run([ "python", "new_test_saved_finetuned_model.py", "-workspace_name", "ratio_proportion_change3_2223/sch_largest_100-coded", "-finetune_task", finetune_task, "-test_dataset_path","../../../../selected_rows.txt", # "-test_label_path","../../../../train_label.txt", "-finetuned_bert_classifier_checkpoint", "ratio_proportion_change3_2223/sch_largest_100-coded/output/highGRschool10/bert_fine_tuned.model.ep42", "-e",str(1), "-b",str(1000) ]) progress(0.6,desc="Model execution completed") result = {} with open("result.txt", 'r') as file: for line in file: key, value = line.strip().split(': ', 1) # print(type(key)) if key=='epoch': result[key]=value else: result[key]=float(value) # Create a plot with open("roc_data.pkl", "rb") as f: fpr, tpr, _ = pickle.load(f) roc_auc = auc(fpr, tpr) fig, ax = plt.subplots() ax.plot(fpr, tpr, color='blue', lw=2, label=f'ROC curve (area = {roc_auc:.2f})') ax.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--') ax.set(xlabel='False Positive Rate', ylabel='True Positive Rate', title=f'(Receiver Operating Curve) ROC') ax.legend(loc="lower right") ax.grid() # Save plot to a file plot_path = "plot.png" fig.savefig(plot_path) plt.close(fig) progress(1.0) # Prepare text output text_output = f"Model: {model_name}\nResult:\n{result}" # Prepare text output with HTML formatting text_output = f""" Model: {model_name}\n -----------------\n Time Taken: {result['time_taken_from_start']:.2f} seconds\n Total Schools in test: {len(unique_schools):.4f}\n Total number of instances having Schools with HGR : {len(high_sample):.4f}\n Total number of instances having Schools with LGR: {len(low_sample):.4f}\n -----------------\n """ return text_output,plot_path # List of models for the dropdown menu models = ["ASTRA-FT-HGR", "ASTRA-FT-LGR", "ASTRA-FT-FULL"] content = """
Welcome to a demo of ASTRA. ASTRA is a collaborative research project between researchers at the University of Memphis and Carnegie Learning to utilize AI to improve our understanding of math learning strategies.
This demo has been developed with a pre-trained model (based on an architecture similar to BERT) that learns math strategies using data collected from hundreds of schools in the U.S. that have used Carnegie Learning's MATHia (formerly known as Cognitive Tutor), the flagship Intelligent Tutor that is part of a core, blended math curriculum.
For this demo, we have used data from a specific domain (teaching ratio and proportions) within 7th grade math. The models, fine-tuned from the pre-trained model, learn to predict which strategies lead to correct vs. incorrect solutions.
To use the demo, please follow these steps:
Dashboard
") with gr.Row(): output_text = gr.Textbox(label="") output_image = gr.Image(label="ROC") # output_summary = gr.Textbox(label="Summary") btn = gr.Button("Submit") btn.click(fn=process_file, inputs=[model_dropdown,increment_slider], outputs=[output_text,output_image]) # Launch the app demo.launch()