diff --git a/data_modeling/evaluation/amazon-employee-access-challenge_eval.py b/data_modeling/evaluation/amazon-employee-access-challenge_eval.py deleted file mode 100644 index d8cb361c2b0634ab2c676353ce0f500f777990fb..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/amazon-employee-access-challenge_eval.py +++ /dev/null @@ -1,37 +0,0 @@ - - -import os.path - -import numpy as np -import pandas as pd -import argparse - -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="ACTION") - -args = parser.parse_args() - - - -# 定义 RMSLE 计算函数 -def rmsle(y_true, y_pred): - return np.sqrt(np.mean((np.log1p(y_pred) - np.log1p(y_true)) ** 2)) - - -actual = pd.read_csv(args.answer_file) -submission = pd.read_csv(args.predict_file) - -performance = roc_auc_score(actual[args.value], submission[args.value]) - - - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/bike-sharing-demand_eval.py b/data_modeling/evaluation/bike-sharing-demand_eval.py deleted file mode 100644 index e087dad3f51fdb2e85b69118ff34dbac95da0fd6..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/bike-sharing-demand_eval.py +++ /dev/null @@ -1,29 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse - -# 计算RMSLE -def rmsle(predicted, actual): - sum_log_diff = np.sum((np.log(predicted + 1) - np.log(actual + 1)) ** 2) - mean_log_diff = sum_log_diff / len(predicted) - return np.sqrt(mean_log_diff) - -parser = argparse.ArgumentParser() -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="count") - -args = parser.parse_args() - -answers = pd.read_csv( args.answer_file) -predictions = pd.read_csv(args.predict_file) - -performance = rmsle(predictions[args.value], answers[args.value]) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/cat-in-the-dat-ii_eval.py b/data_modeling/evaluation/cat-in-the-dat-ii_eval.py deleted file mode 100644 index b7360946845a9261498fbec367a68ac4c064a9b2..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/cat-in-the-dat-ii_eval.py +++ /dev/null @@ -1,25 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import roc_auc_score - - -parser = argparse.ArgumentParser() -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="target") - -args = parser.parse_args() - -answers = pd.read_csv( args.answer_file) -predictions = pd.read_csv(args.predict_file) - -performance = roc_auc_score(answers[args.value], predictions[args.value]) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - 
f.write(str(performance)) diff --git a/data_modeling/evaluation/cat-in-the-dat_eval.py b/data_modeling/evaluation/cat-in-the-dat_eval.py deleted file mode 100644 index 05b4fff1521494604785e72212bab38877ca6f35..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/cat-in-the-dat_eval.py +++ /dev/null @@ -1,25 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import roc_auc_score - - -parser = argparse.ArgumentParser() -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="target") - -args = parser.parse_args() - -answers = pd.read_csv(args.answer_file) -predictions = pd.read_csv( args.predict_file) - -performance = roc_auc_score(answers[args.value], predictions[args.value]) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/commonlitreadabilityprize_eval.py b/data_modeling/evaluation/commonlitreadabilityprize_eval.py deleted file mode 100644 index f2c682d4676d585d225e1f98c88b195d153f0c50..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/commonlitreadabilityprize_eval.py +++ /dev/null @@ -1,28 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse - - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="target") - -args = parser.parse_args() - -def rmse(targets, predictions): - return np.sqrt(((predictions - targets) ** 2).mean()) - -answers = pd.read_csv( args.answer_file) -predictions = pd.read_csv( args.predict_file) - -performance = rmse(answers[args.value], predictions[args.value]) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/conways-reverse-game-of-life-2020_eval.py b/data_modeling/evaluation/conways-reverse-game-of-life-2020_eval.py deleted file mode 100644 index 92a09aafa5f42435daab219bd64f1bb12357fd86..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/conways-reverse-game-of-life-2020_eval.py +++ /dev/null @@ -1,34 +0,0 @@ - - -import os.path - -import numpy as np -import pandas as pd -import argparse - -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="generated") - -args = parser.parse_args() - -actual = pd.read_csv( args.answer_file) -submission = pd.read_csv(args.predict_file) - -# 移除id列,剩下的是矩阵的值 -submission_values = submission.drop(columns=['id']).values -actual_values = actual.drop(columns=['id']).values - -# 计算平均绝对误差 -performance = np.mean(np.abs(submission_values - actual_values)) - - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git 
a/data_modeling/evaluation/covid19-global-forecasting-week-1_eval.py b/data_modeling/evaluation/covid19-global-forecasting-week-1_eval.py deleted file mode 100644 index a517e5efca6b2a55a0a5b66b4e030116e1e90da6..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/covid19-global-forecasting-week-1_eval.py +++ /dev/null @@ -1,28 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse - -def rmsle(predictions, actuals): - rmsle_confirmed = np.sqrt(np.mean((np.log1p(predictions['ConfirmedCases']) - np.log1p(actuals['ConfirmedCases'])) ** 2)) - rmsle_fatalities = np.sqrt(np.mean((np.log1p(predictions['Fatalities']) - np.log1p(actuals['Fatalities'])) ** 2)) - return (rmsle_confirmed + rmsle_fatalities) / 2 - -parser = argparse.ArgumentParser() -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="count") - -args = parser.parse_args() - -answers = pd.read_csv( args.answer_file) -predictions = pd.read_csv( args.predict_file) - -performance = rmsle(predictions, answers) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/covid19-global-forecasting-week-2_eval.py b/data_modeling/evaluation/covid19-global-forecasting-week-2_eval.py deleted file mode 100644 index 219b4e470b13e728be116246a6e64951eccd1e41..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/covid19-global-forecasting-week-2_eval.py +++ /dev/null @@ -1,28 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse - -def rmsle(predictions, actuals): - rmsle_confirmed = np.sqrt(np.mean((np.log1p(predictions['ConfirmedCases']) - np.log1p(actuals['ConfirmedCases'])) ** 2)) - rmsle_fatalities = np.sqrt(np.mean((np.log1p(predictions['Fatalities']) - np.log1p(actuals['Fatalities'])) ** 2)) - return (rmsle_confirmed + rmsle_fatalities) / 2 - -parser = argparse.ArgumentParser() -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="count") - -args = parser.parse_args() - -answers = pd.read_csv(os.path.join(args.path, args.name, args.answer_file)) -predictions = pd.read_csv(os.path.join(args.path, args.name, args.predict_file)) - -performance = rmsle(predictions, answers) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/covid19-global-forecasting-week-3_eval.py b/data_modeling/evaluation/covid19-global-forecasting-week-3_eval.py deleted file mode 100644 index 219b4e470b13e728be116246a6e64951eccd1e41..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/covid19-global-forecasting-week-3_eval.py +++ /dev/null @@ -1,28 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse - -def rmsle(predictions, actuals): - rmsle_confirmed = np.sqrt(np.mean((np.log1p(predictions['ConfirmedCases']) - np.log1p(actuals['ConfirmedCases'])) ** 2)) - rmsle_fatalities = np.sqrt(np.mean((np.log1p(predictions['Fatalities']) - np.log1p(actuals['Fatalities'])) ** 2)) - return (rmsle_confirmed + 
rmsle_fatalities) / 2 - -parser = argparse.ArgumentParser() -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="count") - -args = parser.parse_args() - -answers = pd.read_csv(os.path.join(args.path, args.name, args.answer_file)) -predictions = pd.read_csv(os.path.join(args.path, args.name, args.predict_file)) - -performance = rmsle(predictions, answers) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/covid19-global-forecasting-week-4_eval.py b/data_modeling/evaluation/covid19-global-forecasting-week-4_eval.py deleted file mode 100644 index 219b4e470b13e728be116246a6e64951eccd1e41..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/covid19-global-forecasting-week-4_eval.py +++ /dev/null @@ -1,28 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse - -def rmsle(predictions, actuals): - rmsle_confirmed = np.sqrt(np.mean((np.log1p(predictions['ConfirmedCases']) - np.log1p(actuals['ConfirmedCases'])) ** 2)) - rmsle_fatalities = np.sqrt(np.mean((np.log1p(predictions['Fatalities']) - np.log1p(actuals['Fatalities'])) ** 2)) - return (rmsle_confirmed + rmsle_fatalities) / 2 - -parser = argparse.ArgumentParser() -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="count") - -args = parser.parse_args() - -answers = pd.read_csv(os.path.join(args.path, args.name, args.answer_file)) -predictions = pd.read_csv(os.path.join(args.path, args.name, args.predict_file)) - -performance = rmsle(predictions, answers) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/covid19-global-forecasting-week-5_eval.py b/data_modeling/evaluation/covid19-global-forecasting-week-5_eval.py deleted file mode 100644 index 219b4e470b13e728be116246a6e64951eccd1e41..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/covid19-global-forecasting-week-5_eval.py +++ /dev/null @@ -1,28 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse - -def rmsle(predictions, actuals): - rmsle_confirmed = np.sqrt(np.mean((np.log1p(predictions['ConfirmedCases']) - np.log1p(actuals['ConfirmedCases'])) ** 2)) - rmsle_fatalities = np.sqrt(np.mean((np.log1p(predictions['Fatalities']) - np.log1p(actuals['Fatalities'])) ** 2)) - return (rmsle_confirmed + rmsle_fatalities) / 2 - -parser = argparse.ArgumentParser() -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="count") - -args = parser.parse_args() - -answers = pd.read_csv(os.path.join(args.path, args.name, args.answer_file)) -predictions = pd.read_csv(os.path.join(args.path, args.name, args.predict_file)) - -performance = rmsle(predictions, answers) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as 
f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/demand-forecasting-kernels-only_eval.py b/data_modeling/evaluation/demand-forecasting-kernels-only_eval.py deleted file mode 100644 index b4bdc8df3d139f504e96b9fde4d629664d89682f..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/demand-forecasting-kernels-only_eval.py +++ /dev/null @@ -1,30 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -# 定义 SMAPE 计算函数 -def smape(y_true, y_pred): - return 100/len(y_true) * np.sum(2 * np.abs(y_pred - y_true) / (np.abs(y_true) + np.abs(y_pred))) - - - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="sales") - -args = parser.parse_args() - - -answers = pd.read_csv(args.answer_file) -predictions = pd.read_csv(args.predict_file) - -performance = smape(answers[args.value], predictions[args.value]) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/dont-overfit-ii_eval.py b/data_modeling/evaluation/dont-overfit-ii_eval.py deleted file mode 100644 index a3befa852dcbd04e92b57b09da8563badcd15ea3..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/dont-overfit-ii_eval.py +++ /dev/null @@ -1,31 +0,0 @@ - - -import os.path - -import numpy as np -import pandas as pd -import argparse - -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="target") - -args = parser.parse_args() - -actual = pd.read_csv(os.path.join(args.path, args.name, args.answer_file)) -submission = pd.read_csv(os.path.join(args.path, args.name, args.predict_file)) - - -# 计算平均绝对误差 -performance = roc_auc_score(actual[args.value], submission[args.value]) - - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/feedback-prize-english-language-learning_eval.py b/data_modeling/evaluation/feedback-prize-english-language-learning_eval.py deleted file mode 100644 index 02b5005720b5894ed1e6569ee03e1ab9baccccac..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/feedback-prize-english-language-learning_eval.py +++ /dev/null @@ -1,37 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) -parser.add_argument('--value', type=str, default="place_id") - -args = parser.parse_args() - -actual = pd.read_csv(os.path.join(args.path, args.name, args.answer_file)) -submission = pd.read_csv(os.path.join(args.path, args.name, args.predict_file)) - -def mcrmse(y_true, y_pred): - """ - 计算Mean Columnwise Root Mean Squared Error (MCRMSE) - """ - assert 
y_true.shape == y_pred.shape, "The shapes of true and predicted values do not match" - columnwise_rmse = np.sqrt(((y_true - y_pred) ** 2).mean(axis=0)) - return columnwise_rmse.mean() - -# Extract the ground-truth labels and the predictions -actual_values = actual.iloc[:, 1:].values # assumes the first column of the answer file is text_id, followed by the true label columns -predicted_values = submission.iloc[:, 1:].values # assumes the first column of the submission is text_id, followed by the predicted label columns - -# Compute MCRMSE -performance = mcrmse(actual_values, predicted_values) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) \ No newline at end of file diff --git a/data_modeling/evaluation/google-quest-challenge_eval.py b/data_modeling/evaluation/google-quest-challenge_eval.py deleted file mode 100644 index fb14f3505b9df01eaab2aa0cd906feb9a5455d47..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/google-quest-challenge_eval.py +++ /dev/null @@ -1,45 +0,0 @@ - - -import os.path - -import numpy as np -import pandas as pd -import argparse -from scipy.stats import spearmanr - -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="place_id") - -args = parser.parse_args() - -actual = pd.read_csv(os.path.join(args.path, args.name, args.answer_file)) -submission = pd.read_csv(os.path.join(args.path, args.name, args.predict_file)) -def mean_spearmanr(y_true, y_pred): - """ - Compute Spearman's rank correlation coefficient for each column and return the average - """ - assert y_true.shape == y_pred.shape, "The shapes of true and predicted values do not match" - correlations = [] - for col in range(y_true.shape[1]): - corr, _ = spearmanr(y_true[:, col], y_pred[:, col]) - correlations.append(corr) - return sum(correlations) / len(correlations) - - -# Extract the ground-truth labels and the predictions -actual_values = actual.iloc[:, 1:].values # assumes the first column of the answer file is qa_id, followed by the true label columns -predicted_values = submission.iloc[:, 1:].values # assumes the first column of the submission is qa_id, followed by the predicted label columns -# Compute the mean column-wise Spearman correlation -performance = mean_spearmanr(actual_values, predicted_values) - - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/instant-gratification_eval.py b/data_modeling/evaluation/instant-gratification_eval.py deleted file mode 100644 index b02eebf06548925af2ba6f93303aec5338d56363..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/instant-gratification_eval.py +++ /dev/null @@ -1,43 +0,0 @@ - - -import os.path - -import numpy as np -import pandas as pd -import argparse - -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="target") - -args = parser.parse_args() - -# Compute MAE -def mean_absolute_error(y_true, y_pred): - return np.mean(np.abs(y_pred - y_true)) - - -answers = pd.read_csv(args.answer_file) -predictions = pd.read_csv(args.predict_file) - -y_true = answers[args.value].values -y_pred = predictions[args.value].values - - - - -performance = roc_auc_score(y_true, y_pred) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: -
f.write(str(performance)) - - - diff --git a/data_modeling/evaluation/learning-agency-lab-automated-essay-scoring-2_eval.py b/data_modeling/evaluation/learning-agency-lab-automated-essay-scoring-2_eval.py deleted file mode 100644 index 542cde41282e7668b3258351fd31c5d0f69f5135..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/learning-agency-lab-automated-essay-scoring-2_eval.py +++ /dev/null @@ -1,45 +0,0 @@ - - -import os.path - -import numpy as np -import pandas as pd -import argparse - -from sklearn.metrics import roc_auc_score -from sklearn.metrics import cohen_kappa_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="score") - -args = parser.parse_args() - -# Compute MAE -def mean_absolute_error(y_true, y_pred): - return np.mean(np.abs(y_pred - y_true)) - - -answers = pd.read_csv(args.answer_file) -predictions = pd.read_csv( args.predict_file) - -answers = answers.sort_values('essay_id') -predictions = predictions.sort_values('essay_id') - - -y_true = answers[args.value].values -y_pred = predictions[args.value].values - - - - -performance = cohen_kappa_score(y_true, y_pred, weights='quadratic') - - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/liverpool-ion-switching_eval.py b/data_modeling/evaluation/liverpool-ion-switching_eval.py deleted file mode 100644 index 8211211622f9960a091cb730ea1b399eeb939e02..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/liverpool-ion-switching_eval.py +++ /dev/null @@ -1,46 +0,0 @@ - - -import os.path - -import numpy as np -import pandas as pd -import argparse - -from sklearn.metrics import roc_auc_score -from sklearn.metrics import cohen_kappa_score -from sklearn.metrics import f1_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="open_channels") - -args = parser.parse_args() - -# Compute MAE -def mean_absolute_error(y_true, y_pred): - return np.mean(np.abs(y_pred - y_true)) - - -answers = pd.read_csv( args.answer_file) -predictions = pd.read_csv(args.predict_file) - -answers = answers.sort_values('time') -predictions = predictions.sort_values('time') - - -y_true = answers[args.value].values -y_pred = predictions[args.value].values - - - - -performance = f1_score(y_true, y_pred, average='macro') - - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/lmsys-chatbot-arena_eval.py b/data_modeling/evaluation/lmsys-chatbot-arena_eval.py deleted file mode 100644 index 8df82fac4addd263725c998b13828910e555ce36..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/lmsys-chatbot-arena_eval.py +++ /dev/null @@ -1,42 +0,0 @@ - - -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import log_loss - -from sklearn.metrics import roc_auc_score -from sklearn.metrics import cohen_kappa_score - -parser = 
argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="score") - -args = parser.parse_args() - -# Compute MAE -def mean_absolute_error(y_true, y_pred): - return np.mean(np.abs(y_pred - y_true)) - - -actual = pd.read_csv( args.answer_file) -submission = pd.read_csv(args.predict_file) - -# Extract the actual and predicted values -actual_values = actual[['winner_model_a', 'winner_model_b', 'winner_tie']].values -predicted_values = submission[['winner_model_a', 'winner_model_b', 'winner_tie']].values - - - -performance = log_loss(actual_values, predicted_values) - - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/microsoft-malware-prediction_eval.py b/data_modeling/evaluation/microsoft-malware-prediction_eval.py deleted file mode 100644 index 6216fceeede3e753491d46f461b52cc29751bd32..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/microsoft-malware-prediction_eval.py +++ /dev/null @@ -1,44 +0,0 @@ - - -import os.path - -import numpy as np -import pandas as pd -import argparse - -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="HasDetections") - -args = parser.parse_args() - -# Compute MAE -def mean_absolute_error(y_true, y_pred): - return np.mean(np.abs(y_pred - y_true)) - - -answers = pd.read_csv(args.answer_file) -predictions = pd.read_csv(args.predict_file) -answers.sort_values(by=["MachineIdentifier"]) -predictions.sort_values(by=['MachineIdentifier']) -y_true = answers[args.value].values -y_pred = predictions[args.value].values - - - - -performance = roc_auc_score(y_true, y_pred) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) - - - diff --git a/data_modeling/evaluation/nlp-getting-started_eval.py b/data_modeling/evaluation/nlp-getting-started_eval.py deleted file mode 100644 index 297c10718f42c2c3d6c390367850976362d11a25..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/nlp-getting-started_eval.py +++ /dev/null @@ -1,37 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import f1_score - - -# Compute multiclass log loss (unused here; this script reports F1) -def multiclass_logloss(actuals, predictions): - epsilon = 1e-15 # avoid numerical issues in the log - predictions = np.clip(predictions, epsilon, 1 - epsilon) # clip the predicted probabilities so the log stays finite - predictions /= predictions.sum(axis=1)[:, np.newaxis] # renormalize so each row sums to 1 - log_pred = np.log(predictions) - loss = -np.sum(actuals * log_pred) / len(actuals) - return loss - - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="target") - -args = parser.parse_args() - - -answers = pd.read_csv( args.answer_file) -predictions = pd.read_csv( args.predict_file) - -performance =
f1_score(answers[args.value], predictions[args.value]) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/playground-series-s3e10_eval.py b/data_modeling/evaluation/playground-series-s3e10_eval.py deleted file mode 100644 index b112d9efce7006392e2636f9dd64b744d9c6f8f2..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/playground-series-s3e10_eval.py +++ /dev/null @@ -1,32 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import mean_squared_error -from sklearn.metrics import log_loss - - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="Class") - -args = parser.parse_args() - -answers = pd.read_csv( args.answer_file) -predictions = pd.read_csv( args.predict_file) -answers.sort_values(by=['id']) -predictions.sort_values(by=['id']) -if "Strength" in predictions: - performance = log_loss(answers[args.value], predictions["Strength"]) -else: - performance = log_loss(answers[args.value], predictions[args.value]) - - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/playground-series-s3e11_eval.py b/data_modeling/evaluation/playground-series-s3e11_eval.py deleted file mode 100644 index 7ccd48eee96e440996b0a6aa346c05d0e4610c1d..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/playground-series-s3e11_eval.py +++ /dev/null @@ -1,27 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import mean_squared_error -from sklearn.metrics import mean_squared_log_error - - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="cost") - -args = parser.parse_args() - -answers = pd.read_csv(args.answer_file) -predictions = pd.read_csv( args.predict_file) - -performance = np.sqrt(mean_squared_log_error(answers[args.value], predictions[args.value])) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/playground-series-s3e12_eval.py b/data_modeling/evaluation/playground-series-s3e12_eval.py deleted file mode 100644 index 12b1ff2360a3d59fb6b6fa6aecd4e539c544d84d..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/playground-series-s3e12_eval.py +++ /dev/null @@ -1,35 +0,0 @@ - - -import os.path - -import numpy as np -import pandas as pd -import argparse - -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="target") - -args = parser.parse_args() - - -actual = pd.read_csv(args.answer_file) -submission = 
pd.read_csv(args.predict_file) - -actual.sort_values(by=['id']) -submission.sort_values(by=['id']) - -# 计算平均错误率 -performance = roc_auc_score(actual[args.value], submission[args.value]) - - - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/playground-series-s3e13_eval.py b/data_modeling/evaluation/playground-series-s3e13_eval.py deleted file mode 100644 index 1f4f458058934b2560b5d8a2bb81b1fde0d40b25..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/playground-series-s3e13_eval.py +++ /dev/null @@ -1,57 +0,0 @@ - - -import os.path - -import numpy as np -import pandas as pd -import argparse - -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', default='', type=str, required=False) -parser.add_argument('--name', default='', type=str, required=False) -parser.add_argument('--answer_file', default='/Users/tencentintern/PycharmProjects/DSBench/kaggle_data/data_filted_csv/answers/playground-series-s3e13/test_answer.csv', type=str, required=False) -parser.add_argument('--predict_file', default='/Users/tencentintern/PycharmProjects/DSBench/kaggle_data/data_filted_csv/answers/playground-series-s3e13/test_answer.csv', type=str, required=False) - -parser.add_argument('--value', type=str, default="prognosis") - -args = parser.parse_args() - - -actual = pd.read_csv(args.answer_file) -submission = pd.read_csv(args.predict_file) - -actual.sort_values(by=['id']) -submission.sort_values(by=['id']) - - -def mpa_at_3(actual, predictions): - """ - Calculate Mean Percentage Agreement at 3 (MPA@3). - - Parameters: - actual (list): List of actual prognosis values. - predictions (list of lists): List of lists containing up to 3 predicted prognosis values. - - Returns: - float: The MPA@3 score. 
- """ - total = len(actual) - score = 0.0 - - for act, preds in zip(actual, predictions): - preds = preds.split() - if act in preds[:3]: - score += 1 - - return score / total - -# 计算平均错误率 -performance = mpa_at_3(actual[args.value], submission[args.value]) -print(performance) - - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/playground-series-s3e14_eval.py b/data_modeling/evaluation/playground-series-s3e14_eval.py deleted file mode 100644 index 0eb7334a04c421ad1861d4d7b98dc052f3f1bdbe..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/playground-series-s3e14_eval.py +++ /dev/null @@ -1,30 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse - - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="yield") - -args = parser.parse_args() - -# Compute MAE -def mean_absolute_error(y_true, y_pred): - return np.mean(np.abs(y_pred - y_true)) - - -answers = pd.read_csv( args.answer_file) -predictions = pd.read_csv( args.predict_file) - -performance = mean_absolute_error(answers[args.value], predictions[args.value]) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/playground-series-s3e16_eval.py b/data_modeling/evaluation/playground-series-s3e16_eval.py deleted file mode 100644 index a4d72b0d21e2e94ea615b412d8453039d64dfc27..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/playground-series-s3e16_eval.py +++ /dev/null @@ -1,33 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import mean_squared_error -from sklearn.metrics import mean_squared_log_error -from sklearn.metrics import mean_absolute_error - - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="yield") - -args = parser.parse_args() - -answers = pd.read_csv( args.answer_file) -predictions = pd.read_csv( args.predict_file) - -answers.sort_values(by=['id']) -predictions.sort_values(by=['id']) -if 'Age' in predictions: - performance = mean_absolute_error(answers['Age'], predictions['Age']) -else: - performance = mean_absolute_error(answers['Age'], predictions[args.value]) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/playground-series-s3e17_eval.py b/data_modeling/evaluation/playground-series-s3e17_eval.py deleted file mode 100644 index d44abb21fd1fbc88ae7a7d5efaf5bee27f069099..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/playground-series-s3e17_eval.py +++ /dev/null @@ -1,29 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import mean_squared_error -from sklearn.metrics import mean_squared_log_error -from sklearn.metrics import mean_absolute_error - -from sklearn.metrics import roc_auc_score - -parser = 
argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="Machine failure") - -args = parser.parse_args() - -answers = pd.read_csv( args.answer_file) -predictions = pd.read_csv( args.predict_file) - -performance = roc_auc_score(answers[args.value], predictions[args.value]) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/playground-series-s3e18_eval.py b/data_modeling/evaluation/playground-series-s3e18_eval.py deleted file mode 100644 index 3f739c6d555d08a6f5b67db850ecc9bfc2ea24a3..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/playground-series-s3e18_eval.py +++ /dev/null @@ -1,29 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import mean_squared_error -from sklearn.metrics import mean_squared_log_error -from sklearn.metrics import mean_absolute_error - -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="Machine failure") - -args = parser.parse_args() - -answers = pd.read_csv( args.answer_file) -predictions = pd.read_csv(args.predict_file) - -performance = (roc_auc_score(answers['EC1'], predictions['EC1']) + roc_auc_score(answers['EC2'], predictions['EC2'])) / 2 - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/playground-series-s3e19_eval.py b/data_modeling/evaluation/playground-series-s3e19_eval.py deleted file mode 100644 index 87ef7c057ce025164962ef476857684a0a852dca..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/playground-series-s3e19_eval.py +++ /dev/null @@ -1,34 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import mean_squared_error -from sklearn.metrics import mean_squared_log_error -from sklearn.metrics import mean_absolute_error - -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="Machine failure") - -args = parser.parse_args() - -answers = pd.read_csv( args.answer_file) -predictions = pd.read_csv( args.predict_file) - -# 提取预测值和实际标签 -predicted_values = predictions['num_sold'].values -actual_values = answers['num_sold'].values # 修改列名为answers - - -smape = np.mean(2 * np.abs(predicted_values - actual_values) / (np.abs(actual_values) + np.abs(predicted_values))) -performance = smape -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/playground-series-s3e1_eval.py b/data_modeling/evaluation/playground-series-s3e1_eval.py deleted file mode 100644 index 
4b21da8a1344ba1d5dc6bd0e0e7f68b00b1fd33b..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/playground-series-s3e1_eval.py +++ /dev/null @@ -1,38 +0,0 @@ - - -import os.path - -import numpy as np -import pandas as pd -import argparse - -from sklearn.metrics import roc_auc_score -from sklearn.metrics import mean_squared_error - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="MedHouseVal") - -args = parser.parse_args() - - - -# 定义 RMSLE 计算函数 -def rmsle(y_true, y_pred): - return np.sqrt(np.mean((np.log1p(y_pred) - np.log1p(y_true)) ** 2)) - - -actual = pd.read_csv( args.answer_file) -submission = pd.read_csv( args.predict_file) - -performance = np.sqrt(mean_squared_error(actual[args.value], submission[args.value])) - - - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/playground-series-s3e20_eval.py b/data_modeling/evaluation/playground-series-s3e20_eval.py deleted file mode 100644 index 11ca4e926c639c43a8b76a64c16e3ceb9551aaed..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/playground-series-s3e20_eval.py +++ /dev/null @@ -1,36 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import mean_squared_error -from sklearn.metrics import mean_squared_log_error -from sklearn.metrics import mean_absolute_error -from math import sqrt -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="Machine failure") - -args = parser.parse_args() - -answers = pd.read_csv( args.answer_file) -predictions = pd.read_csv(args.predict_file) - -answers.sort_values(by=['ID_LAT_LON_YEAR_WEEK']) -predictions.sort_values(by=['ID_LAT_LON_YEAR_WEEK']) -# 提取预测值和实际标签 -predicted_values = predictions['emission'].values -actual_values = answers['emission'].values # 修改列名为answers - - -smape = sqrt(mean_squared_error(actual_values, predicted_values)) -performance = smape -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/playground-series-s3e22_eval.py b/data_modeling/evaluation/playground-series-s3e22_eval.py deleted file mode 100644 index d1c3db190553f53b4331a73136a6ffe4126ad3ca..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/playground-series-s3e22_eval.py +++ /dev/null @@ -1,45 +0,0 @@ -from sklearn.metrics import f1_score - - -import os.path - -import numpy as np -import pandas as pd -import argparse - -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="outcome") - -args = parser.parse_args() - -# Compute MAE -def 
mean_absolute_error(y_true, y_pred): - return np.mean(np.abs(y_pred - y_true)) - - -answers = pd.read_csv(args.answer_file) -predictions = pd.read_csv(args.predict_file) -answers.sort_values(by=["id"]) -predictions.sort_values(by=['id']) -y_true = answers[args.value].values -y_pred = predictions[args.value].values - - - - -performance = f1_score(y_true, y_pred, average='micro') - - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) - - - diff --git a/data_modeling/evaluation/playground-series-s3e23_eval.py b/data_modeling/evaluation/playground-series-s3e23_eval.py deleted file mode 100644 index 15fc332cc6f00c65a46bd5dc99ad109d7785898f..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/playground-series-s3e23_eval.py +++ /dev/null @@ -1,37 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import mean_squared_error -from sklearn.metrics import mean_squared_log_error -from sklearn.metrics import mean_absolute_error - -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="defects") - -args = parser.parse_args() - -answers = pd.read_csv( args.answer_file) -predictions = pd.read_csv( args.predict_file) - -answers.sort_values(by=['id']) -predictions.sort_values(by=['id']) -# Extract the predicted values and the ground-truth labels -predicted_values = predictions['defects'].values -actual_values = answers['defects'].values # taken from the answers dataframe - -# Compute ROC AUC (variable name kept from an earlier RMSE version) -rmse = roc_auc_score(actual_values, predicted_values) - -performance = rmse -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/playground-series-s3e24_eval.py b/data_modeling/evaluation/playground-series-s3e24_eval.py deleted file mode 100644 index e11795e049c2514c8a7b8d44127c6b1757212253..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/playground-series-s3e24_eval.py +++ /dev/null @@ -1,32 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import mean_squared_error -from sklearn.metrics import mean_squared_log_error -from sklearn.metrics import mean_absolute_error - -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="smoking") - -args = parser.parse_args() - -answers = pd.read_csv(args.answer_file) -predictions = pd.read_csv( args.predict_file) - -# Extract the predicted probabilities and the ground-truth labels -predicted_probabilities = predictions['smoking'].values -actual_labels = answers['smoking'].values # - -performance = roc_auc_score(actual_labels, predicted_probabilities) -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/playground-series-s3e25_eval.py b/data_modeling/evaluation/playground-series-s3e25_eval.py deleted file mode 100644 index 024014cceacf00affaf6f16749b34b9c0bfae175..0000000000000000000000000000000000000000 ---
a/data_modeling/evaluation/playground-series-s3e25_eval.py +++ /dev/null @@ -1,29 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import roc_auc_score -from sklearn.metrics import median_absolute_error - - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="Hardness") - -args = parser.parse_args() - -answers = pd.read_csv(args.answer_file) -predictions = pd.read_csv( args.predict_file) - -performance = median_absolute_error(answers[args.value], predictions[args.value]) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) - - diff --git a/data_modeling/evaluation/playground-series-s3e2_eval.py b/data_modeling/evaluation/playground-series-s3e2_eval.py deleted file mode 100644 index e352f349081887d300c776eb2a1bd12234826059..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/playground-series-s3e2_eval.py +++ /dev/null @@ -1,26 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import roc_auc_score - - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="stroke") - -args = parser.parse_args() - -answers = pd.read_csv( args.answer_file) -predictions = pd.read_csv( args.predict_file) - -performance = roc_auc_score(answers[args.value], predictions[args.value]) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/playground-series-s3e3_eval.py b/data_modeling/evaluation/playground-series-s3e3_eval.py deleted file mode 100644 index 40cd49e2d18d4a671cc397bfe8b6d66ead6189e1..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/playground-series-s3e3_eval.py +++ /dev/null @@ -1,35 +0,0 @@ - - -import os.path - -import numpy as np -import pandas as pd -import argparse - -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="Attrition") - -args = parser.parse_args() - - -actual = pd.read_csv(args.answer_file) -submission = pd.read_csv(args.predict_file) - -actual.sort_values(by=actual.columns[0]) -submission.sort_values(by=submission.columns[0]) - -# 计算平均错误率 -performance = roc_auc_score(actual[args.value], submission[args.value]) - - - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/playground-series-s3e4_eval.py b/data_modeling/evaluation/playground-series-s3e4_eval.py deleted file mode 100644 index 117b4fffccecd299b86dbff85bef493060e8c23d..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/playground-series-s3e4_eval.py +++ /dev/null @@ -1,26 +0,0 @@ 
-import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import roc_auc_score - - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="Class") - -args = parser.parse_args() - -answers = pd.read_csv( args.answer_file) -predictions = pd.read_csv(args.predict_file) - -performance = roc_auc_score(answers[args.value], predictions[args.value]) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/playground-series-s3e5_eval.py b/data_modeling/evaluation/playground-series-s3e5_eval.py deleted file mode 100644 index 2c8e7121350ff5a05e8279603ccbce412d760d95..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/playground-series-s3e5_eval.py +++ /dev/null @@ -1,61 +0,0 @@ - - -import os.path - -import numpy as np -import pandas as pd -import argparse - -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="quality") - -args = parser.parse_args() - - -actual = pd.read_csv(args.answer_file) -submission = pd.read_csv(args.predict_file) - -actual.sort_values(by=['Id']) -submission.sort_values(by=['Id']) - -def quadratic_weighted_kappa(actual, predicted, N): - O = np.zeros((N, N), dtype=int) - for a, p in zip(actual, predicted): - O[a][p] += 1 - - w = np.zeros((N, N)) - for i in range(N): - for j in range(N): - w[i][j] = ((i - j) ** 2) / ((N - 1) ** 2) - - actual_hist = np.zeros(N) - for a in actual: - actual_hist[a] += 1 - - pred_hist = np.zeros(N) - for p in predicted: - pred_hist[p] += 1 - - E = np.outer(actual_hist, pred_hist) - E = E / E.sum() * O.sum() - - num = (w * O).sum() - den = (w * E).sum() - - return 1 - num / den - -# 计算平均错误率 -performance = quadratic_weighted_kappa(actual[args.value], submission[args.value], 10) - - - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/playground-series-s3e6_eval.py b/data_modeling/evaluation/playground-series-s3e6_eval.py deleted file mode 100644 index 6b17c12448c7ca0ffbdbc099d673b056d8b13b41..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/playground-series-s3e6_eval.py +++ /dev/null @@ -1,28 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse - - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="price") - -args = parser.parse_args() - -def rmse(targets, predictions): - return np.sqrt(((predictions - targets) ** 2).mean()) - -answers = pd.read_csv( args.answer_file) -predictions = pd.read_csv(args.predict_file) - -performance = rmse(answers[args.value], predictions[args.value]) - -with 
open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/playground-series-s3e7_eval.py b/data_modeling/evaluation/playground-series-s3e7_eval.py deleted file mode 100644 index 72753e14b22f85519523bbfb744435f13843a697..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/playground-series-s3e7_eval.py +++ /dev/null @@ -1,30 +0,0 @@ - - -import os.path - -import numpy as np -import pandas as pd -import argparse - -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="booking_status") - -args = parser.parse_args() - - -answers = pd.read_csv( args.answer_file) -predictions = pd.read_csv(args.predict_file) - -performance = roc_auc_score(answers[args.value], predictions[args.value]) - - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/playground-series-s3e8_eval.py b/data_modeling/evaluation/playground-series-s3e8_eval.py deleted file mode 100644 index 658e91461da9083d5fcedcca84a01662120e8a96..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/playground-series-s3e8_eval.py +++ /dev/null @@ -1,26 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import mean_squared_error - - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="price") - -args = parser.parse_args() - -answers = pd.read_csv( args.answer_file) -predictions = pd.read_csv( args.predict_file) - -performance = np.sqrt(mean_squared_error(answers[args.value], predictions[args.value])) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/playground-series-s3e9_eval.py b/data_modeling/evaluation/playground-series-s3e9_eval.py deleted file mode 100644 index 0d5037272908b8bbe0eacacc43c2320a073a6a12..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/playground-series-s3e9_eval.py +++ /dev/null @@ -1,42 +0,0 @@ - - -import os.path - -import numpy as np -import pandas as pd -import argparse - -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="Strength") - -args = parser.parse_args() - - -actual = pd.read_csv(args.answer_file) -submission = pd.read_csv(args.predict_file) - -actual.sort_values(by=['id']) -submission.sort_values(by=['id']) - -def calculate_rmse(actual, predicted): - actual = np.array(actual) - predicted = np.array(predicted) - mse = np.mean((actual - predicted) ** 2) - rmse = np.sqrt(mse) - return rmse - -# 计算平均错误率 -performance = calculate_rmse(actual[args.value], 
submission[args.value]) - - - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/playground-series-s4e1_eval.py b/data_modeling/evaluation/playground-series-s4e1_eval.py deleted file mode 100644 index ab03b75e90db4729d3c636b49d71161a3e5ce7e2..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/playground-series-s4e1_eval.py +++ /dev/null @@ -1,29 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import mean_squared_error -from sklearn.metrics import mean_squared_log_error -from sklearn.metrics import mean_absolute_error - -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="Exited") - -args = parser.parse_args() - -answers = pd.read_csv(args.answer_file) -predictions = pd.read_csv(args.predict_file) - -performance = roc_auc_score(answers[args.value], predictions[args.value]) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/playground-series-s4e2_eval.py b/data_modeling/evaluation/playground-series-s4e2_eval.py deleted file mode 100644 index 14c6a75a83e6782a43efbaa3b2b3b3166e73eb75..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/playground-series-s4e2_eval.py +++ /dev/null @@ -1,37 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import accuracy_score - - -# Compute the multi-class log loss -def multiclass_logloss(actuals, predictions): - epsilon = 1e-15 # avoid numerical issues in the log - predictions = np.clip(predictions, epsilon, 1 - epsilon) # clip predicted probabilities so the log stays finite - predictions /= predictions.sum(axis=1)[:, np.newaxis] # normalize so each row sums to 1 - log_pred = np.log(predictions) - loss = -np.sum(actuals * log_pred) / len(actuals) - return loss - - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="NObeyesdad") - -args = parser.parse_args() - - -answers = pd.read_csv(args.answer_file) -predictions = pd.read_csv(args.predict_file) - -performance = accuracy_score(answers[args.value], predictions[args.value]) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/playground-series-s4e3_eval.py b/data_modeling/evaluation/playground-series-s4e3_eval.py deleted file mode 100644 index 3b9afcf0856312cfa953d1419082c3a6741a46d7..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/playground-series-s4e3_eval.py +++ /dev/null @@ -1,50 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import accuracy_score -from sklearn.metrics import roc_auc_score - - -# Compute the multi-class log loss -def multiclass_logloss(actuals, predictions): - epsilon = 1e-15 # avoid numerical issues in the log - predictions = np.clip(predictions, epsilon, 1 - epsilon) # clip predicted probabilities so the log stays finite - predictions /= predictions.sum(axis=1)[:, np.newaxis] # normalize so each row sums to 1
- log_pred = np.log(predictions) - loss = -np.sum(actuals * log_pred) / len(actuals) - return loss - - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="NObeyesdad") - -args = parser.parse_args() - -actual = pd.read_csv(args.answer_file) -submission = pd.read_csv( args.predict_file) - -# Define the categories to score -categories = ['Pastry', 'Z_Scratch', 'K_Scatch', 'Stains', 'Dirtiness', 'Bumps', 'Other_Faults'] - -# Extract the data and compute the ROC AUC score for each category -auc_scores = {} -for category in categories: - y_true = actual[category].values - y_pred = submission[category].values - auc_scores[category] = roc_auc_score(y_true, y_pred) - -# Compute the mean AUC score -performance = sum(auc_scores.values()) / len(auc_scores) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) - - diff --git a/data_modeling/evaluation/playground-series-s4e4_eval.py b/data_modeling/evaluation/playground-series-s4e4_eval.py deleted file mode 100644 index 4844cee8f5fa4abe7fbd9133ba0d0cde494891db..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/playground-series-s4e4_eval.py +++ /dev/null @@ -1,29 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import mean_squared_error -from sklearn.metrics import mean_squared_log_error -from sklearn.metrics import mean_absolute_error - -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="Rings") - -args = parser.parse_args() - -answers = pd.read_csv(args.answer_file) -predictions = pd.read_csv(args.predict_file) - -performance = np.sqrt(mean_squared_log_error(answers[args.value], predictions[args.value])) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/playground-series-s4e5_eval.py b/data_modeling/evaluation/playground-series-s4e5_eval.py deleted file mode 100644 index cfbc8ce27be5e5d11b9303b104dc053be3fa50dd..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/playground-series-s4e5_eval.py +++ /dev/null @@ -1,30 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import mean_squared_error -from sklearn.metrics import mean_squared_log_error -from sklearn.metrics import mean_absolute_error -from sklearn.metrics import r2_score - -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="FloodProbability") - -args = parser.parse_args() - -answers = pd.read_csv(args.answer_file) -predictions = pd.read_csv( args.predict_file) - -performance = r2_score(answers[args.value], predictions[args.value]) - -with open(os.path.join(args.path,
args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/playground-series-s4e6_eval.py b/data_modeling/evaluation/playground-series-s4e6_eval.py deleted file mode 100644 index be9853efbf3a1f8c4699c17a25b58801026f0394..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/playground-series-s4e6_eval.py +++ /dev/null @@ -1,31 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import mean_squared_error -from sklearn.metrics import mean_squared_log_error -from sklearn.metrics import mean_absolute_error -from sklearn.metrics import r2_score - -from sklearn.metrics import accuracy_score -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="Target") - -args = parser.parse_args() - -answers = pd.read_csv(args.answer_file) -predictions = pd.read_csv(args.predict_file) - -performance = accuracy_score(answers[args.value], predictions[args.value]) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/porto-seguro-safe-driver-prediction_eval.py b/data_modeling/evaluation/porto-seguro-safe-driver-prediction_eval.py deleted file mode 100644 index b84754fdc4901246925aa0209349252189b8a333..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/porto-seguro-safe-driver-prediction_eval.py +++ /dev/null @@ -1,44 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import mean_squared_error -from sklearn.metrics import mean_squared_log_error -from sklearn.metrics import mean_absolute_error -from sklearn.metrics import r2_score - -from sklearn.metrics import accuracy_score -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="target") - -args = parser.parse_args() - -def gini(actual, pred): - assert len(actual) == len(pred) - all_data = np.asarray(np.c_[actual, pred, np.arange(len(actual))]) - all_data = all_data[np.lexsort((all_data[:, 2], -1 * all_data[:, 1]))] - total_losses = all_data[:, 0].sum() - gini_sum = all_data[:, 0].cumsum().sum() / total_losses - - gini_sum -= (len(actual) + 1) / 2. 
- return gini_sum / len(actual) - -def normalized_gini(actual, pred): - return gini(actual, pred) / gini(actual, actual) - -answers = pd.read_csv(args.answer_file) -predictions = pd.read_csv(args.predict_file) - -performance = normalized_gini(answers[args.value], predictions[args.value]) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/santander-customer-satisfaction_eval.py b/data_modeling/evaluation/santander-customer-satisfaction_eval.py deleted file mode 100644 index 4267458e8261f2548dfed49e229e1e81eb52ec6a..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/santander-customer-satisfaction_eval.py +++ /dev/null @@ -1,29 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import mean_squared_error -from sklearn.metrics import mean_squared_log_error -from sklearn.metrics import mean_absolute_error - -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="TARGET") - -args = parser.parse_args() - -answers = pd.read_csv(args.answer_file) -predictions = pd.read_csv(args.predict_file) - -performance = roc_auc_score(answers[args.value], predictions[args.value]) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/santander-customer-transaction-prediction_eval.py b/data_modeling/evaluation/santander-customer-transaction-prediction_eval.py deleted file mode 100644 index cbd2184e2192ef13e44d2836f9fa3d52f12ce099..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/santander-customer-transaction-prediction_eval.py +++ /dev/null @@ -1,29 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import mean_squared_error -from sklearn.metrics import mean_squared_log_error -from sklearn.metrics import mean_absolute_error - -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="target") - -args = parser.parse_args() - -answers = pd.read_csv(args.answer_file) -predictions = pd.read_csv(args.predict_file) - -performance = roc_auc_score(answers[args.value], predictions[args.value]) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/santander-value-prediction-challenge_eval.py b/data_modeling/evaluation/santander-value-prediction-challenge_eval.py deleted file mode 100644 index d037c477aec20d73ced2095abfa90db62d4543e2..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/santander-value-prediction-challenge_eval.py +++ /dev/null @@ -1,61 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import mean_squared_error -from sklearn.metrics import mean_squared_log_error -from sklearn.metrics import 
mean_absolute_error -from sklearn.metrics import r2_score -from sklearn.metrics import accuracy_score -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="target") - -args = parser.parse_args() - -answers = pd.read_csv(args.answer_file) -predictions = pd.read_csv(args.predict_file) - -answers.sort_values(by='ID') -predictions.sort_values(by='ID') - -def rmsle(actual, predicted): - """ - Calculate the Root Mean Squared Logarithmic Error (RMSLE). - - Parameters: - actual (list or np.array): Array of actual target values. - predicted (list or np.array): Array of predicted target values. - - Returns: - float: The RMSLE score. - """ - actual = np.array(actual) - predicted = np.array(predicted) - - # Calculate the log of actual and predicted values - log_actual = np.log(actual + 1) - log_predicted = np.log(predicted + 1) - - # Calculate the squared differences - squared_diff = (log_actual - log_predicted) ** 2 - - # Calculate the mean of the squared differences - mean_squared_diff = np.mean(squared_diff) - - # Return the square root of the mean squared differences - rmsle_value = np.sqrt(mean_squared_diff) - return rmsle_value - -performance = rmsle(answers[args.value], predictions[args.value]) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/see-click-predict-fix_eval.py b/data_modeling/evaluation/see-click-predict-fix_eval.py deleted file mode 100644 index 3237614cad053f10d4bbf79f2ea4a6c48d07ca91..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/see-click-predict-fix_eval.py +++ /dev/null @@ -1,50 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import mean_squared_error -from sklearn.metrics import mean_squared_log_error -from sklearn.metrics import mean_absolute_error -from sklearn.metrics import r2_score - -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="FloodProbability") - -args = parser.parse_args() - -answers = pd.read_csv( args.answer_file) -predictions = pd.read_csv(args.predict_file) - - -def rmsle(y_true, y_pred): - assert len(y_true) == len(y_pred) - return np.sqrt(np.mean((np.log1p(y_pred) - np.log1p(y_true)) ** 2)) -# Extract the predicted and actual values -y_pred_views = predictions['num_views'] -y_true_views = answers['num_views'] -y_pred_votes = predictions['num_votes'] -y_true_votes = answers['num_votes'] -y_pred_comments = predictions['num_comments'] -y_true_comments = answers['num_comments'] - -# Compute the RMSLE for each target -rmsle_views = rmsle(y_true_views, y_pred_views) -rmsle_votes = rmsle(y_true_votes, y_pred_votes) -rmsle_comments = rmsle(y_true_comments, y_pred_comments) - - -average_rmsle = (rmsle_views + rmsle_votes + rmsle_comments) / 3 - -performance = average_rmsle - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git
a/data_modeling/evaluation/spaceship-titanic_eval.py b/data_modeling/evaluation/spaceship-titanic_eval.py deleted file mode 100644 index 50143305bcb478b67888903422df847e1d039262..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/spaceship-titanic_eval.py +++ /dev/null @@ -1,24 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse - - -parser = argparse.ArgumentParser() -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="Transported") - -args = parser.parse_args() - -answers = pd.read_csv(args.answer_file) -predictions = pd.read_csv( args.predict_file) - -performance = (predictions[args.value] == answers[args.value]).mean() - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/tabular-playground-series-apr-2021_eval.py b/data_modeling/evaluation/tabular-playground-series-apr-2021_eval.py deleted file mode 100644 index dc79d1b637a292bc85f0fa1aed2c2d821d7057cf..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/tabular-playground-series-apr-2021_eval.py +++ /dev/null @@ -1,30 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import mean_squared_error -from sklearn.metrics import mean_squared_log_error -from sklearn.metrics import mean_absolute_error -from sklearn.metrics import r2_score -from sklearn.metrics import accuracy_score -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="Survived") - -args = parser.parse_args() - -answers = pd.read_csv( args.answer_file) -predictions = pd.read_csv(args.predict_file) - -performance = accuracy_score(answers[args.value], predictions[args.value]) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/tabular-playground-series-aug-2021_eval.py b/data_modeling/evaluation/tabular-playground-series-aug-2021_eval.py deleted file mode 100644 index eab5f201657853533de0ec46b7c9c04f9166845f..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/tabular-playground-series-aug-2021_eval.py +++ /dev/null @@ -1,30 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import mean_squared_error -from sklearn.metrics import mean_squared_log_error -from sklearn.metrics import mean_absolute_error -from sklearn.metrics import r2_score -from sklearn.metrics import accuracy_score -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="loss") - -args = parser.parse_args() - -answers = pd.read_csv(args.answer_file) -predictions = 
pd.read_csv(args.predict_file) - -performance = np.sqrt(mean_squared_error(answers[args.value], predictions[args.value])) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/tabular-playground-series-aug-2022_eval.py b/data_modeling/evaluation/tabular-playground-series-aug-2022_eval.py deleted file mode 100644 index a3256929128cdb30d0aaa89d07e0145ce2aaaed6..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/tabular-playground-series-aug-2022_eval.py +++ /dev/null @@ -1,32 +0,0 @@ - - -import os.path - -import numpy as np -import pandas as pd -import argparse - -from sklearn.metrics import roc_auc_score -from sklearn.metrics import mean_squared_error - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="failure") - -args = parser.parse_args() - - -actual = pd.read_csv(args.answer_file) -submission = pd.read_csv(args.predict_file) - -performance = roc_auc_score(actual[args.value], submission[args.value]) - - - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/tabular-playground-series-dec-2021_eval.py b/data_modeling/evaluation/tabular-playground-series-dec-2021_eval.py deleted file mode 100644 index bc55317c1649bb90c7b09f00be91edacfb53031f..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/tabular-playground-series-dec-2021_eval.py +++ /dev/null @@ -1,30 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import mean_squared_error -from sklearn.metrics import mean_squared_log_error -from sklearn.metrics import mean_absolute_error -from sklearn.metrics import r2_score -from sklearn.metrics import accuracy_score -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="Cover_Type") - -args = parser.parse_args() - -answers = pd.read_csv(args.answer_file) -predictions = pd.read_csv(args.predict_file) - -performance = accuracy_score(answers[args.value], predictions[args.value]) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/tabular-playground-series-feb-2021_eval.py b/data_modeling/evaluation/tabular-playground-series-feb-2021_eval.py deleted file mode 100644 index a183a11305f83413a0676503922f6a86f50e17f1..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/tabular-playground-series-feb-2021_eval.py +++ /dev/null @@ -1,30 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import mean_squared_error -from sklearn.metrics import mean_squared_log_error -from sklearn.metrics import mean_absolute_error -from sklearn.metrics import r2_score -from sklearn.metrics import accuracy_score -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - 
-parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="target") - -args = parser.parse_args() - -answers = pd.read_csv(args.answer_file) -predictions = pd.read_csv(args.predict_file) - -performance = np.sqrt(mean_squared_error(answers[args.value], predictions[args.value])) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/tabular-playground-series-feb-2022_eval.py b/data_modeling/evaluation/tabular-playground-series-feb-2022_eval.py deleted file mode 100644 index 2cee81911c397adfe86acb6de42e6429974eb7ee..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/tabular-playground-series-feb-2022_eval.py +++ /dev/null @@ -1,46 +0,0 @@ -import os.path -import numpy as np -import pandas as pd -import argparse - -from sklearn.metrics import accuracy_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="target") - -args = parser.parse_args() - - -def rmsle(y_true, y_pred): - return np.sqrt(np.mean((np.log1p(y_pred) - np.log1p(y_true)) ** 2)) - - -def mean_column_wise_rmsle(true_df, pred_df): - assert true_df.shape == pred_df.shape - num_columns = true_df.shape[1] - rmsle_values = [] - - for column in true_df.columns: - if column != 'date_time': # Skip the date_time column - rmsle_values.append(rmsle(true_df[column].values, pred_df[column].values)) - - return np.mean(rmsle_values) - - -answers = pd.read_csv(args.answer_file) -predictions = pd.read_csv(args.predict_file) -answers.sort_values(by=["row_id"]) -predictions.sort_values(by=['row_id']) - - -performance = accuracy_score(answers[args.value], predictions[args.value]) - - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/tabular-playground-series-jan-2021_eval.py b/data_modeling/evaluation/tabular-playground-series-jan-2021_eval.py deleted file mode 100644 index a183a11305f83413a0676503922f6a86f50e17f1..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/tabular-playground-series-jan-2021_eval.py +++ /dev/null @@ -1,30 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import mean_squared_error -from sklearn.metrics import mean_squared_log_error -from sklearn.metrics import mean_absolute_error -from sklearn.metrics import r2_score -from sklearn.metrics import accuracy_score -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="target") - -args = parser.parse_args() - -answers = pd.read_csv(args.answer_file) -predictions = pd.read_csv(args.predict_file) - -performance = np.sqrt(mean_squared_error(answers[args.value], predictions[args.value])) - -with 
open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/tabular-playground-series-jan-2022_eval.py b/data_modeling/evaluation/tabular-playground-series-jan-2022_eval.py deleted file mode 100644 index 7b2b46f2815e544b6e3aa91d46320a2353e25bb0..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/tabular-playground-series-jan-2022_eval.py +++ /dev/null @@ -1,31 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse - -def smape(actual, predicted): - denominator = (np.abs(actual) + np.abs(predicted)) / 2.0 - diff = np.abs(actual - predicted) / denominator - diff[denominator == 0] = 0.0 # avoid division by zero - return 100 * np.mean(diff) - - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="num_sold") - -args = parser.parse_args() - -answers = pd.read_csv( args.answer_file) -predictions = pd.read_csv(args.predict_file) - -performance = smape(answers[args.value], predictions[args.value]) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/tabular-playground-series-jul-2021_eval.py b/data_modeling/evaluation/tabular-playground-series-jul-2021_eval.py deleted file mode 100644 index 748d5573090610afdfa8391f8c88f669adfd07c0..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/tabular-playground-series-jul-2021_eval.py +++ /dev/null @@ -1,46 +0,0 @@ -import os.path -import numpy as np -import pandas as pd -import argparse - -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="claim") - -args = parser.parse_args() - - -def rmsle(y_true, y_pred): - return np.sqrt(np.mean((np.log1p(y_pred) - np.log1p(y_true)) ** 2)) - - -def mean_column_wise_rmsle(true_df, pred_df): - assert true_df.shape == pred_df.shape - num_columns = true_df.shape[1] - rmsle_values = [] - - for column in true_df.columns: - if column != 'date_time': # Skip the date_time column - rmsle_values.append(rmsle(true_df[column].values, pred_df[column].values)) - - return np.mean(rmsle_values) - - -answers = pd.read_csv(args.answer_file) -predictions = pd.read_csv(args.predict_file) -answers.sort_values(by=["date_time"]) -predictions.sort_values(by=['date_time']) - - -performance = mean_column_wise_rmsle(answers, predictions) - - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/tabular-playground-series-mar-2021_eval.py b/data_modeling/evaluation/tabular-playground-series-mar-2021_eval.py deleted file mode 100644 index ba03795fa867e047ebed7eee8ea5b09f23dc8f5b..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/tabular-playground-series-mar-2021_eval.py +++ /dev/null @@ -1,30 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import mean_squared_error -from sklearn.metrics
import mean_squared_log_error -from sklearn.metrics import mean_absolute_error -from sklearn.metrics import r2_score -from sklearn.metrics import accuracy_score -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="target") - -args = parser.parse_args() - -answers = pd.read_csv(args.answer_file) -predictions = pd.read_csv(args.predict_file) - -performance =roc_auc_score(answers[args.value], predictions[args.value]) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/tabular-playground-series-mar-2022_eval.py b/data_modeling/evaluation/tabular-playground-series-mar-2022_eval.py deleted file mode 100644 index be6ec2549402b9cd75dc4eac921b93e52abf24c4..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/tabular-playground-series-mar-2022_eval.py +++ /dev/null @@ -1,30 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import mean_squared_error -from sklearn.metrics import mean_squared_log_error -from sklearn.metrics import mean_absolute_error -from sklearn.metrics import r2_score -from sklearn.metrics import accuracy_score -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="congestion") - -args = parser.parse_args() - -answers = pd.read_csv(args.answer_file) -predictions = pd.read_csv(args.predict_file) - -performance =mean_squared_error(answers[args.value], predictions[args.value]) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/tabular-playground-series-may-2022_eval.py b/data_modeling/evaluation/tabular-playground-series-may-2022_eval.py deleted file mode 100644 index ba03795fa867e047ebed7eee8ea5b09f23dc8f5b..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/tabular-playground-series-may-2022_eval.py +++ /dev/null @@ -1,30 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import mean_squared_error -from sklearn.metrics import mean_squared_log_error -from sklearn.metrics import mean_absolute_error -from sklearn.metrics import r2_score -from sklearn.metrics import accuracy_score -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="target") - -args = parser.parse_args() - -answers = pd.read_csv(args.answer_file) -predictions = pd.read_csv(args.predict_file) - -performance =roc_auc_score(answers[args.value], predictions[args.value]) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - 
f.write(str(performance)) diff --git a/data_modeling/evaluation/tabular-playground-series-nov-2021_eval.py b/data_modeling/evaluation/tabular-playground-series-nov-2021_eval.py deleted file mode 100644 index 7ce4829118e78b5deab081ff9622d349a3761380..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/tabular-playground-series-nov-2021_eval.py +++ /dev/null @@ -1,44 +0,0 @@ - - -import os.path - -import numpy as np -import pandas as pd -import argparse - -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="target") - -args = parser.parse_args() - -# Compute MAE -def mean_absolute_error(y_true, y_pred): - return np.mean(np.abs(y_pred - y_true)) - - -answers = pd.read_csv(args.answer_file) -predictions = pd.read_csv(args.predict_file) -answers.sort_values(by=["id"]) -predictions.sort_values(by=['id']) -y_true = answers[args.value].values -y_pred = predictions[args.value].values - - - - -performance = roc_auc_score(y_true, y_pred) - - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) - - - diff --git a/data_modeling/evaluation/tabular-playground-series-sep-2021_eval.py b/data_modeling/evaluation/tabular-playground-series-sep-2021_eval.py deleted file mode 100644 index 398da6e7bd1d1bfb204e81b6e81e450ddb712958..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/tabular-playground-series-sep-2021_eval.py +++ /dev/null @@ -1,44 +0,0 @@ - - -import os.path - -import numpy as np -import pandas as pd -import argparse - -from sklearn.metrics import roc_auc_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="claim") - -args = parser.parse_args() - -# Compute MAE -def mean_absolute_error(y_true, y_pred): - return np.mean(np.abs(y_pred - y_true)) - - -answers = pd.read_csv(args.answer_file) -predictions = pd.read_csv(args.predict_file) -answers.sort_values(by=["id"]) -predictions.sort_values(by=['id']) -y_true = answers[args.value].values -y_pred = predictions[args.value].values - - - - -performance = roc_auc_score(y_true, y_pred) - - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) - - - diff --git a/data_modeling/evaluation/tabular-playground-series-sep-2022_eval.py b/data_modeling/evaluation/tabular-playground-series-sep-2022_eval.py deleted file mode 100644 index 4ce7274c8fc52e1db9562f944354e036dd9d0702..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/tabular-playground-series-sep-2022_eval.py +++ /dev/null @@ -1,30 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -# Define the SMAPE metric -def smape(y_true, y_pred): - return 100/len(y_true) * np.sum(2 * np.abs(y_pred - y_true) / (np.abs(y_true) + np.abs(y_pred))) - - - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str,
required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="num_sold") - -args = parser.parse_args() - - -answers = pd.read_csv(args.answer_file) -predictions = pd.read_csv(args.predict_file) - -performance = smape(answers[args.value], predictions[args.value]) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/titanic_eval.py b/data_modeling/evaluation/titanic_eval.py deleted file mode 100644 index 9b3edef7aa933f905a5569016501c36a8f80e26f..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/titanic_eval.py +++ /dev/null @@ -1,35 +0,0 @@ - - -import os.path - -import numpy as np -import pandas as pd -import argparse - -from sklearn.metrics import accuracy_score - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="Survived") - -args = parser.parse_args() - - -actual = pd.read_csv(args.answer_file) -submission = pd.read_csv(args.predict_file) - -actual.sort_values(by=['PassengerId']) -submission.sort_values(by=['PassengerId']) - -# Compute the accuracy -performance = accuracy_score(actual[args.value], submission[args.value]) - - - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/tmdb-box-office-prediction_eval.py b/data_modeling/evaluation/tmdb-box-office-prediction_eval.py deleted file mode 100644 index 2a2d96fd7dff996c2dbf6af0112a2d5332f7db66..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/tmdb-box-office-prediction_eval.py +++ /dev/null @@ -1,29 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse - -def rmsle(y_true, y_pred): - return np.sqrt(np.mean((np.log1p(y_pred) - np.log1p(y_true)) ** 2)) - - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="revenue") - -args = parser.parse_args() - - -answers = pd.read_csv(args.answer_file) -predictions = pd.read_csv(args.predict_file) - -performance = rmsle(answers[args.value], predictions[args.value]) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) diff --git a/data_modeling/evaluation/tweet-sentiment-extraction_eval.py b/data_modeling/evaluation/tweet-sentiment-extraction_eval.py deleted file mode 100644 index 4eb4f00b549ec26d334ec9c60212600fdb8bc254..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/tweet-sentiment-extraction_eval.py +++ /dev/null @@ -1,46 +0,0 @@ - - -import os.path - -import numpy as np -import pandas as pd -import argparse - - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="yield") - -args = parser.parse_args() - -# Compute the
Jaccard similarity -def jaccard(str1, str2): - a = set(str1.lower().split()) - b = set(str2.lower().split()) - c = a.intersection(b) - return float(len(c)) / (len(a) + len(b) - len(c)) - - - -answers = pd.read_csv(args.answer_file) -predictions = pd.read_csv(args.predict_file) - -# Extract the data -y_true = answers['selected_text'].values -y_pred = predictions['selected_text'].values - -n = len(answers) -performance = sum(jaccard(y_true[i], y_pred[i]) for i in range(n)) / n - - - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) - - - diff --git a/data_modeling/evaluation/us-patent-phrase-to-phrase-matching_eval.py b/data_modeling/evaluation/us-patent-phrase-to-phrase-matching_eval.py deleted file mode 100644 index 5a8978a350fac25e344cc70d2cdcfe294a40818e..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/us-patent-phrase-to-phrase-matching_eval.py +++ /dev/null @@ -1,33 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import roc_auc_score -from sklearn.metrics import median_absolute_error -from scipy.stats import pearsonr - - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="score") - -args = parser.parse_args() - -answers = pd.read_csv(args.answer_file) -predictions = pd.read_csv(args.predict_file) - -if predictions[args.value].isnull().any(): - print("Error: There are missing values in the score columns.") - -performance, p_value = pearsonr(predictions[args.value], answers[args.value]) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance)) - - diff --git a/data_modeling/evaluation/ventilator-pressure-prediction_eval.py b/data_modeling/evaluation/ventilator-pressure-prediction_eval.py deleted file mode 100644 index 1930fbcccd5ac1cf0c12e3d495989bf04e9d58ab..0000000000000000000000000000000000000000 --- a/data_modeling/evaluation/ventilator-pressure-prediction_eval.py +++ /dev/null @@ -1,32 +0,0 @@ -import os.path - -import numpy as np -import pandas as pd -import argparse -from sklearn.metrics import mean_absolute_error - -def rmsle(y_true, y_pred): - return np.sqrt(np.mean((np.log1p(y_pred) - np.log1p(y_true)) ** 2)) - - -parser = argparse.ArgumentParser() - -parser.add_argument('--path', type=str, required=True) -parser.add_argument('--name', type=str, required=True) -parser.add_argument('--answer_file', type=str, required=True) -parser.add_argument('--predict_file', type=str, required=True) - -parser.add_argument('--value', type=str, default="pressure") - -args = parser.parse_args() - - -answers = pd.read_csv(os.path.join(args.path, args.name, args.answer_file)) -predictions = pd.read_csv(os.path.join(args.path, args.name, args.predict_file)) - -answers.sort_values(by=['id']) -predictions.sort_values(by=['id']) -performance = mean_absolute_error(answers[args.value], predictions[args.value]) - -with open(os.path.join(args.path, args.name, "result.txt"), "w") as f: - f.write(str(performance))