|
import os.path |
|
|
|
import numpy as np |
|
import pandas as pd |
|
import argparse |
|
from sklearn.metrics import accuracy_score |
|
from sklearn.metrics import roc_auc_score |
|
|
|
|
|
|
|
def multiclass_logloss(actuals, predictions):
    """Mean multiclass cross-entropy (log loss).

    Args:
        actuals: array-like of shape (n_samples, n_classes) with one-hot
            (or probabilistic) true labels.
        predictions: array-like of shape (n_samples, n_classes) with
            predicted class probabilities; rows are re-normalized to sum to 1.

    Returns:
        The average negative log-likelihood over all samples.
    """
    eps = 1e-15
    # Keep probabilities strictly inside (0, 1) so log() stays finite.
    probs = np.clip(predictions, eps, 1 - eps)
    # Re-normalize each row to a proper distribution after clipping.
    probs = probs / probs.sum(axis=1, keepdims=True)
    # Per-sample cross-entropy, then average across samples.
    per_sample = np.sum(actuals * np.log(probs), axis=1)
    return -np.mean(per_sample)
|
|
|
|
|
# ---- CLI ---------------------------------------------------------------
parser = argparse.ArgumentParser(
    description="Score a submission against ground truth with mean ROC AUC."
)

parser.add_argument('--path', type=str, required=True)          # output root directory
parser.add_argument('--name', type=str, required=True)          # run name (output subdirectory)
parser.add_argument('--answer_file', type=str, required=True)   # CSV with ground-truth labels
parser.add_argument('--predict_file', type=str, required=True)  # CSV with predicted probabilities

# NOTE(review): --value is accepted but never used below; kept only so
# existing callers passing it do not break.
parser.add_argument('--value', type=str, default="NObeyesdad")

args = parser.parse_args()

actual = pd.read_csv(args.answer_file)
submission = pd.read_csv(args.predict_file)

# Steel-plate defect targets, each scored independently (one-vs-rest AUC).
categories = ['Pastry', 'Z_Scratch', 'K_Scatch', 'Stains', 'Dirtiness', 'Bumps', 'Other_Faults']

# Fail early with a clear message instead of a raw pandas KeyError mid-loop.
missing = [c for c in categories
           if c not in actual.columns or c not in submission.columns]
if missing:
    raise KeyError(f"missing target columns in input files: {missing}")

auc_scores = {}
for category in categories:
    y_true = actual[category].values
    y_pred = submission[category].values
    auc_scores[category] = roc_auc_score(y_true, y_pred)

# Final score: unweighted mean AUC across all target columns.
performance = sum(auc_scores.values()) / len(auc_scores)

# Bug fix: ensure <path>/<name> exists before writing; previously this
# raised FileNotFoundError when the output directory was not pre-created.
# (`import os.path` also binds the `os` package, so os.makedirs is in scope.)
out_dir = os.path.join(args.path, args.name)
os.makedirs(out_dir, exist_ok=True)
with open(os.path.join(out_dir, "result.txt"), "w") as f:
    f.write(str(performance))
|
|
|
|
|
|