"""Functions to populate the leaderboard"""

import glob
import json
import os
import traceback

import pandas as pd

from src.display.utils import auto_eval_column_attrs
from src.leaderboard.read_evals import get_raw_assessment_results


def get_leaderboard_df(eval_results_path, eval_requests_path, cols, benchmark_cols):
"""Read all the runs in the folder and return a dataframe
Args:
eval_results_path: Path to the assessment result files
eval_requests_path: Path to the assessment request files
cols: Columns names to include in the dataframe
benchmark_cols: Risk categories column names (display names)
Returns:
Pandas dataframe for the leaderboard
"""
    try:
        assessment_results = get_raw_assessment_results(eval_results_path, eval_requests_path)

        # If we get results, convert them to a dataframe
        if len(assessment_results) > 0:
            # Create dataframe from assessment results
            all_df = pd.DataFrame.from_records([r.to_dict() for r in assessment_results])

            # Ensure we have all the needed display columns
            all_columns = set(all_df.columns)
            for col in benchmark_cols:
                if col not in all_columns:
                    print(f"Warning: Column '{col}' missing, adding empty column")
                    all_df[col] = 10.0  # Default to highest risk

            # Sort by Trust Score (descending: higher scores rank first)
            if auto_eval_column_attrs.overall_risk.name in all_df.columns:
                all_df = all_df.sort_values(by=[auto_eval_column_attrs.overall_risk.name], ascending=False)

            return all_df

        # Empty dataframe with all columns
        return pd.DataFrame(columns=cols + benchmark_cols)
    except Exception as e:
        print(f"Error reading evaluation results: {e}")
        traceback.print_exc()
        # Return an empty dataframe with all columns
        return pd.DataFrame(columns=cols + benchmark_cols)
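
# A minimal usage sketch (assumed names: COLS and BENCHMARK_COLS would come
# from the app's configuration, e.g. src.display.utils; the paths are
# placeholders, not the app's real locations):
#
#     leaderboard_df = get_leaderboard_df(
#         eval_results_path="eval-results",
#         eval_requests_path="eval-queue",
#         cols=COLS,
#         benchmark_cols=BENCHMARK_COLS,
#     )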


def get_evaluation_queue_df(eval_requests_path, eval_cols):
    """Read from the evaluation queue directory and return dataframes for each status

    Args:
        eval_requests_path: Path to the assessment request files
        eval_cols: Columns for the queue dataframes

    Returns:
        Tuple of dataframes (finished, running, pending, rejected)
    """
    try:
        # Find all request files
        request_files = glob.glob(os.path.join(eval_requests_path, "*.json"))

        finished_data = []
        running_data = []
        pending_data = []
        rejected_data = []

        for file_path in request_files:
            try:
                with open(file_path, "r", encoding="utf-8") as f:
                    data = json.load(f)

                # Extract the relevant fields
                row = {
                    "library": data.get("library", ""),
                    "language": data.get("language", ""),
                    "status": data.get("status", "UNKNOWN"),
                }

                # Route the row to the right status bucket
                if row["status"] == "FINISHED":
                    finished_data.append(row)
                elif row["status"] == "RUNNING":
                    running_data.append(row)
                elif row["status"] == "PENDING":
                    pending_data.append(row)
                elif row["status"] == "REJECTED":
                    rejected_data.append(row)
            except Exception as e:
                print(f"Error reading request file {file_path}: {e}")
                continue

        # Convert to dataframes
        finished_df = pd.DataFrame(finished_data, columns=eval_cols)
        running_df = pd.DataFrame(running_data, columns=eval_cols)
        pending_df = pd.DataFrame(pending_data, columns=eval_cols)
        rejected_df = pd.DataFrame(rejected_data, columns=eval_cols)

        return finished_df, running_df, pending_df, rejected_df
    except Exception as e:
        print(f"Error reading evaluation queue: {e}")
        # Return empty dataframes
        empty_df = pd.DataFrame(columns=eval_cols)
        return empty_df.copy(), empty_df.copy(), empty_df.copy(), empty_df.copy()
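

if __name__ == "__main__":
    # Ad-hoc smoke test: a sketch only, assuming a local "eval-queue" directory
    # of request JSON files; the column list here is illustrative, not the
    # app's real configuration.
    queue_cols = ["library", "language", "status"]
    finished, running, pending, rejected = get_evaluation_queue_df("eval-queue", queue_cols)
    print(
        f"finished={len(finished)} running={len(running)} "
        f"pending={len(pending)} rejected={len(rejected)}"
    )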