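"""Submission handling for the RelBench leaderboard.

Builds an eval request entry from the submission form, validates it, writes it
to a local JSON file, and uploads it to the evaluation queue dataset repo.
"""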
import json
import os
from ast import literal_eval
from datetime import datetime, timezone

from src.display.formatting import styled_error, styled_message, styled_warning
from src.envs import API, EVAL_REQUESTS_PATH, TOKEN, QUEUE_REPO
from src.submission.check_validity import (
    already_submitted_models,
    check_model_card,
    get_model_size,
    is_model_on_hub,
)

REQUESTED_MODELS = None
USERS_TO_SUBMISSION_DATES = None


class CustomJSONEncoder(json.JSONEncoder):
    """JSON encoder that falls back to str() for objects json cannot serialize."""

    def default(self, obj):
        try:
            return super().default(obj)
        except TypeError:
            return str(obj)  # Convert non-serializable objects to strings


def add_new_eval_json(eval_entry, out_path):
    """Serialize an eval entry to a local JSON file."""
    with open(out_path, "w") as f:
        f.write(json.dumps(eval_entry, cls=CustomJSONEncoder))


def add_new_eval(
    author,
    email,
    relbench_version,
    model,
    official_or_not,
    test_performance,
    valid_performance,
    paper_url,
    github_url,
    honor_code,
    task_track,
):
    global REQUESTED_MODELS
    global USERS_TO_SUBMISSION_DATES
    if not REQUESTED_MODELS:
        # Lazily load the cache of already-submitted models on the first call.
        REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)

    # Map the human-readable task track to the short suffix used in file names.
    if task_track in ["Node Classification", "Entity Classification"]:
        task_type = "nc"
    elif task_track in ["Node Regression", "Entity Regression"]:
        task_type = "nr"
    elif task_track in ["Link Prediction", "Recommendation"]:
        task_type = "lp"
    else:
        return styled_error(f"Unknown task track: {task_track}")

    model_path = model + "_" + task_type
    current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")

    # Everything looks good; build the eval entry.
    print("Adding new eval")
    eval_entry = {
        "model": model,
        "author": author,
        "email": email,
        "relbench_version": relbench_version,
        "official_or_not": official_or_not,
        "test": test_performance,
        "valid": valid_performance,
        "paper_url": paper_url,
        "github_url": github_url,
        "honor_code": honor_code,
        "status": "PENDING",
        "submitted_time": current_time,
        "task": task_track,
        "private": False,
    }

    # Sanity-check that the submitted performance strings parse as Python literals.
    try:
        literal_eval(eval_entry["test"])
        literal_eval(eval_entry["valid"])
    except (ValueError, SyntaxError):
        return styled_error(
            "The test/validation performance you submitted does not follow the "
            "correct format. Please check the format and resubmit."
        )

    # TODO: Check for duplicate submissions.
    # if f"{model}" in REQUESTED_MODELS:
    #     return styled_error("This model has already been submitted.")
print("Creating eval file")
OUT_DIR = f"{EVAL_REQUESTS_PATH}/{model}"
os.makedirs(OUT_DIR, exist_ok=True)
out_path = f"{OUT_DIR}/{model_path}_eval_request_False.json"
print(eval_entry)
#with open(out_path, "w") as f:
# f.write(json.dumps(eval_entry))
add_new_eval_json(eval_entry, out_path)
print("Uploading eval file")
print(out_path)
print(QUEUE_REPO)
print(TOKEN)
print(API)
API.upload_file(
path_or_fileobj=out_path,
path_in_repo=out_path.split("eval-queue/")[1],
repo_id=QUEUE_REPO,
repo_type="dataset",
commit_message=f"Add {model} to eval queue",
)

    # Remove the local file once it has been uploaded to the queue repo.
    os.remove(out_path)

    return styled_message(
        "Your request has been submitted to the evaluation queue!\n"
        "Please wait for up to an hour for the model to show in the PENDING list."
    )
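

if __name__ == "__main__":
    # Minimal usage sketch with made-up placeholder values, assuming TOKEN and
    # QUEUE_REPO are configured in src.envs. Note that running this performs a
    # real upload to the queue repo; it is illustrative, not a test fixture.
    # The test/valid strings just need to parse with ast.literal_eval, e.g. a
    # dict literal mapping task names to metric values (names are hypothetical).
    result = add_new_eval(
        author="Jane Doe",  # hypothetical
        email="jane@example.com",  # hypothetical
        relbench_version="1.0.0",  # hypothetical
        model="my-gnn-baseline",  # hypothetical
        official_or_not=False,
        test_performance="{'user-churn': 0.70}",  # hypothetical metrics
        valid_performance="{'user-churn': 0.71}",  # hypothetical metrics
        paper_url="https://example.com/paper",
        github_url="https://github.com/example/repo",
        honor_code=True,
        task_track="Entity Classification",
    )
    print(result)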