Gradio update + submit fixes and temp changes
- app.py +1 -2
- requirements.txt +2 -3
- src/submission/submit.py +34 -20
app.py CHANGED

@@ -1,5 +1,4 @@
 import gradio as gr
-from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns,SearchColumns
 import pandas as pd
 from apscheduler.schedulers.background import BackgroundScheduler
 #from huggingface_hub import snapshot_download
@@ -358,7 +357,7 @@ with demo:
 
     with gr.Row():
         with gr.Column():
-            model_name_textbox = gr.Textbox(label="Model name")
+            model_name_textbox = gr.Textbox(label="Model name", placeholder="org/model-name" )
 
             submit_button = gr.Button("Submit Eval", variant="huggingface", interactive=False )
 
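The placeholder hints at the expected `org/model-name` format, and `src/submission/submit.py` below now yields status strings instead of returning a single value. A minimal sketch, assuming a Markdown output component named `submission_result` (the actual wiring is not part of this diff), of how a generator callback streams those messages through the "Submit Eval" button:

```python
# Minimal sketch (assumed wiring, not part of this diff): Gradio treats a
# generator callback as a streaming handler, so every `yield` updates the
# output component in place.
import gradio as gr

def fake_add_new_eval(model_name: str):
    # Stand-in for add_new_eval from src/submission/submit.py
    yield "Checking model on the Hub ..."
    if "/" not in model_name:
        yield "Please use the org/model-name format."
        return
    yield "✅ Model added to the evaluation queue."

with gr.Blocks() as demo:
    model_name_textbox = gr.Textbox(label="Model name", placeholder="org/model-name")
    submit_button = gr.Button("Submit Eval", variant="huggingface")
    submission_result = gr.Markdown()  # assumed name; the real output component is defined elsewhere in app.py
    submit_button.click(fake_add_new_eval, [model_name_textbox], submission_result)

if __name__ == "__main__":
    demo.launch()
```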
requirements.txt CHANGED

@@ -1,10 +1,9 @@
 APScheduler
 black
 datasets
-gradio
+gradio==5.29.0
 gradio[oauth]
-
-gradio_client
+gradio_client==1.10.0
 huggingface-hub>=0.18.0
 matplotlib
 numpy
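Pinning `gradio==5.29.0` and `gradio_client==1.10.0` keeps the Space from picking up breaking Gradio releases on rebuild. If you want to confirm the pins at runtime, a small optional check like the following (illustrative, not part of this commit) can be added to the app's startup code:

```python
# Optional startup check (illustrative, not in this commit): warn if the
# installed versions drift from the pins in requirements.txt.
from importlib.metadata import version

EXPECTED = {"gradio": "5.29.0", "gradio_client": "1.10.0"}

for package, expected in EXPECTED.items():
    installed = version(package)
    if installed != expected:
        print(f"Warning: {package} {installed} installed, but {expected} is pinned")
```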
src/submission/submit.py CHANGED

@@ -29,6 +29,8 @@ def add_new_eval(
     if not REQUESTED_MODELS:
         REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)
 
+    yield "..."
+
     user_name = ""
     model_path = model
     if "/" in model:
@@ -42,8 +44,8 @@ def add_new_eval(
     progress(0.1, desc=f"Checking model {model} on hub")
 
     if not is_model_on_hub(model_name=model, token=TOKEN, test_tokenizer=True): #revision=revision
-
-
+        yield styled_error("Model does not exist on HF Hub. Please select a valid model name.")
+        return
 
     ##check for org banning
     progress(0.2, desc=f"Checking for banned orgs")
@@ -53,9 +55,10 @@ def add_new_eval(
     }]
 
     if user_name in [banned_org['org_name'] for banned_org in banned_orgs]:
-
+        yield styled_error(
            f"Your org \"{user_name}\" is banned from submitting models on ABL. If you think this is a mistake then please contact [email protected]"
-            )
+        )
+        return
 
     """
     if model_type is None or model_type == "":
@@ -80,25 +83,31 @@ def add_new_eval(
     try:
         model_info = API.model_info(repo_id=model)#, revision=revision
     except Exception:
-
-
+        yield styled_error("Could not get your model information. Please fill it up properly.")
+        return
+
     progress(0.3, desc=f"Checking model size")
     model_size = get_model_size(model_info=model_info)#, precision=precision
 
-    if model_size>
-
-
+    if model_size>150:##********************CHANGE
+        yield styled_error("We currently accept community-submitted models up to 15 billion parameters only. If you represent an organization then please contact us at [email protected]")
+        return
+
     # Were the model card and license filled?
     try:
         license = model_info.cardData["license"]
     except Exception:
-
+        yield styled_error("Please select a license for your model")
+        return
+
+    progress(0.5, desc=f"Checking model card")
 
     modelcard_OK, error_msg = check_model_card(model)
     if not modelcard_OK:
-
+        yield styled_error(error_msg)
+        return
 
-
+
 
     ##check if org have submitted in the last 30 days
     progress(0.6, desc=f"Checking last submission date")
@@ -112,17 +121,19 @@ def add_new_eval(
 
     time_since_last_submission = datetime.now() - most_recent_submission
     if time_since_last_submission < timedelta(days=30):
-
+        yield styled_warning(
            f"Your org \"{user_name}\" have already submitted a model in the last 30 days. Please wait before submitting another model. For exceptions please contact [email protected]"
        )
+        return
 
 
-
+
     progress(0.8, desc=f"Checking same model submissions")
 
     # Check for duplicate submission
     if f"{model}" in REQUESTED_MODELS: #_{revision}_{precision}
-
+        yield styled_warning("This model has been already submitted.")
+        return
 
     # Seems good, creating the eval
     print("Preparing a new eval")
@@ -143,7 +154,8 @@ def add_new_eval(
         #"private": False,
     }
 
-
+
+    progress(0.9, desc=f"Creating Eval ...")
 
     print("Creating eval file")
     OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
@@ -174,9 +186,10 @@ def add_new_eval(
 
     if queue_len == 0:
         queue_data = []
-    elif queue_len >=
-
-
+    elif queue_len >= 10:##********************CHANGE
+        yield styled_warning("The evaluation queue is full at the moment. Please try again in one hour")
+        return
+
     queue_data.append(eval_entry)
 
     print(queue_data)
@@ -208,6 +221,7 @@ def add_new_eval(
     os.remove(out_path)
 
 
-
+    yield styled_message(
        "✅ Good news! Your model has been added to the evaluation queue.<br>If you do not see the results after 3 hours then please let us know by opening a community discussion."
    )
+    return
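The recurring pattern in this file is: yield a styled HTML message for the UI, then `return` so the generator stops and no further checks run. The `styled_error` / `styled_warning` / `styled_message` helpers come from the Space's formatting module; the bodies below are plausible stand-ins only, used to make the sketch self-contained, and the threshold mirrors the value flagged with `##CHANGE` in the diff:

```python
# Self-contained sketch of the yield-then-return validation pattern used in
# add_new_eval. The styled_* bodies are assumptions (the real helpers live in
# the Space's display/formatting code and may render differently).
def styled_error(error: str) -> str:
    return f"<p style='color: red; font-size: 20px; text-align: center;'>{error}</p>"

def styled_warning(warn: str) -> str:
    return f"<p style='color: orange; font-size: 20px; text-align: center;'>{warn}</p>"

def styled_message(message: str) -> str:
    return f"<p style='color: green; font-size: 20px; text-align: center;'>{message}</p>"

def check_model_size(model_size: float):
    """One check in isolation: 150 is the temporary ##CHANGE threshold from the diff."""
    if model_size > 150:
        yield styled_error("Model is too large for community submissions.")
        return  # stops the generator; later checks never run
    yield styled_message("Size check passed.")

# Consuming the generator the way Gradio does: iterate and render each message.
for message in check_model_size(200):
    print(message)
```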