Upload 2 files
app.py: +196 -303
constants.py: +6 -59
app.py
CHANGED
@@ -22,242 +22,136 @@ def upload_file(files):
     file_paths = [file.name for file in files]
     return file_paths
 
-# def add_new_eval(
-#     input_file,
-#     model_name_textbox: str,
-#     revision_name_textbox: str,
-#     model_link: str,
-#     team_name: str,
-#     contact_email: str,
-#     access_type: str,
-#     model_publish: str,
-#     model_resolution: str,
-#     model_fps: str,
-#     model_frame: str,
-#     model_video_length: str,
-#     model_checkpoint: str,
-#     model_commit_id: str,
-#     model_video_format: str
-# ):
-#     if input_file is None:
-#         return "Error! Empty file!"
-#     if model_link == '' or model_name_textbox == '' or contact_email == '':
-#         return gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)
-#     # upload_data=json.loads(input_file)
-#     upload_content = input_file
-#     submission_repo = Repository(local_dir=SUBMISSION_NAME, clone_from=SUBMISSION_URL, use_auth_token=HF_TOKEN, repo_type="dataset")
-#     submission_repo.git_pull()
-#     filename = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
-#
-#     now = datetime.datetime.now()
-#     update_time = now.strftime("%Y-%m-%d") # Capture update time
-#     with open(f'{SUBMISSION_NAME}/{filename}.zip','wb') as f:
-#         f.write(input_file)
-#     # shutil.copyfile(CSV_DIR, os.path.join(SUBMISSION_NAME, f"{input_file}"))
-#
-#     csv_data = pd.read_csv(CSV_DIR)
-#
-#     if revision_name_textbox == '':
-#         col = csv_data.shape[0]
-#         model_name = model_name_textbox.replace(',',' ')
-#     else:
-#         model_name = revision_name_textbox.replace(',',' ')
-#         model_name_list = csv_data['Model Name (clickable)']
-#         name_list = [name.split(']')[0][1:] for name in model_name_list]
-#         if revision_name_textbox not in name_list:
-#             col = csv_data.shape[0]
-#         else:
-#             col = name_list.index(revision_name_textbox)
-#     if model_link == '':
-#         model_name = model_name # no url
-#     else:
-#         model_name = '[' + model_name + '](' + model_link + ')'
-#
-#     os.makedirs(filename, exist_ok=True)
-#     with zipfile.ZipFile(io.BytesIO(input_file), 'r') as zip_ref:
-#         zip_ref.extractall(filename)
-#
-#     upload_data = {}
-#     for file in os.listdir(filename):
-#         if file.startswith('.') or file.startswith('__'):
-#             print(f"Skip the file: {file}")
-#             continue
-#         cur_file = os.path.join(filename, file)
-#         if os.path.isdir(cur_file):
-#             for subfile in os.listdir(cur_file):
-#                 if subfile.endswith(".json"):
-#                     with open(os.path.join(cur_file, subfile)) as ff:
-#                         cur_json = json.load(ff)
-#                     print(file, type(cur_json))
-#                     if isinstance(cur_json, dict):
-#                         print(cur_json.keys())
-#                         for key in cur_json:
-#                             upload_data[key.replace('_',' ')] = cur_json[key][0]
-#                             print(f"{key}:{cur_json[key][0]}")
-#         elif cur_file.endswith('json'):
-#             with open(cur_file) as ff:
-#                 cur_json = json.load(ff)
-#             print(file, type(cur_json))
-#             if isinstance(cur_json, dict):
-#                 print(cur_json.keys())
-#                 for key in cur_json:
-#                     upload_data[key.replace('_',' ')] = cur_json[key][0]
-#                     print(f"{key}:{cur_json[key][0]}")
-#     # add new data
-#     new_data = [model_name]
-#     print('upload_data:', upload_data)
-#     for key in TASK_INFO:
-#         if key in upload_data:
-#             new_data.append(upload_data[key])
-#         else:
-#             new_data.append(0)
-#     if team_name =='' or 'vbench' in team_name.lower():
-#         new_data.append("User Upload")
-#     else:
-#         new_data.append(team_name)
-#
-#     new_data.append(contact_email.replace(',',' and ')) # Add contact email [private]
-#     new_data.append(update_time) # Add the update time
-#     new_data.append(team_name)
-#     new_data.append(access_type)
-#         "Aesthetic Quality": "aesthetic_quality",
-#         "Imaging Quality": "imaging_quality",
-#         "Temporal Flickering": "temporal_flickering"
-#     }
-#     if input_file is None:
-#         return "Error! Empty file!"
-#     if model_link == '' or model_name_textbox == '' or contact_email == '':
-#         return gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)
-#
-#
+def add_new_eval_i2v(
+    input_file,
+    model_name_textbox: str,
+    revision_name_textbox: str,
+    model_link: str,
+    team_name: str,
+    contact_email: str,
+    access_type: str,
+    model_publish: str,
+    model_resolution: str,
+    model_fps: str,
+    model_frame: str,
+    model_video_length: str,
+    model_checkpoint: str,
+    model_commit_id: str,
+    model_video_format: str
+):
+    COLNAME2KEY = {
+        "Video-Text Camera Motion": "camera_motion",
+        "Video-Image Subject Consistency": "i2v_subject",
+        "Video-Image Background Consistency": "i2v_background",
+        "Subject Consistency": "subject_consistency",
+        "Background Consistency": "background_consistency",
+        "Motion Smoothness": "motion_smoothness",
+        "Dynamic Degree": "dynamic_degree",
+        "Aesthetic Quality": "aesthetic_quality",
+        "Imaging Quality": "imaging_quality",
+        "Temporal Flickering": "temporal_flickering"
+    }
+    if input_file is None:
+        return "Error! Empty file!"
+    if model_link == '' or model_name_textbox == '' or contact_email == '':
+        return gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)
+
+    upload_content = input_file
+    submission_repo = Repository(local_dir=SUBMISSION_NAME, clone_from=SUBMISSION_URL, use_auth_token=HF_TOKEN, repo_type="dataset")
+    submission_repo.git_pull()
+    filename = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
+
+    now = datetime.datetime.now()
+    update_time = now.strftime("%Y-%m-%d")  # Capture update time
+    with open(f'{SUBMISSION_NAME}/{filename}.zip', 'wb') as f:
+        f.write(input_file)
+    # shutil.copyfile(CSV_DIR, os.path.join(SUBMISSION_NAME, f"{input_file}"))
+
+    csv_data = pd.read_csv(I2V_DIR)
+
+    if revision_name_textbox == '':
+        col = csv_data.shape[0]
+        model_name = model_name_textbox.replace(',', ' ')
+    else:
+        model_name = revision_name_textbox.replace(',', ' ')
+        model_name_list = csv_data['Model Name (clickable)']
+        name_list = [name.split(']')[0][1:] for name in model_name_list]
+        if revision_name_textbox not in name_list:
+            col = csv_data.shape[0]
+        else:
+            col = name_list.index(revision_name_textbox)
+    if model_link == '':
+        model_name = model_name  # no url
+    else:
+        model_name = '[' + model_name + '](' + model_link + ')'
+
+    os.makedirs(filename, exist_ok=True)
+    with zipfile.ZipFile(io.BytesIO(input_file), 'r') as zip_ref:
+        zip_ref.extractall(filename)
+
+    upload_data = {}
+    for file in os.listdir(filename):
+        if file.startswith('.') or file.startswith('__'):
+            print(f"Skip the file: {file}")
+            continue
+        cur_file = os.path.join(filename, file)
+        if os.path.isdir(cur_file):
+            for subfile in os.listdir(cur_file):
+                if subfile.endswith(".json"):
+                    with open(os.path.join(cur_file, subfile)) as ff:
+                        cur_json = json.load(ff)
+                    print(file, type(cur_json))
+                    if isinstance(cur_json, dict):
+                        print(cur_json.keys())
+                        for key in cur_json:
+                            upload_data[key] = cur_json[key][0]
+                            print(f"{key}:{cur_json[key][0]}")
+        elif cur_file.endswith('json'):
+            with open(cur_file) as ff:
+                cur_json = json.load(ff)
+            print(file, type(cur_json))
+            if isinstance(cur_json, dict):
+                print(cur_json.keys())
+                for key in cur_json:
+                    upload_data[key] = cur_json[key][0]
+                    print(f"{key}:{cur_json[key][0]}")
+    # add new data
+    new_data = [model_name]
+    print('upload_data:', upload_data)
+    I2V_HEAD = ["Video-Text Camera Motion",
+                "Video-Image Subject Consistency",
+                "Video-Image Background Consistency",
+                "Subject Consistency",
+                "Background Consistency",
+                "Temporal Flickering",
+                "Motion Smoothness",
+                "Dynamic Degree",
+                "Aesthetic Quality",
+                "Imaging Quality"]
+    for key in I2V_HEAD:
+        sub_key = COLNAME2KEY[key]
+        if sub_key in upload_data:
+            new_data.append(upload_data[sub_key])
+        else:
+            new_data.append(0)
+    if team_name == '' or 'vbench' in team_name.lower():
+        new_data.append("User Upload")
+    else:
+        new_data.append(team_name)
+
+    new_data.append(contact_email.replace(',', ' and '))  # Add contact email [private]
+    new_data.append(update_time)  # Add the update time
+    new_data.append(team_name)
+    new_data.append(access_type)
+
+    csv_data.loc[col] = new_data
+    csv_data = csv_data.to_csv(I2V_DIR, index=False)
+    with open(INFO_DIR, 'a') as f:
+        f.write(f"{model_name}\t{update_time}\t{model_publish}\t{model_resolution}\t{model_fps}\t{model_frame}\t{model_video_length}\t{model_checkpoint}\t{model_commit_id}\t{model_video_format}\n")
+    submission_repo.push_to_hub()
+    print("success update", model_name)
+    return gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)
 
 def get_baseline_df():
     submission_repo = Repository(local_dir=SUBMISSION_NAME, clone_from=SUBMISSION_URL, use_auth_token=HF_TOKEN, repo_type="dataset")
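The `add_new_eval_i2v` function added above unzips the uploaded archive, reads every `*.json` file it finds, and takes `cur_json[key][0]` as the score for each dimension key listed in `COLNAME2KEY`. Below is a minimal sketch of a result file shaped the way that parser reads it; the key names come from `COLNAME2KEY`, while the file name and score values are placeholders.

```python
import json

# Hypothetical per-dimension result file, shaped the way add_new_eval_i2v
# reads it: each dimension key maps to a list whose first element is the
# score that ends up in the leaderboard row.
example_results = {
    "camera_motion": [0.88],
    "i2v_subject": [0.95],
    "i2v_background": [0.93],
    "subject_consistency": [0.96],
    "aesthetic_quality": [0.61],
}

with open("results.json", "w") as f:
    json.dump(example_results, f, indent=2)
```

Dimensions missing from the upload are filled with 0 when the new leaderboard row is assembled.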
@@ -338,78 +232,77 @@ with block:
                 outputs=data_component
             )
 
-            # fail_textbox_i2v = gr.Markdown('<span style="color:red;">Please ensure that the `Model Name`, `Project Page`, and `Email` are filled in correctly.</span>', elem_classes="markdown-text", visible=False)
+        with gr.TabItem("🚀 Submit here! ", elem_id="submit-tab-table", id=7):
+
+            with gr.Row():
+                gr.Markdown(SUBMIT_INTRODUCTION, elem_classes="markdown-text")
+
+            with gr.Row():
+                gr.Markdown("# Submit your world generation model evaluation json file here!", elem_classes="markdown-text")
+
+            with gr.Row():
+                gr.Markdown("Here is a required field", elem_classes="markdown-text")
+            with gr.Row():
+                with gr.Column():
+                    model_name_textbox_i2v = gr.Textbox(
+                        label="Model name", placeholder="Required field"
+                    )
+                    revision_name_textbox_i2v = gr.Textbox(
+                        label="Revision Model Name (Optional)", placeholder="If you need to update the previous results, please fill in this line"
+                    )
+                    access_type_i2v = gr.Dropdown(["Open Source", "Ready to Open Source", "API", "Close"], label="Please select the way user can access your model. You can update the content by revision_name, or contact the VBench Team.")
+
+                with gr.Column():
+                    model_link_i2v = gr.Textbox(
+                        label="Project Page/Paper Link/Github/HuggingFace Repo", placeholder="Required field. If filling in the wrong information, your results may be removed."
+                    )
+                    team_name_i2v = gr.Textbox(
+                        label="Your Team Name (If left blank, it will be user upload)", placeholder="User Upload"
+                    )
+                    contact_email_i2v = gr.Textbox(
+                        label="E-Mail (Will not be displayed)", placeholder="Required field"
+                    )
+            with gr.Row():
+                gr.Markdown("The following is optional and will be synced to [GitHub](https://github.com/Vchitect/VBench/tree/master/sampled_videos#what-are-the-details-of-the-video-generation-models)", elem_classes="markdown-text")
+            with gr.Row():
+                release_time_i2v = gr.Textbox(label="Time of Publish", placeholder="1970-01-01")
+                model_resolution_i2v = gr.Textbox(label="resolution", placeholder="Width x Height")
+                model_fps_i2v = gr.Textbox(label="model fps", placeholder="FPS(int)")
+                model_frame_i2v = gr.Textbox(label="model frame count", placeholder="INT")
+                model_video_length_i2v = gr.Textbox(label="model video length", placeholder="float(2.0)")
+                model_checkpoint_i2v = gr.Textbox(label="model checkpoint", placeholder="optional")
+                model_commit_id_i2v = gr.Textbox(label="github commit id", placeholder='main')
+                model_video_format_i2v = gr.Textbox(label="pipeline format", placeholder='mp4')
+            with gr.Column():
+                input_file_i2v = gr.components.File(label="Click to Upload a ZIP File", file_count="single", type='binary')
+                submit_button_i2v = gr.Button("Submit Eval")
+                submit_succ_button_i2v = gr.Markdown("Submit Success! Please press refresh and return to LeaderBoard!", visible=False)
+                fail_textbox_i2v = gr.Markdown('<span style="color:red;">Please ensure that the `Model Name`, `Project Page`, and `Email` are filled in correctly.</span>', elem_classes="markdown-text", visible=False)
 
+            submission_result_i2v = gr.Markdown()
+            # submit_button_i2v.click(
+            #     add_new_eval_i2v,
+            #     inputs = [
+            #         input_file_i2v,
+            #         model_name_textbox_i2v,
+            #         revision_name_textbox_i2v,
+            #         model_link_i2v,
+            #         team_name_i2v,
+            #         contact_email_i2v,
+            #         release_time_i2v,
+            #         access_type_i2v,
+            #         model_resolution_i2v,
+            #         model_fps_i2v,
+            #         model_frame_i2v,
+            #         model_video_length_i2v,
+            #         model_checkpoint_i2v,
+            #         model_commit_id_i2v,
+            #         model_video_format_i2v
+            #     ],
+            #     outputs=[submit_button_i2v, submit_succ_button_i2v, fail_textbox_i2v]
+            # )
 
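The `submit_button_i2v.click(...)` wiring is left commented out in this change, so the new form is not yet connected to `add_new_eval_i2v`. If it were enabled, the hookup would presumably be the block above uncommented, as sketched below; it is shown only for illustration and would have to live inside the same `with block:` UI definition.

```python
# Illustrative only: mirrors the commented-out block in the diff above.
# Clicking "Submit Eval" calls add_new_eval_i2v with the form values and
# toggles the button / success / failure widgets via the returned updates.
submit_button_i2v.click(
    add_new_eval_i2v,
    inputs=[
        input_file_i2v,
        model_name_textbox_i2v,
        revision_name_textbox_i2v,
        model_link_i2v,
        team_name_i2v,
        contact_email_i2v,
        release_time_i2v,
        access_type_i2v,
        model_resolution_i2v,
        model_fps_i2v,
        model_frame_i2v,
        model_video_length_i2v,
        model_checkpoint_i2v,
        model_commit_id_i2v,
        model_video_format_i2v,
    ],
    outputs=[submit_button_i2v, submit_succ_button_i2v, fail_textbox_i2v],
)
```

Note that with this ordering `release_time_i2v` lands in the function's `access_type` parameter and `access_type_i2v` in `model_publish`, which looks unintended and is worth double-checking before the wiring is enabled.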
constants.py
CHANGED
@@ -67,69 +67,16 @@ LEADERBORAD_INTRODUCTION = """# WorldScore Leaderboard
     <a href='https://huggingface.co/datasets/Howieeeee/WorldScore'><img src='https://img.shields.io/badge/WordScore-Dataset-yellow?logo=huggingface&logoColor=yellow'></a>
 </div>
 
+[Paper](https://arxiv.org/abs/2503.15669) | [Website](https://haoyi-duan.github.io/WorldScore/) | [Code](https://github.com/haoyi-duan/WorldScore) | [Dataset](https://huggingface.co/datasets/Howieeeee/WorldScore)
+
 **Join Leaderboard**: Please see the [instructions](https://github.com/Vchitect/VBench/tree/master?tab=readme-ov-file#trophy-leaderboard) for 3 options to participate. One option is to follow [VBench Usage info](https://github.com/Vchitect/VBench?tab=readme-ov-file#usage), and upload the generated `result.json` file here. After clicking the `Submit here!` button, click the `Refresh` button.
 """
 
-SUBMIT_INTRODUCTION = """#
+SUBMIT_INTRODUCTION = """# Instruction for WorldScore Leaderboard Submission
 
-1. Please note that you need to obtain the file `evaluation_results/*.json` by running VBench in Github. You may conduct an [Offline Check](https://github.com/Vchitect/VBench?tab=readme-ov-file#get-final-score-and-submit-to-leaderboard) before uploading.
+1. Please note that you need to obtain the file `worldscore_output/worldscore.json` by running WorldScore in Github. You may conduct an [Offline Check](https://github.com/Vchitect/VBench?tab=readme-ov-file#get-final-score-and-submit-to-leaderboard) before uploading.
 2. Then, pack these JSON files into a `ZIP` archive, ensuring that the top-level directory of the ZIP contains the individual JSON files.
 3. Finally, upload the ZIP archive below.
 
-⚠️
-"""
+⚠️ Submissions that do not correctly fill in the model name and model link may be removed by the WorldScore team. The contact information you filled in will not be made public.
+"""
-
-TABLE_INTRODUCTION = """
-"""
-
-LEADERBORAD_INFO = """
-VBench, a comprehensive benchmark suite for video generative models. We design a comprehensive and hierarchical Evaluation Dimension Suite to decompose "video generation quality" into multiple well-defined dimensions to facilitate fine-grained and objective evaluation. For each dimension and each content category, we carefully design a Prompt Suite as test cases, and sample Generated Videos from a set of video generation models. For each evaluation dimension, we specifically design an Evaluation Method Suite, which uses carefully crafted method or designated pipeline for automatic objective evaluation. We also conduct Human Preference Annotation for the generated videos for each dimension, and show that VBench evaluation results are well aligned with human perceptions. VBench can provide valuable insights from multiple perspectives.
-"""
-
-CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
-CITATION_BUTTON_TEXT = r"""@inproceedings{huang2023vbench,
-    title={{VBench}: Comprehensive Benchmark Suite for Video Generative Models},
-    author={Huang, Ziqi and He, Yinan and Yu, Jiashuo and Zhang, Fan and Si, Chenyang and Jiang, Yuming and Zhang, Yuanhan and Wu, Tianxing and Jin, Qingyang and Chanpaisit, Nattapol and Wang, Yaohui and Chen, Xinyuan and Wang, Limin and Lin, Dahua and Qiao, Yu and Liu, Ziwei},
-    booktitle={Proceedings of the IEEE/CVF conference on computer vision and pattern recognition},
-    year={2024}
-}"""
-
-QUALITY_CLAIM_TEXT = "We use all the videos on Sora website (https://openai.com/sora) for a preliminary evaluation, including the failure case videos Sora provided."
-
-I2V_CLAIM_TEXT = "Since the open-sourced SVD models do not accept text input during the I2V stage, we are unable to evaluate its `camera motion` in terms of `video-text consistency`. The total score is calculated based on all dimensions except `camera motion`."
-
-LONG_CLAIM_TEXT = ""
-
-NORMALIZE_DIC = {
-    "subject consistency": {"Min": 0.1462, "Max": 1.0},
-    "background consistency": {"Min": 0.2615, "Max": 1.0},
-    "temporal flickering": {"Min": 0.6293, "Max": 1.0},
-    "motion smoothness": {"Min": 0.706, "Max": 0.9975},
-    "dynamic degree": {"Min": 0.0, "Max": 1.0},
-    "aesthetic quality": {"Min": 0.0, "Max": 1.0},
-    "imaging quality": {"Min": 0.0, "Max": 1.0},
-    "object class": {"Min": 0.0, "Max": 1.0},
-    "multiple objects": {"Min": 0.0, "Max": 1.0},
-    "human action": {"Min": 0.0, "Max": 1.0},
-    "color": {"Min": 0.0, "Max": 1.0},
-    "spatial relationship": {"Min": 0.0, "Max": 1.0},
-    "scene": {"Min": 0.0, "Max": 0.8222},
-    "appearance style": {"Min": 0.0009, "Max": 0.2855},
-    "temporal style": {"Min": 0.0, "Max": 0.364},
-    "overall consistency": {"Min": 0.0, "Max": 0.364}
-}
-
-NORMALIZE_DIC_I2V = {
-    "Video-Text Camera Motion": {"Min": 0.0, "Max": 1.0},
-    "Video-Image Subject Consistency": {"Min": 0.1462, "Max": 1.0},
-    "Video-Image Background Consistency": {"Min": 0.2615, "Max": 1.0},
-    "Subject Consistency": {"Min": 0.1462, "Max": 1.0},
-    "Background Consistency": {"Min": 0.2615, "Max": 1.0},
-    "Motion Smoothness": {"Min": 0.7060, "Max": 0.9975},
-    "Dynamic Degree": {"Min": 0.0, "Max": 1.0},
-    "Aesthetic Quality": {"Min": 0.0, "Max": 1.0},
-    "Imaging Quality": {"Min": 0.0, "Max": 1.0},
-    "Temporal Flickering": {"Min": 0.6293, "Max": 1.0}
-}
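Step 2 of the updated `SUBMIT_INTRODUCTION` asks submitters to place the JSON results at the top level of the ZIP archive. A minimal sketch of packing the file named in step 1 that way (the archive name is a placeholder; `zipfile` is from the Python standard library):

```python
import zipfile

# Put worldscore.json at the top level of the archive, as required by
# step 2 of the submission instructions, rather than nested under
# worldscore_output/.
with zipfile.ZipFile("worldscore_submission.zip", "w") as zf:
    zf.write("worldscore_output/worldscore.json", arcname="worldscore.json")
```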