lixuejing committed on
Commit
271401b
·
1 Parent(s): df8d09e
Files changed (3) hide show
  1. app.py +4 -2
  2. src/envs.py +1 -0
  3. src/submission/submit.py +23 -2
app.py CHANGED
@@ -26,7 +26,7 @@ from src.display.utils import (
26
  Precision,
27
  NUMERIC_INTERVALS
28
  )
29
- from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, DYNAMIC_INFO_REPO, DYNAMIC_INFO_FILE_PATH, DYNAMIC_INFO_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
30
  from src.populate import get_evaluation_queue_df, get_leaderboard_df
31
  from src.submission.submit import add_new_eval
32
  from src.scripts.update_all_request_files import update_dynamic_files
@@ -67,7 +67,7 @@ def init_space():
67
  cols=COLS,
68
  benchmark_cols=BENCHMARK_COLS
69
  )
70
- #update_collections(original_df.copy())
71
  leaderboard_df = original_df.copy()
72
 
73
  plot_df = create_plot_df(create_scores_df(raw_data))
@@ -421,6 +421,7 @@ with demo:
421
  model_name_textbox = gr.Textbox(label="Model name")
422
  model_url_textbox = gr.Textbox(label="Model online api url")
423
  revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
 
424
  model_type = gr.Dropdown(
425
  choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
426
  label="Model type",
@@ -458,6 +459,7 @@ with demo:
458
  base_model_name_textbox,
459
  revision_name_textbox,
460
  precision,
 
461
  weight_type,
462
  model_type,
463
  ],
 
26
  Precision,
27
  NUMERIC_INTERVALS
28
  )
29
+ from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, DYNAMIC_INFO_REPO, DYNAMIC_INFO_FILE_PATH, DYNAMIC_INFO_PATH, IS_PUBLIC, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
30
  from src.populate import get_evaluation_queue_df, get_leaderboard_df
31
  from src.submission.submit import add_new_eval
32
  from src.scripts.update_all_request_files import update_dynamic_files
 
67
  cols=COLS,
68
  benchmark_cols=BENCHMARK_COLS
69
  )
70
+ update_collections(original_df.copy())
71
  leaderboard_df = original_df.copy()
72
 
73
  plot_df = create_plot_df(create_scores_df(raw_data))
 
421
  model_name_textbox = gr.Textbox(label="Model name")
422
  model_url_textbox = gr.Textbox(label="Model online api url")
423
  revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
424
+ private = gr.Checkbox(False, label="Private", visible=not IS_PUBLIC)
425
  model_type = gr.Dropdown(
426
  choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
427
  label="Model type",
 
459
  base_model_name_textbox,
460
  revision_name_textbox,
461
  precision,
462
+ private,
463
  weight_type,
464
  model_type,
465
  ],
src/envs.py CHANGED
@@ -20,6 +20,7 @@ QUEUE_REPO = "open-cn-llm-leaderboard/vlm_requests"
20
  DYNAMIC_INFO_REPO = "open-cn-llm-leaderboard/vlm_dynamic_model_information"
21
  RESULTS_REPO = "open-cn-llm-leaderboard/vlm_results"
22
 
 
23
  # If you setup a cache later, just change HF_HOME
24
  CACHE_PATH=os.getenv("HF_HOME", ".")
25
 
 
20
  DYNAMIC_INFO_REPO = "open-cn-llm-leaderboard/vlm_dynamic_model_information"
21
  RESULTS_REPO = "open-cn-llm-leaderboard/vlm_results"
22
 
23
+ IS_PUBLIC = bool(os.environ.get("IS_PUBLIC", True))
24
  # If you setup a cache later, just change HF_HOME
25
  CACHE_PATH=os.getenv("HF_HOME", ".")
26
 
src/submission/submit.py CHANGED
@@ -3,7 +3,7 @@ import os
3
  from datetime import datetime, timezone
4
 
5
  from src.display.formatting import styled_error, styled_message, styled_warning
6
- from src.envs import API, EVAL_REQUESTS_PATH, TOKEN, QUEUE_REPO
7
  from src.submission.check_validity import (
8
  already_submitted_models,
9
  check_model_card,
@@ -21,6 +21,7 @@ def add_new_eval(
21
  base_model: str,
22
  revision: str,
23
  precision: str,
 
24
  weight_type: str,
25
  model_type: str,
26
  ):
@@ -84,6 +85,7 @@ def add_new_eval(
84
  "base_model": base_model,
85
  "revision": revision,
86
  "precision": precision,
 
87
  "weight_type": weight_type,
88
  "status": "PENDING",
89
  "submitted_time": current_time,
@@ -113,7 +115,26 @@ def add_new_eval(
113
  repo_id=QUEUE_REPO,
114
  repo_type="dataset",
115
  commit_message=f"Add {model} to eval queue",
116
- create_pr=1,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
117
  )
118
 
119
  # Remove the local file
 
3
  from datetime import datetime, timezone
4
 
5
  from src.display.formatting import styled_error, styled_message, styled_warning
6
+ from src.envs import API, EVAL_REQUESTS_PATH, DYNAMIC_INFO_PATH, DYNAMIC_INFO_FILE_PATH, DYNAMIC_INFO_REPO, TOKEN, QUEUE_REPO, RATE_LIMIT_PERIOD, RATE_LIMIT_QUOTA
7
  from src.submission.check_validity import (
8
  already_submitted_models,
9
  check_model_card,
 
21
  base_model: str,
22
  revision: str,
23
  precision: str,
24
+ private: str,
25
  weight_type: str,
26
  model_type: str,
27
  ):
 
85
  "base_model": base_model,
86
  "revision": revision,
87
  "precision": precision,
88
+ "private": private,
89
  "weight_type": weight_type,
90
  "status": "PENDING",
91
  "submitted_time": current_time,
 
115
  repo_id=QUEUE_REPO,
116
  repo_type="dataset",
117
  commit_message=f"Add {model} to eval queue",
118
+ )
119
+
120
+ # We want to grab the latest version of the submission file to not accidentally overwrite it
121
+ snapshot_download(
122
+ repo_id=DYNAMIC_INFO_REPO, local_dir=DYNAMIC_INFO_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30
123
+ )
124
+
125
+ with open(DYNAMIC_INFO_FILE_PATH) as f:
126
+ all_supplementary_info = json.load(f)
127
+
128
+ all_supplementary_info[model] = supplementary_info
129
+ with open(DYNAMIC_INFO_FILE_PATH, "w") as f:
130
+ json.dump(all_supplementary_info, f, indent=2)
131
+
132
+ API.upload_file(
133
+ path_or_fileobj=DYNAMIC_INFO_FILE_PATH,
134
+ path_in_repo=DYNAMIC_INFO_FILE_PATH.split("/")[-1],
135
+ repo_id=DYNAMIC_INFO_REPO,
136
+ repo_type="dataset",
137
+ commit_message=f"Add {model} to dynamic info queue",
138
  )
139
 
140
  # Remove the local file