MINGYISU committed
Commit 0e518fd · verified · 1 Parent(s): d7e78e8

update urls for qqmm (#38)

- add url for qqmm (5fd164df56de7d1c2a089ff2a330f779c316478f)
- fixed refresh issue (bf1a3f7ccd53bab9be62ccbfb9267ce6817e3498)

Files changed (2)
  1. urls.csv +1 -0
  2. utils.py +15 -7
urls.csv CHANGED
@@ -22,3 +22,4 @@ LLaVE-0.5B,https://huggingface.co/zhibinlan/LLaVE-0.5B
 UniME(LLaVA-OneVision-7B-LoRA-Res336),https://huggingface.co/DeepGlint-AI/UniME-LLaVA-OneVision-7B
 UniME(LLaVA-1.6-7B-LoRA-LowRes),https://huggingface.co/DeepGlint-AI/UniME-LLaVA-1.6-7B
 UniME(Phi-3.5-V-LoRA),https://huggingface.co/DeepGlint-AI/UniME-Phi3.5-V-4.2B
+QQMM-embed,https://github.com/QQ-MM/QQMM-embed
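For context, each row of urls.csv becomes an entry in the MODEL_URLS dictionary that utils.py (diff below) uses to hyperlink model names on the leaderboard. A minimal sketch of how the new row is looked up, assuming the two-column Models,URL header implied by get_urls(); the markdown link format is an illustrative assumption, since the body of convert_url is not shown in these hunks:

```python
import io
import pandas as pd

# Assumed CSV layout: a "Models" column and a "URL" column, as read by get_urls() in utils.py.
csv_text = """Models,URL
UniME(Phi-3.5-V-LoRA),https://huggingface.co/DeepGlint-AI/UniME-Phi3.5-V-4.2B
QQMM-embed,https://github.com/QQ-MM/QQMM-embed
"""

urls = pd.read_csv(io.StringIO(csv_text))
model_urls = dict(zip(urls["Models"], urls["URL"]))

# Illustrative only: render the new entry as a markdown hyperlink for the leaderboard table.
print(f"[QQMM-embed]({model_urls['QQMM-embed']})")
```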
utils.py CHANGED
@@ -101,8 +101,7 @@ Github link: https://github.com/TIGER-AI-Lab/VLM2Vec. \n
 Please send us an email at [email protected], attaching the JSON file. We will review your submission and update the leaderboard accordingly.
 """
 
-MODEL_URLS = pd.read_csv("urls.csv")
-MODEL_URLS = dict(zip(MODEL_URLS['Models'], MODEL_URLS['URL']))
+MODEL_URLS = get_urls("urls.csv")
 
 def create_hyperlinked_names(df):
     def convert_url(url, model_name):
@@ -130,14 +129,22 @@ def create_hyperlinked_names(df):
     df['Models'] = df['Models'].apply(add_link_to_model_name)
     return df
 
-def get_df():
+def fetch_data(url: str) -> pd.DataFrame:
     # fetch the leaderboard data
-    url = "https://huggingface.co/spaces/TIGER-Lab/MMEB/resolve/main/results.csv"
+    if url is None:
+        raise ValueError("URL Not Provided")
+    url = f"https://huggingface.co/spaces/TIGER-Lab/MMEB/resolve/main/{url}"
     response = requests.get(url, headers={"Authorization": f"Bearer {HF_TOKEN}"})
     if response.status_code != 200:
-        import sys
-        sys.exit(f"Error: {response.status_code}")
-    df = pd.read_csv(io.StringIO(response.text))
+        raise requests.HTTPError(f"Failed to fetch data: HTTP status code {response.status_code}")
+    return pd.read_csv(io.StringIO(response.text))
+
+def get_urls(csv: str='urls.csv') -> dict:
+    urls = fetch_data(csv)
+    return dict(zip(urls['Models'], urls['URL']))
+
+def get_df(csv="results.csv"):
+    df = fetch_data(csv)
     df.to_csv(CSV_DIR, index=False)  # update local file
     df['Model Size(B)'] = df['Model Size(B)'].apply(process_model_size)
     df = df.sort_values(by=['Overall'], ascending=False)
@@ -184,6 +191,7 @@ def add_new_eval(input_file):
 
 def refresh_data():
     df = get_df()
+    MODEL_URLS = get_urls()
     return df[COLUMN_NAMES]
 
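For reviewers who want to exercise the refactored fetch path outside the Space, here is a self-contained sketch of the flow after this commit. It mirrors fetch_data(), get_urls(), and get_df() from the diff above; HF_TOKEN and CSV_DIR are module-level names in utils.py that are stubbed here, and the post-processing done by the real get_df() (model-size parsing, sorting) is omitted:

```python
import io
import os

import pandas as pd
import requests

# Assumed stand-ins for the module-level config in utils.py.
HF_TOKEN = os.environ.get("HF_TOKEN", "")  # read token; may be unnecessary for public files
CSV_DIR = "results.csv"                    # local cache path written by get_df()


def fetch_data(url: str) -> pd.DataFrame:
    # Resolve a file name against the MMEB Space repo and parse the response as CSV.
    if url is None:
        raise ValueError("URL Not Provided")
    url = f"https://huggingface.co/spaces/TIGER-Lab/MMEB/resolve/main/{url}"
    response = requests.get(url, headers={"Authorization": f"Bearer {HF_TOKEN}"})
    if response.status_code != 200:
        raise requests.HTTPError(f"Failed to fetch data: HTTP status code {response.status_code}")
    return pd.read_csv(io.StringIO(response.text))


def get_urls(csv: str = "urls.csv") -> dict:
    # Model name -> homepage URL; re-read on each refresh so new rows (e.g. QQMM-embed) appear.
    urls = fetch_data(csv)
    return dict(zip(urls["Models"], urls["URL"]))


def get_df(csv: str = "results.csv") -> pd.DataFrame:
    # Leaderboard results, cached to CSV_DIR after each fetch (post-processing omitted here).
    df = fetch_data(csv)
    df.to_csv(CSV_DIR, index=False)
    return df


if __name__ == "__main__":
    model_urls = get_urls()
    print(model_urls.get("QQMM-embed"))  # https://github.com/QQ-MM/QQMM-embed
```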