emozilla committed
Commit 1e5dafb · Parent: 1684477

increase backoff time for subtensor errors

Files changed (1):
1. app.py (+12 -8)
app.py CHANGED
@@ -25,8 +25,8 @@ VALIDATOR_WANDB_PROJECT = os.environ["VALIDATOR_WANDB_PROJECT"]
 H4_TOKEN = os.environ.get("H4_TOKEN", None)
 API = HfApi(token=H4_TOKEN)
 REPO_ID = "NousResearch/finetuning_subnet_leaderboard"
-METAGRAPH_RETRIES = 5
-METAGRAPH_DELAY_SECS = 3
+METAGRAPH_RETRIES = 10
+METAGRAPH_DELAY_SECS = 30
 NETUID = 6
 SUBNET_START_BLOCK = 2225782
 SECONDS_PER_BLOCK = 12
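METAGRAPH_RETRIES and METAGRAPH_DELAY_SECS drive the retry loop around the subtensor and metagraph fetch; the change raises the budget from 5 attempts 3 s apart to 10 attempts 30 s apart, which is what the commit message means by increasing the backoff time. A minimal sketch of the kind of loop these constants back, assuming a hypothetical helper named get_subtensor_and_metagraph (the actual function in app.py may differ):

import time
import typing
import bittensor as bt

def get_subtensor_and_metagraph() -> typing.Tuple[bt.subtensor, bt.metagraph]:
    # Retry the chain queries, sleeping METAGRAPH_DELAY_SECS between attempts.
    for attempt in range(METAGRAPH_RETRIES):
        try:
            subtensor = bt.subtensor("finney")
            metagraph = subtensor.metagraph(NETUID)
            return subtensor, metagraph
        except Exception:
            # Give up only after the final attempt; otherwise back off and retry.
            if attempt == METAGRAPH_RETRIES - 1:
                raise
            time.sleep(METAGRAPH_DELAY_SECS)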
@@ -134,14 +134,16 @@ def get_float_score(key: str, history) -> typing.Tuple[typing.Optional[float], bool]:
         return float(data[-1]), False
     return None, False
 
-def get_sample(uid, history) -> typing.Optional[typing.Tuple[str, str]]:
+def get_sample(uid, history) -> typing.Optional[typing.Tuple[str, str, str]]:
     prompt_key = f"sample_prompt_data.{uid}"
     response_key = f"sample_response_data.{uid}"
-    if prompt_key and response_key in history:
+    truth_key = f"sample_truth_data.{uid}"
+    if prompt_key in history and response_key in history and truth_key in history:
         prompt = list(history[prompt_key])[-1]
         response = list(history[response_key])[-1]
-        if isinstance(prompt, str) and isinstance(response, str):
-            return prompt, response
+        truth = list(history[truth_key])[-1]
+        if isinstance(prompt, str) and isinstance(response, str) and isinstance(truth, str):
+            return prompt, response, truth
     return None
 
 def get_scores(uids: typing.List[int]) -> typing.Dict[int, typing.Dict[str, typing.Optional[float | str]]]:
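Besides adding the ground-truth sample, this hunk fixes an operator-precedence bug: the old guard "if prompt_key and response_key in history" parses as "prompt_key and (response_key in history)", so the prompt column was never actually checked before being indexed. A small self-contained illustration (a plain dict standing in for the wandb run history):

# Only the response column exists in this fake history.
history = {"sample_response_data.5": ["hello"]}

prompt_key = "sample_prompt_data.5"
response_key = "sample_response_data.5"

# Old check: a non-empty string is truthy, so this passes even though
# history[prompt_key] would fail right afterwards.
print(bool(prompt_key and response_key in history))        # True

# New check: explicit membership test for every key.
print(prompt_key in history and response_key in history)   # False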
@@ -248,8 +250,10 @@ with demo:
     for entry in leaderboard_df:
         sample = scores[entry.uid]["sample"]
         if sample is not None:
-            with gr.Tab(f"{entry.namespace}/{entry.name} ({entry.commit[0:8]})"):
-                gr.Chatbot([sample])
+            name = f"{entry.namespace}/{entry.name} ({entry.commit[0:8]})"
+            with gr.Tab(name):
+                gr.Chatbot([(sample[0], sample[1])])
+                # gr.Chatbot([(sample[0], f"*{name}*: {sample[1]}"), (None, f"*GPT-4*: {sample[2]}")])
 
     show_stale = gr.Checkbox(label="Show Stale", interactive=True)
     leaderboard_table = gr.components.Dataframe(
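The Chatbot component here uses Gradio's tuple message format: a list of (user, bot) string pairs, where None hides one side of a turn. The commented-out line sketches how the newly returned ground-truth string (sample[2]) could be shown as a second turn attributed to GPT-4. A standalone sketch of that rendering, with made-up sample values:

import gradio as gr

# Hypothetical (prompt, model response, GPT-4 ground truth) triple,
# matching what get_sample now returns.
sample = ("What is the capital of France?",
          "Paris.",
          "The capital of France is Paris.")

with gr.Blocks() as demo:
    name = "example-namespace/example-model (0123abcd)"
    with gr.Tab(name):
        # One (user, bot) pair per exchange; None hides that side of the turn.
        gr.Chatbot([(sample[0], f"*{name}*: {sample[1]}"),
                    (None, f"*GPT-4*: {sample[2]}")])

demo.launch()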
 