import gradio as gr
import bittensor as bt
import typing
from bittensor.extrinsics.serving import get_metadata
from dataclasses import dataclass
import requests
import math
import os
import datetime
import time
from dotenv import load_dotenv
from huggingface_hub import HfApi
from apscheduler.schedulers.background import BackgroundScheduler

load_dotenv()

FONT = """"""
TITLE = """

Subnet 6 Leaderboard

"""
IMAGE = """nousgirl"""
HEADER = """

Subnet 6 is a Bittensor subnet that incentivizes the creation of the best open models by evaluating submissions on a constant stream of newly generated synthetic GPT-4 data. The models with the best head-to-head loss on the evaluation data receive a steady emission of TAO.

"""
EVALUATION_DETAILS = """Name is the 🤗 Hugging Face model name (click to go to the model card). Rewards / Day are the expected rewards per day for each model. Perplexity represents the loss on all of the evaluation data for the model as calculated by the validator (lower is better). UID is the Bittensor user id of the submitter. Block is the Bittensor block that the model was submitted in. More stats on taostats."""
EVALUATION_HEADER = """

Shows the latest internal evaluation statistics as calculated by a validator run by Nous Research

"""

H4_TOKEN = os.environ.get("H4_TOKEN", None)
API = HfApi(token=H4_TOKEN)

REPO_ID = "winglian/finetuning_subnet_leaderboard"

METAGRAPH_RETRIES = 5
METAGRAPH_DELAY_SECS = 3
NETUID = 6
SUBNET_START_BLOCK = 2225782
SECONDS_PER_BLOCK = 12


def get_subtensor_and_metagraph() -> typing.Tuple[bt.subtensor, bt.metagraph]:
    """Connect to finney and fetch the subnet metagraph, retrying on transient failures."""
    for i in range(0, METAGRAPH_RETRIES):
        try:
            subtensor: bt.subtensor = bt.subtensor("finney")
            metagraph: bt.metagraph = subtensor.metagraph(NETUID, lite=False)
            return subtensor, metagraph
        except:
            if i == METAGRAPH_RETRIES - 1:
                raise
            time.sleep(METAGRAPH_DELAY_SECS)
    raise RuntimeError()


@dataclass
class ModelData:
    uid: int
    hotkey: str
    namespace: str
    name: str
    commit: typing.Optional[str]
    hash: typing.Optional[str]
    block: int
    incentive: float
    emission: float

    @classmethod
    def from_compressed_str(cls, uid: int, hotkey: str, cs: str, block: int, incentive: float, emission: float):
        """Returns an instance of this class from a compressed string representation"""
        tokens = cs.split(":")
        return ModelData(
            uid=uid,
            hotkey=hotkey,
            namespace=tokens[0],
            name=tokens[1],
            commit=tokens[2] if tokens[2] != "None" else None,
            hash=tokens[3] if tokens[3] != "None" else None,
            block=block,
            incentive=incentive,
            emission=emission,
        )


def get_tao_price() -> float:
    """Fetch the latest TAO/USDT price from KuCoin, retrying on transient failures."""
    for i in range(0, METAGRAPH_RETRIES):
        try:
            return float(requests.get("https://api.kucoin.com/api/v1/market/stats?symbol=TAO-USDT").json()["data"]["last"])
        except:
            if i == METAGRAPH_RETRIES - 1:
                raise
            time.sleep(METAGRAPH_DELAY_SECS)
    raise RuntimeError()


def get_validator_weights(metagraph: bt.metagraph) -> typing.Dict[int, typing.Tuple[float, int, typing.Dict[int, float]]]:
    """Map each validator UID to (validator trust, stake, {miner UID: weight})."""
    ret = {}
    for uid in metagraph.uids.tolist():
        vtrust = metagraph.validator_trust[uid].item()
        if vtrust > 0:
            ret[uid] = (vtrust, metagraph.S[uid].item(), {})
            for ouid in metagraph.uids.tolist():
                if ouid == uid:
                    continue
                weight = round(metagraph.weights[uid][ouid].item(), 4)
                if weight > 0:
                    ret[uid][-1][ouid] = weight
    return ret


def get_subnet_data(subtensor: bt.subtensor, metagraph: bt.metagraph) -> typing.List[ModelData]:
    """Decode each miner's on-chain commitment into a ModelData entry."""
    result = []
    for uid in metagraph.uids.tolist():
        hotkey = metagraph.hotkeys[uid]
        metadata = get_metadata(subtensor, metagraph.netuid, hotkey)
        if not metadata:
            continue

        commitment = metadata["info"]["fields"][0]
        hex_data = commitment[list(commitment.keys())[0]][2:]
        chain_str = bytes.fromhex(hex_data).decode()
        block = metadata["block"]
        incentive = metagraph.incentive[uid].nan_to_num().item()
        emission = metagraph.emission[uid].nan_to_num().item() * 20  # convert to daily TAO

        model_data = None
        try:
            model_data = ModelData.from_compressed_str(uid, hotkey, chain_str, block, incentive, emission)
        except:
            continue

        result.append(model_data)
    return result


def floatable(x) -> bool:
    return (isinstance(x, float) and not math.isnan(x) and not math.isinf(x)) or isinstance(x, int)


def get_float_score(key: str, history) -> typing.Tuple[typing.Optional[float], bool]:
    """Return the latest finite value logged under `key`, and whether it came from the most recent entry."""
    if key in history:
        data = list(history[key])
        if len(data) > 0:
            if floatable(data[-1]):
                return float(data[-1]), True
            else:
                data = [float(x) for x in data if floatable(x)]
                if len(data) > 0:
                    return float(data[-1]), False
    return None, False


def get_sample(uid, history) -> typing.Optional[typing.Tuple[str, str]]:
    """Return the latest (prompt, response) sample logged for a UID, if any."""
    prompt_key = f"sample_prompt_data.{uid}"
    response_key = f"sample_response_data.{uid}"
    if prompt_key in history and response_key in history:
        prompt = list(history[prompt_key])[-1]
        response = list(history[response_key])[-1]
        if isinstance(prompt, str) and isinstance(response, str):
            return prompt, response
    return None


def next_tempo(start_block, tempo, block):
    """Return the next tempo-boundary block after `block`, counting from `start_block`."""
    start_num = start_block + tempo
    intervals = (block - start_num) // tempo
    nearest_num = start_num + ((intervals + 1) * tempo)
    return nearest_num


subtensor, metagraph = get_subtensor_and_metagraph()

tao_price = get_tao_price()

leaderboard_df = get_subnet_data(subtensor, metagraph)
leaderboard_df.sort(key=lambda x: x.incentive, reverse=True)

current_block = metagraph.block.item()
next_update = next_tempo(
    SUBNET_START_BLOCK,
    subtensor.get_subnet_hyperparameters(NETUID).tempo,
    current_block,
)
blocks_to_go = next_update - current_block
current_time = datetime.datetime.now()
next_update_time = current_time + datetime.timedelta(seconds=blocks_to_go * SECONDS_PER_BLOCK)

validator_df = get_validator_weights(metagraph)
weight_keys = set()
for uid, stats in validator_df.items():
    weight_keys.update(stats[-1].keys())


def get_next_update():
    now = datetime.datetime.now()
    delta = next_update_time - now
    return f"""
Next reward update: {blocks_to_go} blocks (~{int(delta.total_seconds() // 60)} minutes)
"""


demo = gr.Blocks(css=".typewriter {font-family: 'JMH Typewriter', sans-serif;}")
with demo:
    gr.HTML(FONT)
    gr.HTML(TITLE)
    gr.HTML(IMAGE)
    gr.HTML(HEADER)
    gr.HTML(value=get_next_update())

    gr.Label(
        value={
            f"{c.namespace}/{c.name} ({c.commit[0:8]}) · ${round(c.emission * tao_price, 2):,} (τ{round(c.emission, 2):,})": c.incentive
            for c in leaderboard_df
            if c.incentive
        },
        num_top_classes=10,
    )

    with gr.Accordion("Validator Stats"):
        validator_table = gr.components.Dataframe(
            value=[
                [uid, int(validator_df[uid][1]), round(validator_df[uid][0], 4)]
                + [validator_df[uid][-1].get(c.uid) for c in leaderboard_df if c.incentive]
                for uid, _ in sorted(
                    zip(validator_df.keys(), [validator_df[x][1] for x in validator_df.keys()]),
                    key=lambda x: x[1],
                    reverse=True,
                )
            ],
            headers=["UID", "Stake (τ)", "V-Trust"]
            + [f"{c.namespace}/{c.name} ({c.commit[0:8]})" for c in leaderboard_df if c.incentive],
            datatype=["number", "number", "number"]
            + ["number" for c in leaderboard_df if c.incentive],
            interactive=False,
            visible=True,
        )


def restart_space():
    API.restart_space(repo_id=REPO_ID, token=H4_TOKEN)


scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=60 * 10)  # restart every 10 minutes
scheduler.start()

demo.launch()