from huggingface_hub import HfApi, HfFileSystem
import re
from tqdm import tqdm
import concurrent.futures
import gradio as gr
import datetime
import pandas as pd
import os
import threading
import time

HF_TOKEN = os.getenv('HF_TOKEN')

api = HfApi()
fs = HfFileSystem()


def restart_space():
    # Restart the Space every 10 hours so the model list stays fresh.
    time.sleep(36000)
    api.restart_space(repo_id="Tanvir1337/lonestriker-quantized-models", token=HF_TOKEN)


text = f"""
🎯 This leaderboard tracks lonestriker's exl2 quantized models.

## 🛠️ Backend

The leaderboard's backend mainly runs on the [Hugging Face Hub API](https://huggingface.co/docs/huggingface_hub/v0.5.1/en/package_reference/hf_api).

## 🔍 Searching

You can search for an author or a specific model using the search bar.

## ⌛ Last Update

This space was last updated on **{datetime.datetime.now().strftime("%Y-%m-%d %H:%M")}**.

## 📒 Important Note

This space may include quantizations that are matched to the wrong base model. If you find any incorrectly listed model, please report it to me.
"""

# All exl2 quantizations published by lonestriker.
quant_models = [model.id for model in api.list_models(author="lonestriker") if "exl2" in model.id]

# Matches the "(https://huggingface.co/<author>/<model>)" link in each quantized
# model's README, which points back to the original (unquantized) model.
pattern = r'\(https://huggingface\.co/([^/]+)/([^/]+)\)'
liste = {}


def process_model(repo_id, pattern, liste):
    # Read the quantized repo's README and map the original model it links to
    # onto the list of its quantized repos.
    readme = fs.read_text(repo_id + "/README.md")
    matches = re.search(pattern, readme)
    if matches:
        author = matches.group(1)
        model_name = matches.group(2)
        full_id = (author + "/" + model_name).split(")")[0]
        try:
            liste[full_id].append(repo_id)
        except KeyError:
            liste[full_id] = [repo_id]


# Fetch the READMEs concurrently; each call is network-bound.
num_threads = 64

with concurrent.futures.ThreadPoolExecutor(max_workers=num_threads) as executor:
    futures = [executor.submit(process_model, repo_id, pattern, liste) for repo_id in quant_models]
    concurrent.futures.wait(futures)

# Flatten the mapping into columns for the dataframe.
authors, models, exl2 = [], [], []

for model, values in liste.items():
    models.append(model)
    exl2_value = None
    for value in values:
        if "-exl2" in value:
            exl2_value = value
    authors.append(model.split('/')[0])
    exl2.append(exl2_value)

df = pd.DataFrame({'👤 Author Name': authors, '🤖 Model Name': models, '📥 EXL2': exl2})


def search(search_text):
    # Filter by full model id ("author/model") or by author name.
    if not search_text:
        return df
    if len(search_text.split('/')) > 1:
        return df[df['🤖 Model Name'] == clickable(search_text)]
    else:
        return df[df['👤 Author Name'] == clickable(search_text)]


def clickable(x):
    # Render a repo id or author name as a Markdown link to its Hub page.
    return None if not x else f'[{x}](https://huggingface.co/{x})'


def to_clickable(df):
    for column in list(df.columns):
        df[column] = df[column].apply(clickable)
    return df


with gr.Blocks() as demo:
    gr.Markdown("""