type (string, 1 value) | id (string, length 5–122) | num_branches (int64, 1–1.76k) | branches (sequence, length 1–1.76k) | main_branch_size (int64, 0–32,943B) |
---|---|---|---|---|
model | vyshnavid/emotions_mteb_qlora_2_im | 1 | ["main"] | 85,783,291 |
model | solidrust/Merge-Mayhem-L3-V2.1-AWQ | 1 | ["main"] | 5,737,143,567 |
model | Thermostatic/Llama-3-NeuralTranslate-70b-v0.3-lora | 1 | ["main"] | 3,313,655,765 |
model | Thermostatic/Llama-3-NeuralTranslate-70b-v0.3-checkpoints | 1 | ["main"] | 1,543 |
model | Greko89/NatanProfeta | 1 | ["main"] | 73,383,185 |
model | mradermacher/Mistral-9B-Instruct-GGUF | 1 | ["main"] | 90,991,102,668 |
model | ytcheng/Qwen-14B-lora-pretrain | 1 | ["main"] | 28,997,367 |
model | ytcheng/llama3-70b-lora-pretrain | 1 | ["main"] | 74,760,796 |
model | solidrust/Top-Western-Maid-7B-AWQ | 1 | ["main"] | 4,153,175,873 |
model | HuggingBookNew/my-meta-llama-model | 1 | ["main"] | 1,519 |
model | ttc0000/mistral_Progressive_Home_Homesite_text_scan_lora_r64_a128_info_extract_v2 | 1 | ["main"] | 1,204,686,008 |
model | yohana7/textclass | 1 | ["main"] | 51,960,969 |
model | solidrust/Fett-Eris-Mix-7B-AWQ | 1 | ["main"] | 4,153,175,852 |
model | Hrishicool/Emotion_Repo | 1 | ["main"] | 59,089,735 |
model | yregelman/jerry-s-only-slono-lora | 1 | ["main"] | 176,971,843 |
model | LoneStriker/dolphin-2.9.1-llama-3-8b-GGUF | 1 | ["main"] | 30,112,525,354 |
model | pszemraj/bert-L-4_H-128_A-2-OCR-quality | 1 | ["main"] | 20,093,538 |
model | fant92fk/LLMt | 1 | ["main"] | 1,519 |
model | CyberHarem/ermengarde_arknights | 1 | ["main"] | 1,472,641,195 |
model | solidrust/Multi-Verse-RP-7B-AWQ | 1 | ["main"] | 4,153,176,391 |
model | mulanai/mulan-lang-adapter | 1 | ["main"] | 769,211,408 |
model | solidrust/Llama3-ChatQA-1.5-8B-AWQ | 1 | ["main"] | 5,737,142,971 |
model | habulaj/1715396525979x979534020910776300 | 1 | ["main"] | 228,466,227 |
model | mradermacher/LuminRP-13B-128k-i1-GGUF | 1 | ["main"] | 124,203,818,121 |
model | AlignmentResearch/robust_llm_pythia-70m_niki-046_enronspam_random-token-1280_seed-0 | 31 | ["adv-training-round-29", "adv-training-round-28", "adv-training-round-27", "adv-training-round-26", "adv-training-round-25", "adv-training-round-24", "adv-training-round-23", "adv-training-round-22", "adv-training-round-21", "adv-training-round-20", "adv-training-round-19", "adv-training-round-18", "adv-training-round-17", "adv-training-round-16", "adv-training-round-15", "adv-training-round-14", "adv-training-round-13", "adv-training-round-12", "adv-training-round-11", "adv-training-round-10", "adv-training-round-9", "adv-training-round-8", "adv-training-round-7", "adv-training-round-6", "adv-training-round-5", "adv-training-round-4", "adv-training-round-3", "adv-training-round-2", "adv-training-round-1", "adv-training-round-0", "main"] | 1,519 |
model | Kitajiang/bge_finetune | 1 | ["main"] | 1,519 |
model | AlignmentResearch/robust_llm_pythia-70m_niki-046_enronspam_random-token-1280_seed-1 | 31 | ["adv-training-round-29", "adv-training-round-28", "adv-training-round-27", "adv-training-round-26", "adv-training-round-25", "adv-training-round-24", "adv-training-round-23", "adv-training-round-22", "adv-training-round-21", "adv-training-round-20", "adv-training-round-19", "adv-training-round-18", "adv-training-round-17", "adv-training-round-16", "adv-training-round-15", "adv-training-round-14", "adv-training-round-13", "adv-training-round-12", "adv-training-round-11", "adv-training-round-10", "adv-training-round-9", "adv-training-round-8", "adv-training-round-7", "adv-training-round-6", "adv-training-round-5", "adv-training-round-4", "adv-training-round-3", "adv-training-round-2", "adv-training-round-1", "adv-training-round-0", "main"] | 1,519 |
model | RichardErkhov/gradientai_-_Llama-3-8B-Instruct-Gradient-1048k-gguf | 1 | ["main"] | 99,035,570,462 |
model | hoangtrong/project_ai | 1 | ["main"] | 1,519 |
model | solidrust/Orthocopter_8B-AWQ | 1 | ["main"] | 5,737,143,544 |
model | abc88767/2c14 | 1 | ["main"] | 3,295,853,121 |
model | AlignmentResearch/robust_llm_pythia-410m_niki-044a_imdb_random-token-1280_seed-0 | 31 | ["adv-training-round-29", "adv-training-round-28", "adv-training-round-27", "adv-training-round-26", "adv-training-round-25", "adv-training-round-24", "adv-training-round-23", "adv-training-round-22", "adv-training-round-21", "adv-training-round-20", "adv-training-round-19", "adv-training-round-18", "adv-training-round-17", "adv-training-round-16", "adv-training-round-15", "adv-training-round-14", "adv-training-round-13", "adv-training-round-12", "adv-training-round-11", "adv-training-round-10", "adv-training-round-9", "adv-training-round-8", "adv-training-round-7", "adv-training-round-6", "adv-training-round-5", "adv-training-round-4", "adv-training-round-3", "adv-training-round-2", "adv-training-round-1", "adv-training-round-0", "main"] | 1,519 |
model | mulanai/mulan-pixart | 1 | ["main"] | 9,778,211,937 |
model | LoneStriker/dolphin-2.9.1-llama-3-8b-3.0bpw-h6-exl2 | 1 | ["main"] | 4,089,391,971 |
model | Kitajiang/bge_finetune60000 | 1 | ["main"] | 1,129,521,000 |
model | solidrust/dolphin-2.9.1-llama-3-8b-AWQ | 1 | ["main"] | 5,737,179,946 |
model | LoneStriker/dolphin-2.9.1-llama-3-8b-4.0bpw-h6-exl2 | 1 | ["main"] | 4,965,972,367 |
model | LoneStriker/dolphin-2.9.1-llama-3-8b-5.0bpw-h6-exl2 | 1 | ["main"] | 5,837,363,087 |
model | backyardai/Llama-3-Soliloquy-8B-v2-GGUF | 1 | ["main"] | 101,770,151,944 |
model | tkempto1/hybrid-qa-repo | 1 | ["main"] | 66,325 |
model | yuyuclimb/yxyllama2-v1.3 | 1 | ["main"] | 13,477,462,465 |
model | LoneStriker/dolphin-2.9.1-llama-3-8b-6.0bpw-h6-exl2 | 1 | ["main"] | 6,707,832,759 |
model | PhoenixStormJr/Glide-Megaman-NT-Warrior-RVC | 1 | ["main"] | 212,091,789 |
model | CultriX/CultMerge-7B-v1 | 1 | ["main"] | 14,485,814,535 |
model | erplegend/monica | 1 | ["main"] | 2,593 |
model | LoneStriker/dolphin-2.9.1-llama-3-8b-8.0bpw-h8-exl2 | 1 | ["main"] | 8,509,999,443 |
model | truongquangchu/my_awesome_qa_model | 1 | ["main"] | 266,423,056 |
model | solidrust/Buzz-8b-Large-v0.5-AWQ | 1 | ["main"] | 5,737,144,370 |
model | mradermacher/Meta-Llama-3-8B-Instruct-FineTuned-Aspirin-GGUF | 1 | ["main"] | 82,806,128,511 |
model | Ho97/n24_w2_v6_base | 1 | ["main"] | 26,954,228,865 |
model | north/mistral-7b-reference-instruction3 | 1 | ["main"] | 28,968,785,323 |
model | Ho97/n24_w2_v6_adapter | 1 | ["main"] | 639,699,286 |
model | zhanjun/lora-trained-xl-notion_trans | 1 | ["main"] | 1,440,187,967 |
model | Ho97/n24_w3_v6_base | 1 | ["main"] | 26,954,228,865 |
model | Ho97/n24_w3_v6_adapter | 1 | ["main"] | 639,699,286 |
model | Ho97/n24_w4_v6_base | 1 | ["main"] | 26,954,228,865 |
model | Ho97/n24_w4_v6_adapter | 1 | ["main"] | 639,699,286 |
model | qminh369/token-classification-llmlingua2-xlm-roberta-1k7_yte_10_epoch_paper | 1 | ["main"] | 2,257,590,913 |
model | Raneechu/litreviewUnsupervised | 1 | ["main"] | 35,929,743 |
model | thinhkosay/sentiment-bert-base-uncased | 1 | ["main"] | 437,970,901 |
model | kali1/ko-address-whisper-medium-processor | 1 | ["main"] | 2,026,386 |
model | solidrust/Mahou-1.1-llama3-8B-AWQ | 1 | ["main"] | 5,737,143,339 |
model | code801/test | 1 | ["main"] | 1,550 |
model | Litzy619/Phi0503HMA20 | 1 | ["main"] | 8,352,589,467 |
model | thaisonatk/phi-3-sft-full-domain | 1 | ["main"] | 1,519 |
model | qompass/Meta-Llama-3-8B-GGUF | 1 | ["main"] | 1,519 |
model | Danikdsa/Leeseo | 1 | ["main"] | 145,552,547 |
model | solidrust/Llama-3-8B-Base-Coder-v3.5-10k-AWQ | 1 | ["main"] | 5,737,142,902 |
model | solidrust/Llama-3-8b-tagalog-v1-AWQ | 1 | ["main"] | 5,737,144,457 |
model | SharryOG/Sharry | 1 | ["main"] | 86,354,755,653 |
model | AlignmentResearch/robust_llm_pythia-1b_niki-044a_imdb_random-token-1280_seed-2 | 31 | ["adv-training-round-29", "adv-training-round-28", "adv-training-round-27", "adv-training-round-26", "adv-training-round-25", "adv-training-round-24", "adv-training-round-23", "adv-training-round-22", "adv-training-round-21", "adv-training-round-20", "adv-training-round-19", "adv-training-round-18", "adv-training-round-17", "adv-training-round-16", "adv-training-round-15", "adv-training-round-14", "adv-training-round-13", "adv-training-round-12", "adv-training-round-11", "adv-training-round-10", "adv-training-round-9", "adv-training-round-8", "adv-training-round-7", "adv-training-round-6", "adv-training-round-5", "adv-training-round-4", "adv-training-round-3", "adv-training-round-2", "adv-training-round-1", "adv-training-round-0", "main"] | 1,519 |
model | ytzi/starcoder2-3b-rkt-macros-epoch-0.5 | 1 | ["main"] | 21,514,714 |
model | RichardErkhov/SeaLLMs_-_SeaLLM-7B-v2-8bits | 1 | ["main"] | 7,781,691,004 |
model | solidrust/Maverick-8B-AWQ | 1 | ["main"] | 5,737,144,726 |
model | osouza/gemma-portuguese-luana-2b-questoes-string | 1 | ["main"] | 5,034,106,393 |
model | Litzy619/Phi0503HMA21 | 1 | ["main"] | 8,352,589,467 |
model | solidrust/West-Hermes-7B-AWQ | 1 | ["main"] | 4,153,175,838 |
model | pranjaldas11/Emotion_Repo | 1 | ["main"] | 1,519 |
model | saaduddinM/Gemma2B_mul_large | 1 | ["main"] | 7,841,502 |
model | mradermacher/NeoBase_8b_v0.2-GGUF | 1 | ["main"] | 82,806,317,913 |
model | solidrust/Kuno-Lake-7B-AWQ | 1 | ["main"] | 4,153,175,824 |
model | RichardErkhov/SeaLLMs_-_SeaLLM-7B-v2-gguf | 1 | ["main"] | 89,455,908,039 |
model | AlignmentResearch/robust_llm_pythia-410m_niki-044a_imdb_random-token-1280_seed-1 | 31 | ["adv-training-round-29", "adv-training-round-28", "adv-training-round-27", "adv-training-round-26", "adv-training-round-25", "adv-training-round-24", "adv-training-round-23", "adv-training-round-22", "adv-training-round-21", "adv-training-round-20", "adv-training-round-19", "adv-training-round-18", "adv-training-round-17", "adv-training-round-16", "adv-training-round-15", "adv-training-round-14", "adv-training-round-13", "adv-training-round-12", "adv-training-round-11", "adv-training-round-10", "adv-training-round-9", "adv-training-round-8", "adv-training-round-7", "adv-training-round-6", "adv-training-round-5", "adv-training-round-4", "adv-training-round-3", "adv-training-round-2", "adv-training-round-1", "adv-training-round-0", "main"] | 1,519 |
model | Sardean-UChicago/FOX_Mistral-7B-Headline-QLoRA | 1 | ["main"] | 14,485,815,718 |
model | pmolodo/ppo-Huggy | 1 | ["main"] | 284,449,485 |
model | ahson17/glemma | 1 | ["main"] | 630,867,993 |
model | kali1/whisper-medium-1000 | 1 | ["main"] | 3,057,579,101 |
model | GenTrendGPT/Model-GEN-Type | 1 | ["main"] | 1,519 |
model | jsingh/autoflow-math-v0.4 | 1 | ["main"] | 13,655,915 |
model | Xuezha/Qwen-mental | 1 | ["main"] | 9,208,938,990 |
model | GoldoBasic/StylophoneRemake | 1 | ["main"] | 60,077,755 |
model | GraydientPlatformAPI/loras-may10 | 1 | ["main"] | 835,793,755 |
model | solidrust/Kuro-Lotus-10.7B-AWQ | 1 | ["main"] | 5,966,563,131 |
model | TheRealheavy/SniperAggressive | 1 | ["main"] | 221,214,956 |
model | thuvasooriya/malwaredet-vit | 1 | ["main"] | 428,830,227 |
model | ahmedgongi/Llamatokenizer_version_finale4 | 1 | ["main"] | 9,143,722 |
model | ahmedgongi/Llama3model_version_finale4 | 1 | ["main"] | 89,488,708 |
model | phannhat/CRF_Transformer_Whisper_tiny_en | 1 | ["main"] | 151,070,670 |
model | Marionis/Choruses | 1 | ["main"] | 1,519 |
model | mmnga/aixsatoshi-Ex-karakuri-8x12B-chat-v1-gguf | 1 | ["main"] | 976,127,231,934 |