Column schema (value ranges as reported by the dataset viewer):

| column | dtype | range |
| --- | --- | --- |
| type | string | 1 distinct value ("model") |
| id | string | length 5 to 122 |
| num_branches | int64 | 1 to 1.76k |
| branches | sequence of strings | length 1 to 1.76k |
| main_branch_size | int64 | 0 to 32,943B |
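Each row describes one Hugging Face model repository: its id, how many branches it has, the branch names, and the size of its main branch. Below is a minimal sketch of how such a table could be loaded and filtered with the `datasets` library; the dataset path and split name are placeholders, since the source shows only the columns, not where the dataset lives.

```python
from datasets import load_dataset

# Assumption: "someuser/hf-repo-branch-snapshot" is a hypothetical placeholder path;
# only the column names (type, id, num_branches, branches, main_branch_size) come
# from the schema shown above.
ds = load_dataset("someuser/hf-repo-branch-snapshot", split="train")

# Keep only repositories with more than one branch
# (e.g. the AlignmentResearch adversarial-training checkpoints below).
multi_branch = ds.filter(lambda row: row["num_branches"] > 1)

# Print a few of them: repo id, branch count, and main-branch size.
for row in multi_branch.select(range(min(5, len(multi_branch)))):
    print(row["id"], row["num_branches"], row["main_branch_size"])
```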
| type | id | num_branches | branches | main_branch_size |
| --- | --- | --- | --- | --- |
| model | janani4office2/akam_product_NER_mistral-7b-4bitv0.2_SFT | 1 | [ "main" ] | 48,477,926 |
| model | Longnt70/bert-base-uncased-issues-128 | 1 | [ "main" ] | 1,519 |
| model | moetezsa/first_train_2NDTRY | 1 | [ "main" ] | 337,912,537 |
| model | CycyAI/Llama-finetuned-Farmalpaca | 1 | [ "main" ] | 54,814,187 |
| model | blockblockblock/neo_7b-bpw4.6-exl2 | 1 | [ "main" ] | 4,807,491,018 |
| model | cypienai/cymist2-v01-SFT | 1 | [ "main" ] | 14,485,818,855 |
| model | Xerror/Mimi-Chatbot-0.2v | 1 | [ "main" ] | 671,152,017 |
| model | Kinopiko01/Cassie_Cage | 1 | [ "main" ] | 148,502,541 |
| model | Aspik101/llama13 | 1 | [ "main" ] | 16,069,719,672 |
| model | hatakeyama-llm-team/tokenizer_65000 | 1 | [ "main" ] | 4,259,634 |
| model | Michael791107/FirstChatbot | 1 | [ "main" ] | 2,173 |
| model | Ftfyhh/saiga_llama3_kto_8b-GGUF | 1 | [ "main" ] | 5,599,295,345 |
| model | Wouter01/diffusion_re10k_hard | 1 | [ "main" ] | 58,281,975,302 |
| model | karanliktavals/karanliktavas | 1 | [ "main" ] | 1,519 |
| model | KingMakati/Smasha | 1 | [ "main" ] | 1,519 |
| model | euiyulsong/mistral-7b-qlora-semi-noqg-10000 | 1 | [ "main" ] | 4,977,957,950 |
| model | AlignmentResearch/robust_llm_pythia-160m_niki-047_wl_random-token-1280_seed-1 | 31 | [ "adv-training-round-29", "adv-training-round-28", "adv-training-round-27", "adv-training-round-26", "adv-training-round-25", "adv-training-round-24", "adv-training-round-23", "adv-training-round-22", "adv-training-round-21", "adv-training-round-20", "adv-training-round-19", "adv-training-round-18", "adv-training-round-17", "adv-training-round-16", "adv-training-round-15", "adv-training-round-14", "adv-training-round-13", "adv-training-round-12", "adv-training-round-11", "adv-training-round-10", "adv-training-round-9", "adv-training-round-8", "adv-training-round-7", "adv-training-round-6", "adv-training-round-5", "adv-training-round-4", "adv-training-round-3", "adv-training-round-2", "adv-training-round-1", "adv-training-round-0", "main" ] | 1,519 |
| model | AlignmentResearch/robust_llm_pythia-160m_niki-047_wl_random-token-1280_seed-2 | 31 | [ "adv-training-round-29", "adv-training-round-28", "adv-training-round-27", "adv-training-round-26", "adv-training-round-25", "adv-training-round-24", "adv-training-round-23", "adv-training-round-22", "adv-training-round-21", "adv-training-round-20", "adv-training-round-19", "adv-training-round-18", "adv-training-round-17", "adv-training-round-16", "adv-training-round-15", "adv-training-round-14", "adv-training-round-13", "adv-training-round-12", "adv-training-round-11", "adv-training-round-10", "adv-training-round-9", "adv-training-round-8", "adv-training-round-7", "adv-training-round-6", "adv-training-round-5", "adv-training-round-4", "adv-training-round-3", "adv-training-round-2", "adv-training-round-1", "adv-training-round-0", "main" ] | 1,519 |
| model | Kinopiko01/Skarlet | 1 | [ "main" ] | 53,229,503 |
| model | Longnt70/test | 1 | [ "main" ] | 1,519 |
| model | munna1825/whisper-small-practice | 1 | [ "main" ] | 968,922,170 |
| model | karanliktavals/deneme1 | 1 | [ "main" ] | 357,923,013 |
| model | Mag0g/Ezekiel26_1 | 1 | [ "main" ] | 3,295,853,094 |
| model | siddharth-magesh/yahoo-flan-t5-large-chat | 1 | [ "main" ] | 998,269,075 |
| model | ahsanmalik/whisper-small-hi | 1 | [ "main" ] | 968,922,088 |
| model | kaaang-g/noffbot-v4_01 | 1 | [ "main" ] | 60,984,073 |
| model | blockblockblock/neo_7b-bpw4.8-exl2 | 1 | [ "main" ] | 4,988,140,718 |
| model | AmeenAli023/mamba_text_classification_grifin | 1 | [ "main" ] | 1,519 |
| model | AlignmentResearch/robust_llm_pythia-410m_niki-047_wl_random-token-1280_seed-1 | 31 | [ "adv-training-round-29", "adv-training-round-28", "adv-training-round-27", "adv-training-round-26", "adv-training-round-25", "adv-training-round-24", "adv-training-round-23", "adv-training-round-22", "adv-training-round-21", "adv-training-round-20", "adv-training-round-19", "adv-training-round-18", "adv-training-round-17", "adv-training-round-16", "adv-training-round-15", "adv-training-round-14", "adv-training-round-13", "adv-training-round-12", "adv-training-round-11", "adv-training-round-10", "adv-training-round-9", "adv-training-round-8", "adv-training-round-7", "adv-training-round-6", "adv-training-round-5", "adv-training-round-4", "adv-training-round-3", "adv-training-round-2", "adv-training-round-1", "adv-training-round-0", "main" ] | 1,519 |
| model | GenSEC-LLM/SLT-Task2-ngram-baseline | 1 | [ "main" ] | 2,032,013 |
| model | trudnyvozrast/MakSimV3 | 1 | [ "main" ] | 222,241,183 |
| model | Wouter01/InstructIR_re10k_hard | 1 | [ "main" ] | 63,629,113 |
| model | facebook/hiera-small-224-mae-hf | 1 | [ "main" ] | 248,518,804 |
| model | facebook/hiera-small-224-hf | 1 | [ "main" ] | 136,986,762 |
| model | asadullah797/whisper-tiny-ar | 1 | [ "main" ] | 152,997,467 |
| model | JoseRFJunior/Llama-3-8B-AWQ-2bit | 1 | [ "main" ] | 1,543 |
| model | facebook/hiera-small-224-in1k-hf | 1 | [ "main" ] | 140,132,715 |
| model | tosanoob/test-dummy-uniformer-finetuned | 1 | [ "main" ] | 39,454,734 |
| model | facebook/hiera-base-224-in1k-hf | 1 | [ "main" ] | 206,211,762 |
| model | facebook/hiera-base-224-hf | 1 | [ "main" ] | 203,065,214 |
| model | facebook/hiera-base-224-mae-hf | 1 | [ "main" ] | 314,597,954 |
| model | vinhnx90/Phi-3-mini-128k-instruct-Q8_0-GGUF | 1 | [ "main" ] | 4,061,224,238 |
| model | GraydientPlatformAPI/loras-may12c | 1 | [ "main" ] | 1,543,114,359 |
| model | blockblockblock/neo_7b-bpw5-exl2 | 1 | [ "main" ] | 5,175,309,638 |
| model | saeidebbei/Beriztoo | 1 | [ "main" ] | 146,951,359 |
| model | acbdkk/mjhq30k-model | 1 | [ "main" ] | 250,455 |
| model | facebook/hiera-base-plus-224-mae-hf | 1 | [ "main" ] | 390,391,654 |
| model | RichardErkhov/scb10x_-_typhoon-7b-4bits | 1 | [ "main" ] | 4,506,136,263 |
| model | PavanAddanki/santhosh | 1 | [ "main" ] | 1,519 |
| model | flammenai/Mahou-1.1-mistral-7B-GGUF | 1 | [ "main" ] | 13,018,838,161 |
| model | oscpalML/DeBERTa-political-classification | 1 | [ "main" ] | 1,751,435,203 |
| model | biplab2008/TestingViscosity | 1 | [ "main" ] | 1,549,951,557 |
| model | facebook/hiera-base-plus-224-hf | 1 | [ "main" ] | 276,039,956 |
| model | cjsanjay/llama-3-8B-gorilla-meraki_v1 | 1 | [ "main" ] | 167,835,066 |
| model | RichardErkhov/scb10x_-_typhoon-7b-8bits | 1 | [ "main" ] | 7,565,094,848 |
| model | Coolwowsocoolwow/Infomaniac_Lego_Island | 1 | [ "main" ] | 203,158,024 |
| model | kaaang-g/noffbot-v4_02 | 1 | [ "main" ] | 1,519 |
| model | kaaang-g/noffbot-v4_03 | 1 | [ "main" ] | 60,984,073 |
| model | RichardErkhov/scb10x_-_typhoon-7b-gguf | 1 | [ "main" ] | 87,893,610,728 |
| model | facebook/hiera-base-plus-224-in1k-hf | 1 | [ "main" ] | 279,698,505 |
| model | SZ0/Slavyori | 1 | [ "main" ] | 127,440,420 |
| model | LordNoah/schedule-iter0-b_se1 | 1 | [ "main" ] | 28,968,791,099 |
| model | saishf/Ortho-SOVL-8B-L3 | 1 | [ "main" ] | 16,069,723,467 |
| model | blockblockblock/neo_7b-bpw5.5-exl2 | 1 | [ "main" ] | 5,638,461,354 |
| model | Jaydeehhh/my-trained-llama-ccsa | 1 | [ "main" ] | 1,523 |
| model | mradermacher/Tess-2.0-Mixtral-8x22B-i1-GGUF | 1 | [ "main" ] | 1,358,570,019,940 |
| model | suzeai/model | 1 | [ "main" ] | 1,519 |
| model | dbands/llama-3-8b-orca-math-word-problems-instruct | 1 | [ "main" ] | 67,860,557,451 |
| model | MX4T/ayty | 1 | [ "main" ] | 463,759,543 |
| model | zainalikhokhar/mistral_VLLM_LORA | 1 | [ "main" ] | 170,130,475 |
| model | Hydroflu/rl_inversion | 1 | [ "main" ] | 891,705,872 |
| model | zainalikhokhar/mistral_VLLM_4bit | 1 | [ "main" ] | 4,127,986,396 |
| model | giliit/capstone_v3 | 1 | [ "main" ] | 495,598,133 |
| model | zainalikhokhar/mistral_default | 1 | [ "main" ] | 7,494 |
| model | Kelvin950/trained_weigths | 1 | [ "main" ] | 642,052,994 |
| model | zainalikhokhar/mistral_VLLM_16bit | 1 | [ "main" ] | 28,444,463,930 |
| model | clxudiajazmin/t5-small-finetuned-xsum | 1 | [ "main" ] | 1,519 |
| model | Ola172/article_classification_modelv1.1 | 1 | [ "main" ] | 543,227,512 |
| model | AlignmentResearch/robust_llm_pythia-31m_niki-052_imdb_gcg_seed-1 | 31 | [ "adv-training-round-29", "adv-training-round-28", "adv-training-round-27", "adv-training-round-26", "adv-training-round-25", "adv-training-round-24", "adv-training-round-23", "adv-training-round-22", "adv-training-round-21", "adv-training-round-20", "adv-training-round-19", "adv-training-round-18", "adv-training-round-17", "adv-training-round-16", "adv-training-round-15", "adv-training-round-14", "adv-training-round-13", "adv-training-round-12", "adv-training-round-11", "adv-training-round-10", "adv-training-round-9", "adv-training-round-8", "adv-training-round-7", "adv-training-round-6", "adv-training-round-5", "adv-training-round-4", "adv-training-round-3", "adv-training-round-2", "adv-training-round-1", "adv-training-round-0", "main" ] | 1,519 |
| model | AlignmentResearch/robust_llm_pythia-31m_niki-052_imdb_gcg_seed-2 | 31 | [ "adv-training-round-29", "adv-training-round-28", "adv-training-round-27", "adv-training-round-26", "adv-training-round-25", "adv-training-round-24", "adv-training-round-23", "adv-training-round-22", "adv-training-round-21", "adv-training-round-20", "adv-training-round-19", "adv-training-round-18", "adv-training-round-17", "adv-training-round-16", "adv-training-round-15", "adv-training-round-14", "adv-training-round-13", "adv-training-round-12", "adv-training-round-11", "adv-training-round-10", "adv-training-round-9", "adv-training-round-8", "adv-training-round-7", "adv-training-round-6", "adv-training-round-5", "adv-training-round-4", "adv-training-round-3", "adv-training-round-2", "adv-training-round-1", "adv-training-round-0", "main" ] | 1,519 |
| model | facebook/hiera-large-224-in1k-hf | 1 | [ "main" ] | 855,106,622 |
| model | ramirces/anomalydataset | 1 | [ "main" ] | 2,235,639 |
| model | abc88767/4sc23 | 1 | [ "main" ] | 3,295,853,063 |
| model | AmeenAli023/mamba_text_classification_yelp | 1 | [ "main" ] | 1,517,969,613 |
| model | CoBaLD/xlm-roberta-base-cobald-parser | 1 | [ "main" ] | 848,939,280 |
| model | dhruv2000/my_awesome_qa_model | 1 | [ "main" ] | 266,531,919 |
| model | kapliff89/roberta-base-finetuned-nmrv-finetuned-ced | 1 | [ "main" ] | 443,582,796 |
| model | TirthankarSlg/phi3-hypogen-new | 1 | [ "main" ] | 15,286,297,683 |
| model | facebook/hiera-large-224-mae-hf | 1 | [ "main" ] | 971,593,503 |
| model | libertyfromthinking/test | 1 | [ "main" ] | 1,519 |
| model | bartowski/Pantheon-RP-1.0-8b-Llama-3-exl2 | 6 | [ "3_5", "4_25", "5_0", "6_5", "8_0", "main" ] | 1,790,529 |
| model | gvlk/qa-full-model-flan-t5 | 1 | [ "main" ] | 495,197,946 |
| model | hxy8898/OpenELM-270M-Instruct-FT | 1 | [ "main" ] | 545,474,471 |
| model | ucuncubayram/distilbert-emotion | 1 | [ "main" ] | 268,798,233 |
| model | goyobop/Apollo-34b-GGUF | 1 | [ "main" ] | 23,646,764,709 |
| model | blockblockblock/neo_7b-bpw6-exl2 | 1 | [ "main" ] | 6,097,386,174 |
| model | adamo1139/Lumina-Next-T2I-quantized | 1 | [ "main" ] | 74,464,618,768 |
| model | JieShibo/MemVP-LLaMA-scienceqa | 1 | [ "main" ] | 37,508,290 |
| model | IvashinMaxim/electra128 | 1 | [ "main" ] | 181,291,916 |
| model | MoMonir/SFR-Embedding-Mistral-GGUF | 1 | [ "main" ] | 29,926,646,948 |