| Column | Feature type | Range |
|---|---|---|
| pipeline_tag | stringclasses | 48 values |
| library_name | stringclasses | 198 values |
| text | stringlengths | 1 to 900k |
| metadata | stringlengths | 2 to 438k |
| id | stringlengths | 5 to 122 |
| last_modified | null | |
| tags | sequencelengths | 1 to 1.84k |
| sha | null | |
| created_at | stringlengths | 25 to 25 |
| arxiv | sequencelengths | 0 to 201 |
| languages | sequencelengths | 0 to 1.83k |
| tags_str | stringlengths | 17 to 9.34k |
| text_str | stringlengths | 0 to 389k |
| text_lists | sequencelengths | 0 to 722 |
| processed_texts | sequencelengths | 1 to 723 |
| tokens_length | sequencelengths | 1 to 723 |
| input_texts | sequencelengths | 1 to 1 |
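The columns above describe a dataset of model-card records. As a minimal sketch of how such a dataset is typically inspected with the `datasets` library (the repository id `user/model-card-dump` is a placeholder, not the actual dataset name):

```python
from datasets import load_dataset

# Placeholder repo id; substitute the actual dataset repository.
ds = load_dataset("user/model-card-dump", split="train")

print(ds.column_names)      # pipeline_tag, library_name, text, metadata, id, ...
print(ds[0]["id"])          # model repository id of the first record
print(ds[0]["text"][:200])  # start of that record's model card
```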
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
lunarsylph/stablecell_v55
null
[ "transformers", "safetensors", "stablelm", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-30T04:02:13+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #stablelm #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #stablelm #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 41, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #stablelm #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_prom_prom_300_all-seqsight_32768_512_30M-L1_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_prom_prom_300_all](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_prom_prom_300_all) dataset. It achieves the following results on the evaluation set: - Loss: 0.2233 - F1 Score: 0.9091 - Accuracy: 0.9091 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:| | 0.4264 | 0.54 | 200 | 0.2903 | 0.8858 | 0.8858 | | 0.3006 | 1.08 | 400 | 0.2596 | 0.8981 | 0.8981 | | 0.2764 | 1.62 | 600 | 0.2417 | 0.9047 | 0.9047 | | 0.2554 | 2.16 | 800 | 0.2420 | 0.9067 | 0.9068 | | 0.2497 | 2.7 | 1000 | 0.2326 | 0.9073 | 0.9073 | | 0.242 | 3.24 | 1200 | 0.2334 | 0.9061 | 0.9061 | | 0.2433 | 3.78 | 1400 | 0.2248 | 0.9108 | 0.9108 | | 0.2411 | 4.32 | 1600 | 0.2225 | 0.9118 | 0.9118 | | 0.2329 | 4.86 | 1800 | 0.2215 | 0.9115 | 0.9115 | | 0.2302 | 5.41 | 2000 | 0.2211 | 0.9132 | 0.9132 | | 0.2306 | 5.95 | 2200 | 0.2171 | 0.9127 | 0.9127 | | 0.2304 | 6.49 | 2400 | 0.2172 | 0.9135 | 0.9135 | | 0.2293 | 7.03 | 2600 | 0.2164 | 0.9130 | 0.9130 | | 0.2251 | 7.57 | 2800 | 0.2149 | 0.9120 | 0.9120 | | 0.2249 | 8.11 | 3000 | 0.2143 | 0.9135 | 0.9135 | | 0.2238 | 8.65 | 3200 | 0.2129 | 0.9128 | 0.9128 | | 0.2175 | 9.19 | 3400 | 0.2118 | 0.9147 | 0.9147 | | 0.2172 | 9.73 | 3600 | 0.2080 | 0.9139 | 0.9139 | | 0.224 | 10.27 | 3800 | 0.2063 | 0.9144 | 0.9144 | | 0.2162 | 10.81 | 4000 | 0.2058 | 0.9160 | 0.9160 | | 0.218 | 11.35 | 4200 | 0.2037 | 0.9186 | 0.9186 | | 0.2158 | 11.89 | 4400 | 0.2046 | 0.9171 | 0.9171 | | 0.2118 | 12.43 | 4600 | 0.2055 | 0.9157 | 0.9157 | | 0.2142 | 12.97 | 4800 | 0.2031 | 0.9182 | 0.9182 | | 0.2116 | 13.51 | 5000 | 0.2036 | 0.9191 | 0.9191 | | 0.2147 | 14.05 | 5200 | 0.2027 | 0.9179 | 0.9179 | | 0.2106 | 14.59 | 5400 | 0.2012 | 0.9193 | 0.9193 | | 0.2094 | 15.14 | 5600 | 0.1992 | 0.9203 | 0.9203 | | 0.2089 | 15.68 | 5800 | 0.2003 | 0.9179 | 0.9179 | | 0.2124 | 16.22 | 6000 | 0.1985 | 0.9198 | 0.9198 | | 0.2083 | 16.76 | 6200 | 0.1997 | 0.9208 | 0.9208 | | 0.2121 | 17.3 | 6400 | 0.1997 | 0.9191 | 0.9191 | | 0.2094 | 17.84 | 6600 | 0.1996 | 0.9193 | 0.9193 | | 0.2024 | 18.38 | 6800 | 0.1999 | 0.9201 | 0.9201 | | 0.2116 | 18.92 | 7000 | 0.1975 | 0.9196 | 0.9196 | | 0.2087 | 19.46 | 7200 | 0.1978 | 0.9211 | 0.9211 | | 0.2052 | 20.0 | 7400 | 0.1964 | 0.9230 | 0.9230 | | 0.2071 | 20.54 | 7600 | 0.1988 | 0.9209 | 0.9209 | | 0.204 | 21.08 | 7800 | 0.1966 | 0.9213 | 0.9213 | | 0.2057 | 21.62 | 8000 | 0.1982 | 0.9211 | 0.9211 | | 0.2051 | 22.16 | 8200 | 0.1969 | 0.9209 | 0.9209 | | 0.1994 | 22.7 | 8400 | 0.1984 | 0.9223 | 0.9223 | | 0.2098 | 23.24 | 8600 | 0.1964 | 0.9211 | 0.9211 | | 0.2024 | 23.78 | 8800 | 0.1972 | 0.9213 | 0.9213 | | 
0.2056 | 24.32 | 9000 | 0.1976 | 0.9206 | 0.9206 | | 0.2018 | 24.86 | 9200 | 0.1981 | 0.9213 | 0.9213 | | 0.2014 | 25.41 | 9400 | 0.1974 | 0.9216 | 0.9216 | | 0.2022 | 25.95 | 9600 | 0.1972 | 0.9221 | 0.9221 | | 0.2029 | 26.49 | 9800 | 0.1961 | 0.9218 | 0.9218 | | 0.203 | 27.03 | 10000 | 0.1966 | 0.9221 | 0.9221 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_prom_prom_300_all-seqsight_32768_512_30M-L1_f", "results": []}]}
mahdibaghbanzadeh/GUE_prom_prom_300_all-seqsight_32768_512_30M-L1_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T04:03:04+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_prom\_prom\_300\_all-seqsight\_32768\_512\_30M-L1\_f ========================================================= This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_prom\_prom\_300\_all dataset. It achieves the following results on the evaluation set: * Loss: 0.2233 * F1 Score: 0.9091 * Accuracy: 0.9091 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_prom_prom_core_tata-seqsight_32768_512_30M-L32_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_prom_prom_core_tata](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_prom_prom_core_tata) dataset. It achieves the following results on the evaluation set: - Loss: 0.6278 - F1 Score: 0.8418 - Accuracy: 0.8418 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:------:|:-----:|:---------------:|:--------:|:--------:| | 0.5627 | 5.13 | 200 | 0.5373 | 0.7354 | 0.7357 | | 0.4667 | 10.26 | 400 | 0.5145 | 0.7635 | 0.7667 | | 0.41 | 15.38 | 600 | 0.4455 | 0.8053 | 0.8059 | | 0.3673 | 20.51 | 800 | 0.4164 | 0.8221 | 0.8222 | | 0.3324 | 25.64 | 1000 | 0.4136 | 0.8270 | 0.8271 | | 0.3039 | 30.77 | 1200 | 0.3964 | 0.8417 | 0.8418 | | 0.2827 | 35.9 | 1400 | 0.3928 | 0.8434 | 0.8434 | | 0.2594 | 41.03 | 1600 | 0.3932 | 0.8332 | 0.8336 | | 0.2454 | 46.15 | 1800 | 0.3887 | 0.8467 | 0.8467 | | 0.2301 | 51.28 | 2000 | 0.4146 | 0.8483 | 0.8483 | | 0.2181 | 56.41 | 2200 | 0.4038 | 0.8434 | 0.8434 | | 0.2026 | 61.54 | 2400 | 0.4016 | 0.8434 | 0.8434 | | 0.1905 | 66.67 | 2600 | 0.4172 | 0.8482 | 0.8483 | | 0.1809 | 71.79 | 2800 | 0.4441 | 0.8385 | 0.8385 | | 0.1741 | 76.92 | 3000 | 0.4264 | 0.8465 | 0.8467 | | 0.1585 | 82.05 | 3200 | 0.4547 | 0.8367 | 0.8369 | | 0.1519 | 87.18 | 3400 | 0.5098 | 0.8342 | 0.8352 | | 0.1542 | 92.31 | 3600 | 0.4655 | 0.8597 | 0.8597 | | 0.1399 | 97.44 | 3800 | 0.4824 | 0.8515 | 0.8515 | | 0.1333 | 102.56 | 4000 | 0.4525 | 0.8564 | 0.8564 | | 0.1288 | 107.69 | 4200 | 0.4617 | 0.8564 | 0.8564 | | 0.125 | 112.82 | 4400 | 0.5046 | 0.8499 | 0.8499 | | 0.1209 | 117.95 | 4600 | 0.4963 | 0.8532 | 0.8532 | | 0.1125 | 123.08 | 4800 | 0.5106 | 0.8481 | 0.8483 | | 0.1095 | 128.21 | 5000 | 0.5352 | 0.8563 | 0.8564 | | 0.1101 | 133.33 | 5200 | 0.5064 | 0.8630 | 0.8630 | | 0.1038 | 138.46 | 5400 | 0.5283 | 0.8548 | 0.8548 | | 0.1004 | 143.59 | 5600 | 0.5361 | 0.8515 | 0.8515 | | 0.0947 | 148.72 | 5800 | 0.5382 | 0.8548 | 0.8548 | | 0.0909 | 153.85 | 6000 | 0.5442 | 0.8466 | 0.8467 | | 0.0932 | 158.97 | 6200 | 0.5373 | 0.8564 | 0.8564 | | 0.0874 | 164.1 | 6400 | 0.5438 | 0.8532 | 0.8532 | | 0.0867 | 169.23 | 6600 | 0.5365 | 0.8630 | 0.8630 | | 0.0862 | 174.36 | 6800 | 0.5783 | 0.8482 | 0.8483 | | 0.0791 | 179.49 | 7000 | 0.5920 | 0.8564 | 0.8564 | | 0.0822 | 184.62 | 7200 | 0.5735 | 0.8581 | 0.8581 | | 0.0794 | 189.74 | 7400 | 0.5573 | 0.8613 | 0.8613 | | 0.0752 | 194.87 | 7600 | 0.5611 | 0.8581 | 0.8581 | | 0.0731 | 200.0 | 7800 | 0.5884 | 0.8564 | 0.8564 | | 0.0727 | 205.13 | 8000 | 0.5914 | 0.8531 | 0.8532 | | 0.0719 | 210.26 | 8200 | 0.5868 | 0.8515 | 0.8515 | | 0.0703 | 215.38 | 8400 | 0.5914 | 0.8548 | 0.8548 | | 0.0686 | 220.51 | 8600 | 0.5958 | 0.8564 | 0.8564 | | 
0.0716 | 225.64 | 8800 | 0.6047 | 0.8515 | 0.8515 | | 0.0723 | 230.77 | 9000 | 0.5930 | 0.8564 | 0.8564 | | 0.0646 | 235.9 | 9200 | 0.6007 | 0.8597 | 0.8597 | | 0.0678 | 241.03 | 9400 | 0.5928 | 0.8548 | 0.8548 | | 0.0662 | 246.15 | 9600 | 0.5984 | 0.8581 | 0.8581 | | 0.0668 | 251.28 | 9800 | 0.5998 | 0.8581 | 0.8581 | | 0.0669 | 256.41 | 10000 | 0.5949 | 0.8515 | 0.8515 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_prom_prom_core_tata-seqsight_32768_512_30M-L32_f", "results": []}]}
mahdibaghbanzadeh/GUE_prom_prom_core_tata-seqsight_32768_512_30M-L32_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T04:03:04+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_prom\_prom\_core\_tata-seqsight\_32768\_512\_30M-L32\_f ============================================================ This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_prom\_prom\_core\_tata dataset. It achieves the following results on the evaluation set: * Loss: 0.6278 * F1 Score: 0.8418 * Accuracy: 0.8418 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # selfbiorag-7b-dpo-full-sft-wo-healthsearch_qa This model is a fine-tuned version of [Minbyul/selfbiorag-7b-wo-healthsearch_qa-sft](https://huggingface.co/Minbyul/selfbiorag-7b-wo-healthsearch_qa-sft) on the HuggingFaceH4/ultrafeedback_binarized dataset. It achieves the following results on the evaluation set: - Loss: 0.4465 - Rewards/chosen: -0.5929 - Rewards/rejected: -1.6772 - Rewards/accuracies: 0.7846 - Rewards/margins: 1.0843 - Logps/rejected: -1480.8429 - Logps/chosen: -984.8102 - Logits/rejected: -3.4642 - Logits/chosen: -2.6475 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-06 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - distributed_type: multi-GPU - num_devices: 4 - gradient_accumulation_steps: 2 - total_train_batch_size: 64 - total_eval_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.39.0.dev0 - Pytorch 2.1.2 - Datasets 2.14.6 - Tokenizers 0.15.2
{"tags": ["alignment-handbook", "trl", "dpo", "generated_from_trainer", "trl", "dpo", "generated_from_trainer"], "datasets": ["HuggingFaceH4/ultrafeedback_binarized"], "base_model": "Minbyul/selfbiorag-7b-wo-healthsearch_qa-sft", "model-index": [{"name": "selfbiorag-7b-dpo-full-sft-wo-healthsearch_qa", "results": []}]}
Minbyul/selfbiorag-7b-dpo-full-sft-wo-healthsearch_qa
null
[ "transformers", "safetensors", "llama", "text-generation", "alignment-handbook", "trl", "dpo", "generated_from_trainer", "dataset:HuggingFaceH4/ultrafeedback_binarized", "base_model:Minbyul/selfbiorag-7b-wo-healthsearch_qa-sft", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T04:03:26+00:00
[]
[]
TAGS #transformers #safetensors #llama #text-generation #alignment-handbook #trl #dpo #generated_from_trainer #dataset-HuggingFaceH4/ultrafeedback_binarized #base_model-Minbyul/selfbiorag-7b-wo-healthsearch_qa-sft #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# selfbiorag-7b-dpo-full-sft-wo-healthsearch_qa This model is a fine-tuned version of Minbyul/selfbiorag-7b-wo-healthsearch_qa-sft on the HuggingFaceH4/ultrafeedback_binarized dataset. It achieves the following results on the evaluation set: - Loss: 0.4465 - Rewards/chosen: -0.5929 - Rewards/rejected: -1.6772 - Rewards/accuracies: 0.7846 - Rewards/margins: 1.0843 - Logps/rejected: -1480.8429 - Logps/chosen: -984.8102 - Logits/rejected: -3.4642 - Logits/chosen: -2.6475 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-06 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - distributed_type: multi-GPU - num_devices: 4 - gradient_accumulation_steps: 2 - total_train_batch_size: 64 - total_eval_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.39.0.dev0 - Pytorch 2.1.2 - Datasets 2.14.6 - Tokenizers 0.15.2
[ "# selfbiorag-7b-dpo-full-sft-wo-healthsearch_qa\n\nThis model is a fine-tuned version of Minbyul/selfbiorag-7b-wo-healthsearch_qa-sft on the HuggingFaceH4/ultrafeedback_binarized dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 0.4465\n- Rewards/chosen: -0.5929\n- Rewards/rejected: -1.6772\n- Rewards/accuracies: 0.7846\n- Rewards/margins: 1.0843\n- Logps/rejected: -1480.8429\n- Logps/chosen: -984.8102\n- Logits/rejected: -3.4642\n- Logits/chosen: -2.6475", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 5e-06\n- train_batch_size: 8\n- eval_batch_size: 8\n- seed: 42\n- distributed_type: multi-GPU\n- num_devices: 4\n- gradient_accumulation_steps: 2\n- total_train_batch_size: 64\n- total_eval_batch_size: 32\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: cosine\n- lr_scheduler_warmup_ratio: 0.1\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.39.0.dev0\n- Pytorch 2.1.2\n- Datasets 2.14.6\n- Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #alignment-handbook #trl #dpo #generated_from_trainer #dataset-HuggingFaceH4/ultrafeedback_binarized #base_model-Minbyul/selfbiorag-7b-wo-healthsearch_qa-sft #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# selfbiorag-7b-dpo-full-sft-wo-healthsearch_qa\n\nThis model is a fine-tuned version of Minbyul/selfbiorag-7b-wo-healthsearch_qa-sft on the HuggingFaceH4/ultrafeedback_binarized dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 0.4465\n- Rewards/chosen: -0.5929\n- Rewards/rejected: -1.6772\n- Rewards/accuracies: 0.7846\n- Rewards/margins: 1.0843\n- Logps/rejected: -1480.8429\n- Logps/chosen: -984.8102\n- Logits/rejected: -3.4642\n- Logits/chosen: -2.6475", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 5e-06\n- train_batch_size: 8\n- eval_batch_size: 8\n- seed: 42\n- distributed_type: multi-GPU\n- num_devices: 4\n- gradient_accumulation_steps: 2\n- total_train_batch_size: 64\n- total_eval_batch_size: 32\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: cosine\n- lr_scheduler_warmup_ratio: 0.1\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.39.0.dev0\n- Pytorch 2.1.2\n- Datasets 2.14.6\n- Tokenizers 0.15.2" ]
[ 94, 179, 7, 9, 9, 4, 155, 5, 43 ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #alignment-handbook #trl #dpo #generated_from_trainer #dataset-HuggingFaceH4/ultrafeedback_binarized #base_model-Minbyul/selfbiorag-7b-wo-healthsearch_qa-sft #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# selfbiorag-7b-dpo-full-sft-wo-healthsearch_qa\n\nThis model is a fine-tuned version of Minbyul/selfbiorag-7b-wo-healthsearch_qa-sft on the HuggingFaceH4/ultrafeedback_binarized dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 0.4465\n- Rewards/chosen: -0.5929\n- Rewards/rejected: -1.6772\n- Rewards/accuracies: 0.7846\n- Rewards/margins: 1.0843\n- Logps/rejected: -1480.8429\n- Logps/chosen: -984.8102\n- Logits/rejected: -3.4642\n- Logits/chosen: -2.6475## Model description\n\nMore information needed## Intended uses & limitations\n\nMore information needed## Training and evaluation data\n\nMore information needed## Training procedure### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 5e-06\n- train_batch_size: 8\n- eval_batch_size: 8\n- seed: 42\n- distributed_type: multi-GPU\n- num_devices: 4\n- gradient_accumulation_steps: 2\n- total_train_batch_size: 64\n- total_eval_batch_size: 32\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: cosine\n- lr_scheduler_warmup_ratio: 0.1\n- num_epochs: 1### Training results### Framework versions\n\n- Transformers 4.39.0.dev0\n- Pytorch 2.1.2\n- Datasets 2.14.6\n- Tokenizers 0.15.2" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_prom_prom_300_all-seqsight_32768_512_30M-L8_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_prom_prom_300_all](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_prom_prom_300_all) dataset. It achieves the following results on the evaluation set: - Loss: 0.2083 - F1 Score: 0.9155 - Accuracy: 0.9155 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:| | 0.3754 | 0.54 | 200 | 0.2521 | 0.9011 | 0.9012 | | 0.2585 | 1.08 | 400 | 0.2312 | 0.9098 | 0.9098 | | 0.2443 | 1.62 | 600 | 0.2205 | 0.9133 | 0.9133 | | 0.2314 | 2.16 | 800 | 0.2192 | 0.9123 | 0.9123 | | 0.2293 | 2.7 | 1000 | 0.2133 | 0.9140 | 0.9140 | | 0.2236 | 3.24 | 1200 | 0.2145 | 0.9142 | 0.9142 | | 0.2247 | 3.78 | 1400 | 0.2088 | 0.9172 | 0.9172 | | 0.2214 | 4.32 | 1600 | 0.2041 | 0.9188 | 0.9187 | | 0.2143 | 4.86 | 1800 | 0.2076 | 0.9182 | 0.9182 | | 0.2114 | 5.41 | 2000 | 0.2012 | 0.9214 | 0.9215 | | 0.2122 | 5.95 | 2200 | 0.1988 | 0.9235 | 0.9235 | | 0.2113 | 6.49 | 2400 | 0.1976 | 0.9242 | 0.9242 | | 0.209 | 7.03 | 2600 | 0.1972 | 0.9206 | 0.9206 | | 0.2032 | 7.57 | 2800 | 0.1967 | 0.9208 | 0.9208 | | 0.2066 | 8.11 | 3000 | 0.1971 | 0.9228 | 0.9228 | | 0.2018 | 8.65 | 3200 | 0.1946 | 0.9247 | 0.9247 | | 0.1975 | 9.19 | 3400 | 0.1939 | 0.9240 | 0.9240 | | 0.1968 | 9.73 | 3600 | 0.1925 | 0.9248 | 0.9248 | | 0.2032 | 10.27 | 3800 | 0.1913 | 0.9223 | 0.9223 | | 0.1969 | 10.81 | 4000 | 0.1899 | 0.9260 | 0.9260 | | 0.1965 | 11.35 | 4200 | 0.1903 | 0.9253 | 0.9253 | | 0.1948 | 11.89 | 4400 | 0.1922 | 0.9250 | 0.925 | | 0.1927 | 12.43 | 4600 | 0.1906 | 0.9248 | 0.9248 | | 0.1933 | 12.97 | 4800 | 0.1895 | 0.9245 | 0.9245 | | 0.1907 | 13.51 | 5000 | 0.1934 | 0.9243 | 0.9243 | | 0.1943 | 14.05 | 5200 | 0.1900 | 0.9257 | 0.9257 | | 0.1884 | 14.59 | 5400 | 0.1902 | 0.9255 | 0.9255 | | 0.189 | 15.14 | 5600 | 0.1914 | 0.9233 | 0.9233 | | 0.1887 | 15.68 | 5800 | 0.1912 | 0.9230 | 0.9230 | | 0.1919 | 16.22 | 6000 | 0.1898 | 0.9241 | 0.9242 | | 0.1891 | 16.76 | 6200 | 0.1873 | 0.9265 | 0.9265 | | 0.1909 | 17.3 | 6400 | 0.1891 | 0.9253 | 0.9253 | | 0.1898 | 17.84 | 6600 | 0.1876 | 0.9260 | 0.9260 | | 0.1819 | 18.38 | 6800 | 0.1878 | 0.9252 | 0.9252 | | 0.189 | 18.92 | 7000 | 0.1877 | 0.9270 | 0.9270 | | 0.1888 | 19.46 | 7200 | 0.1873 | 0.9269 | 0.9269 | | 0.1837 | 20.0 | 7400 | 0.1869 | 0.9279 | 0.9279 | | 0.1864 | 20.54 | 7600 | 0.1869 | 0.9280 | 0.9280 | | 0.1821 | 21.08 | 7800 | 0.1850 | 0.9287 | 0.9287 | | 0.185 | 21.62 | 8000 | 0.1865 | 0.9280 | 0.9280 | | 0.1847 | 22.16 | 8200 | 0.1857 | 0.9279 | 0.9279 | | 0.1796 | 22.7 | 8400 | 0.1863 | 0.9275 | 0.9275 | | 0.1875 | 23.24 | 8600 | 0.1861 | 0.9257 | 0.9257 | | 0.1818 | 23.78 | 8800 | 0.1854 | 0.9267 | 0.9267 | | 
0.1853 | 24.32 | 9000 | 0.1859 | 0.9263 | 0.9264 | | 0.181 | 24.86 | 9200 | 0.1857 | 0.9274 | 0.9274 | | 0.18 | 25.41 | 9400 | 0.1860 | 0.9270 | 0.9270 | | 0.18 | 25.95 | 9600 | 0.1859 | 0.9265 | 0.9265 | | 0.18 | 26.49 | 9800 | 0.1858 | 0.9277 | 0.9277 | | 0.1807 | 27.03 | 10000 | 0.1858 | 0.9274 | 0.9274 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_prom_prom_300_all-seqsight_32768_512_30M-L8_f", "results": []}]}
mahdibaghbanzadeh/GUE_prom_prom_300_all-seqsight_32768_512_30M-L8_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T04:03:47+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_prom\_prom\_300\_all-seqsight\_32768\_512\_30M-L8\_f ========================================================= This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_prom\_prom\_300\_all dataset. It achieves the following results on the evaluation set: * Loss: 0.2083 * F1 Score: 0.9155 * Accuracy: 0.9155 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
null
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
HenryCai1129/adapter-llama-adaptertoxic2nontoxic-2k-search-50-0.0003
null
[ "transformers", "safetensors", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
2024-04-30T04:04:09+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #arxiv-1910.09700 #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #arxiv-1910.09700 #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 26, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #arxiv-1910.09700 #endpoints_compatible #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_EMP_H3K14ac-seqsight_32768_512_30M-L1_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_EMP_H3K14ac](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_EMP_H3K14ac) dataset. It achieves the following results on the evaluation set: - Loss: 0.5313 - F1 Score: 0.7362 - Accuracy: 0.7349 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:| | 0.6211 | 0.97 | 200 | 0.6106 | 0.6738 | 0.6741 | | 0.5868 | 1.93 | 400 | 0.5878 | 0.6990 | 0.6977 | | 0.5796 | 2.9 | 600 | 0.5923 | 0.6891 | 0.6887 | | 0.5747 | 3.86 | 800 | 0.5552 | 0.7198 | 0.7195 | | 0.57 | 4.83 | 1000 | 0.5918 | 0.6950 | 0.6944 | | 0.5651 | 5.8 | 1200 | 0.5861 | 0.7025 | 0.7014 | | 0.5631 | 6.76 | 1400 | 0.5698 | 0.7107 | 0.7089 | | 0.5593 | 7.73 | 1600 | 0.5561 | 0.7273 | 0.7256 | | 0.5561 | 8.7 | 1800 | 0.5569 | 0.7253 | 0.7234 | | 0.5523 | 9.66 | 2000 | 0.5574 | 0.7266 | 0.7247 | | 0.5528 | 10.63 | 2200 | 0.5828 | 0.6924 | 0.6920 | | 0.5442 | 11.59 | 2400 | 0.5509 | 0.7280 | 0.7262 | | 0.5464 | 12.56 | 2600 | 0.5646 | 0.7185 | 0.7168 | | 0.5458 | 13.53 | 2800 | 0.5711 | 0.7114 | 0.7101 | | 0.5415 | 14.49 | 3000 | 0.5805 | 0.6999 | 0.6992 | | 0.5403 | 15.46 | 3200 | 0.5439 | 0.7296 | 0.7280 | | 0.5397 | 16.43 | 3400 | 0.5778 | 0.7075 | 0.7068 | | 0.5392 | 17.39 | 3600 | 0.5535 | 0.7241 | 0.7222 | | 0.5353 | 18.36 | 3800 | 0.5459 | 0.7355 | 0.7337 | | 0.5361 | 19.32 | 4000 | 0.5543 | 0.7229 | 0.7210 | | 0.5324 | 20.29 | 4200 | 0.5556 | 0.7271 | 0.7253 | | 0.5351 | 21.26 | 4400 | 0.5629 | 0.7185 | 0.7171 | | 0.5328 | 22.22 | 4600 | 0.5702 | 0.7138 | 0.7129 | | 0.5348 | 23.19 | 4800 | 0.5479 | 0.7319 | 0.7301 | | 0.532 | 24.15 | 5000 | 0.5561 | 0.7257 | 0.7241 | | 0.5272 | 25.12 | 5200 | 0.5689 | 0.7164 | 0.7153 | | 0.5292 | 26.09 | 5400 | 0.5568 | 0.7270 | 0.7253 | | 0.5311 | 27.05 | 5600 | 0.5823 | 0.7017 | 0.7017 | | 0.5285 | 28.02 | 5800 | 0.5512 | 0.7289 | 0.7271 | | 0.5264 | 28.99 | 6000 | 0.5661 | 0.7172 | 0.7162 | | 0.5261 | 29.95 | 6200 | 0.5738 | 0.7123 | 0.7116 | | 0.5263 | 30.92 | 6400 | 0.5544 | 0.7275 | 0.7259 | | 0.5249 | 31.88 | 6600 | 0.5614 | 0.7196 | 0.7183 | | 0.5209 | 32.85 | 6800 | 0.5667 | 0.7203 | 0.7192 | | 0.5282 | 33.82 | 7000 | 0.5726 | 0.7109 | 0.7104 | | 0.5241 | 34.78 | 7200 | 0.5687 | 0.7146 | 0.7138 | | 0.5249 | 35.75 | 7400 | 0.5636 | 0.7219 | 0.7207 | | 0.5203 | 36.71 | 7600 | 0.5581 | 0.7285 | 0.7271 | | 0.523 | 37.68 | 7800 | 0.5587 | 0.7257 | 0.7244 | | 0.5222 | 38.65 | 8000 | 0.5512 | 0.7306 | 0.7289 | | 0.5262 | 39.61 | 8200 | 0.5592 | 0.7269 | 0.7256 | | 0.5176 | 40.58 | 8400 | 0.5688 | 0.7169 | 0.7162 | | 0.5192 | 41.55 | 8600 | 0.5576 | 0.7276 | 0.7262 | | 0.5205 | 42.51 | 8800 | 0.5582 | 0.7254 | 0.7241 | | 0.5211 | 
43.48 | 9000 | 0.5708 | 0.7156 | 0.7150 | | 0.5205 | 44.44 | 9200 | 0.5639 | 0.7210 | 0.7198 | | 0.5225 | 45.41 | 9400 | 0.5589 | 0.7257 | 0.7244 | | 0.5169 | 46.38 | 9600 | 0.5664 | 0.7184 | 0.7174 | | 0.5211 | 47.34 | 9800 | 0.5622 | 0.7220 | 0.7207 | | 0.5202 | 48.31 | 10000 | 0.5599 | 0.7254 | 0.7241 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
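No loading snippet is provided with this card. A minimal sketch of how the adapter could be attached to the base checkpoint with `peft` and `transformers` follows; the repository ids are the ones named above, while the tokenizer, the two-label sequence-classification head, and the example DNA sequence are assumptions rather than documented details of this model.

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from peft import PeftModel

base_id = "mahdibaghbanzadeh/seqsight_32768_512_30M"
adapter_id = "mahdibaghbanzadeh/GUE_EMP_H3K14ac-seqsight_32768_512_30M-L1_f"

# Load the frozen base model, then attach the fine-tuned PEFT adapter on top of it.
tokenizer = AutoTokenizer.from_pretrained(base_id)  # assumption: the base repo ships a tokenizer
base_model = AutoModelForSequenceClassification.from_pretrained(base_id, num_labels=2)  # assumption: binary task head
model = PeftModel.from_pretrained(base_model, adapter_id)
model.eval()

# Score a single DNA sequence (placeholder input, not taken from the GUE benchmark).
inputs = tokenizer("ACGTACGTACGTACGT", return_tensors="pt")
with torch.no_grad():
    probs = model(**inputs).logits.softmax(dim=-1)
print(probs)
```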
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_EMP_H3K14ac-seqsight_32768_512_30M-L1_f", "results": []}]}
mahdibaghbanzadeh/GUE_EMP_H3K14ac-seqsight_32768_512_30M-L1_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T04:04:32+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_EMP\_H3K14ac-seqsight\_32768\_512\_30M-L1\_f ================================================= This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_EMP\_H3K14ac dataset. It achieves the following results on the evaluation set: * Loss: 0.5313 * F1 Score: 0.7362 * Accuracy: 0.7349 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_prom_prom_300_all-seqsight_32768_512_30M-L32_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_prom_prom_300_all](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_prom_prom_300_all) dataset. It achieves the following results on the evaluation set: - Loss: 0.2076 - F1 Score: 0.9187 - Accuracy: 0.9187 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:| | 0.341 | 0.54 | 200 | 0.2333 | 0.9073 | 0.9073 | | 0.2436 | 1.08 | 400 | 0.2192 | 0.9174 | 0.9174 | | 0.2313 | 1.62 | 600 | 0.2097 | 0.9194 | 0.9194 | | 0.2187 | 2.16 | 800 | 0.2053 | 0.9180 | 0.9181 | | 0.2162 | 2.7 | 1000 | 0.2020 | 0.9177 | 0.9177 | | 0.2101 | 3.24 | 1200 | 0.2039 | 0.9167 | 0.9167 | | 0.2088 | 3.78 | 1400 | 0.1930 | 0.9220 | 0.9220 | | 0.205 | 4.32 | 1600 | 0.1914 | 0.9250 | 0.925 | | 0.1994 | 4.86 | 1800 | 0.1913 | 0.9240 | 0.9240 | | 0.1945 | 5.41 | 2000 | 0.1888 | 0.9253 | 0.9253 | | 0.1957 | 5.95 | 2200 | 0.1876 | 0.9262 | 0.9262 | | 0.1944 | 6.49 | 2400 | 0.1859 | 0.9250 | 0.925 | | 0.1918 | 7.03 | 2600 | 0.1878 | 0.9284 | 0.9284 | | 0.1845 | 7.57 | 2800 | 0.1859 | 0.9272 | 0.9272 | | 0.19 | 8.11 | 3000 | 0.1883 | 0.9280 | 0.9280 | | 0.1826 | 8.65 | 3200 | 0.1845 | 0.9252 | 0.9252 | | 0.1782 | 9.19 | 3400 | 0.1850 | 0.9282 | 0.9282 | | 0.1782 | 9.73 | 3600 | 0.1849 | 0.9277 | 0.9277 | | 0.1832 | 10.27 | 3800 | 0.1808 | 0.9274 | 0.9274 | | 0.1778 | 10.81 | 4000 | 0.1840 | 0.9292 | 0.9292 | | 0.1761 | 11.35 | 4200 | 0.1820 | 0.9279 | 0.9279 | | 0.1748 | 11.89 | 4400 | 0.1829 | 0.9299 | 0.9299 | | 0.1724 | 12.43 | 4600 | 0.1817 | 0.9296 | 0.9296 | | 0.1712 | 12.97 | 4800 | 0.1806 | 0.9301 | 0.9301 | | 0.1685 | 13.51 | 5000 | 0.1847 | 0.9275 | 0.9275 | | 0.1719 | 14.05 | 5200 | 0.1840 | 0.9262 | 0.9262 | | 0.1656 | 14.59 | 5400 | 0.1836 | 0.9302 | 0.9302 | | 0.1659 | 15.14 | 5600 | 0.1828 | 0.9302 | 0.9302 | | 0.1655 | 15.68 | 5800 | 0.1821 | 0.9277 | 0.9277 | | 0.1673 | 16.22 | 6000 | 0.1802 | 0.9309 | 0.9309 | | 0.1626 | 16.76 | 6200 | 0.1844 | 0.9270 | 0.9270 | | 0.1659 | 17.3 | 6400 | 0.1824 | 0.9301 | 0.9301 | | 0.1643 | 17.84 | 6600 | 0.1810 | 0.9289 | 0.9289 | | 0.1558 | 18.38 | 6800 | 0.1833 | 0.9279 | 0.9279 | | 0.1622 | 18.92 | 7000 | 0.1815 | 0.9285 | 0.9285 | | 0.1613 | 19.46 | 7200 | 0.1819 | 0.9301 | 0.9301 | | 0.1575 | 20.0 | 7400 | 0.1808 | 0.9289 | 0.9289 | | 0.1588 | 20.54 | 7600 | 0.1826 | 0.9279 | 0.9279 | | 0.1533 | 21.08 | 7800 | 0.1782 | 0.9297 | 0.9297 | | 0.1546 | 21.62 | 8000 | 0.1805 | 0.9294 | 0.9294 | | 0.1552 | 22.16 | 8200 | 0.1813 | 0.9297 | 0.9297 | | 0.1511 | 22.7 | 8400 | 0.1811 | 0.9301 | 0.9301 | | 0.155 | 23.24 | 8600 | 0.1812 | 0.9279 | 0.9279 | | 0.1544 | 23.78 | 8800 | 0.1797 | 0.9292 | 0.9292 | | 
0.1552 | 24.32 | 9000 | 0.1822 | 0.9290 | 0.9291 | | 0.151 | 24.86 | 9200 | 0.1815 | 0.9285 | 0.9285 | | 0.1515 | 25.41 | 9400 | 0.1818 | 0.9280 | 0.9280 | | 0.1504 | 25.95 | 9600 | 0.1817 | 0.9292 | 0.9292 | | 0.1505 | 26.49 | 9800 | 0.1814 | 0.9289 | 0.9289 | | 0.1485 | 27.03 | 10000 | 0.1815 | 0.9291 | 0.9291 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
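The results table above reports both an F1 score and accuracy at every evaluation step, but the card does not say how they were computed. A small sketch of a metrics function that would produce those two columns is given below; the macro averaging for F1 is an assumption, since the card does not state the averaging mode.

```python
import numpy as np
from sklearn.metrics import accuracy_score, f1_score

def compute_metrics(eval_pred):
    # Returns the two columns reported in the training log: F1 score and accuracy.
    # average="macro" is an assumption; the card does not document the averaging mode.
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=-1)
    return {
        "f1": f1_score(labels, preds, average="macro"),
        "accuracy": accuracy_score(labels, preds),
    }
```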
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_prom_prom_300_all-seqsight_32768_512_30M-L32_f", "results": []}]}
mahdibaghbanzadeh/GUE_prom_prom_300_all-seqsight_32768_512_30M-L32_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T04:04:32+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_prom\_prom\_300\_all-seqsight\_32768\_512\_30M-L32\_f ========================================================== This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_prom\_prom\_300\_all dataset. It achieves the following results on the evaluation set: * Loss: 0.2076 * F1 Score: 0.9187 * Accuracy: 0.9187 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_EMP_H3K14ac-seqsight_32768_512_30M-L8_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_EMP_H3K14ac](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_EMP_H3K14ac) dataset. It achieves the following results on the evaluation set: - Loss: 0.5169 - F1 Score: 0.7454 - Accuracy: 0.7446 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:| | 0.609 | 0.97 | 200 | 0.5883 | 0.6958 | 0.6944 | | 0.5754 | 1.93 | 400 | 0.5632 | 0.7136 | 0.7116 | | 0.5648 | 2.9 | 600 | 0.5999 | 0.6804 | 0.6817 | | 0.5582 | 3.86 | 800 | 0.5436 | 0.7305 | 0.7295 | | 0.546 | 4.83 | 1000 | 0.5875 | 0.6947 | 0.6950 | | 0.5417 | 5.8 | 1200 | 0.5696 | 0.7109 | 0.7098 | | 0.5368 | 6.76 | 1400 | 0.5383 | 0.7370 | 0.7352 | | 0.5324 | 7.73 | 1600 | 0.5430 | 0.7368 | 0.7349 | | 0.5267 | 8.7 | 1800 | 0.5540 | 0.7249 | 0.7234 | | 0.5227 | 9.66 | 2000 | 0.5485 | 0.7307 | 0.7292 | | 0.5245 | 10.63 | 2200 | 0.5652 | 0.7168 | 0.7162 | | 0.5132 | 11.59 | 2400 | 0.5336 | 0.7442 | 0.7425 | | 0.5154 | 12.56 | 2600 | 0.5633 | 0.7208 | 0.7198 | | 0.5141 | 13.53 | 2800 | 0.5603 | 0.7262 | 0.7253 | | 0.5085 | 14.49 | 3000 | 0.5610 | 0.7188 | 0.7177 | | 0.5053 | 15.46 | 3200 | 0.5305 | 0.7470 | 0.7452 | | 0.5071 | 16.43 | 3400 | 0.5553 | 0.7242 | 0.7231 | | 0.5043 | 17.39 | 3600 | 0.5345 | 0.7398 | 0.7380 | | 0.501 | 18.36 | 3800 | 0.5264 | 0.7479 | 0.7464 | | 0.5007 | 19.32 | 4000 | 0.5324 | 0.7437 | 0.7419 | | 0.4968 | 20.29 | 4200 | 0.5485 | 0.7323 | 0.7307 | | 0.5002 | 21.26 | 4400 | 0.5446 | 0.7343 | 0.7328 | | 0.4953 | 22.22 | 4600 | 0.5511 | 0.7294 | 0.7280 | | 0.4959 | 23.19 | 4800 | 0.5296 | 0.7426 | 0.7410 | | 0.4942 | 24.15 | 5000 | 0.5398 | 0.7310 | 0.7292 | | 0.4882 | 25.12 | 5200 | 0.5566 | 0.7251 | 0.7241 | | 0.4882 | 26.09 | 5400 | 0.5433 | 0.7341 | 0.7325 | | 0.4913 | 27.05 | 5600 | 0.5629 | 0.7201 | 0.7192 | | 0.4871 | 28.02 | 5800 | 0.5371 | 0.7329 | 0.7310 | | 0.4834 | 28.99 | 6000 | 0.5448 | 0.7275 | 0.7259 | | 0.4846 | 29.95 | 6200 | 0.5552 | 0.7260 | 0.7247 | | 0.4837 | 30.92 | 6400 | 0.5311 | 0.7418 | 0.7401 | | 0.4817 | 31.88 | 6600 | 0.5389 | 0.7307 | 0.7289 | | 0.4774 | 32.85 | 6800 | 0.5524 | 0.7299 | 0.7283 | | 0.4852 | 33.82 | 7000 | 0.5469 | 0.7304 | 0.7289 | | 0.48 | 34.78 | 7200 | 0.5522 | 0.7268 | 0.7253 | | 0.4839 | 35.75 | 7400 | 0.5412 | 0.7374 | 0.7356 | | 0.4745 | 36.71 | 7600 | 0.5460 | 0.7322 | 0.7304 | | 0.4787 | 37.68 | 7800 | 0.5444 | 0.7288 | 0.7271 | | 0.4737 | 38.65 | 8000 | 0.5371 | 0.7383 | 0.7365 | | 0.4814 | 39.61 | 8200 | 0.5405 | 0.7328 | 0.7310 | | 0.4688 | 40.58 | 8400 | 0.5516 | 0.7291 | 0.7274 | | 0.4735 | 41.55 | 8600 | 0.5391 | 0.7347 | 0.7328 | | 0.4747 | 42.51 | 8800 | 0.5424 | 0.7337 | 0.7319 | | 0.4764 | 
43.48 | 9000 | 0.5544 | 0.7271 | 0.7256 | | 0.4729 | 44.44 | 9200 | 0.5512 | 0.7294 | 0.7277 | | 0.4761 | 45.41 | 9400 | 0.5433 | 0.7328 | 0.7310 | | 0.4701 | 46.38 | 9600 | 0.5481 | 0.7318 | 0.7301 | | 0.4746 | 47.34 | 9800 | 0.5451 | 0.7331 | 0.7313 | | 0.4722 | 48.31 | 10000 | 0.5435 | 0.7340 | 0.7322 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
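The hyperparameter list above maps directly onto the standard `transformers` training configuration. A sketch of the corresponding `TrainingArguments` follows; only the values listed in the card come from it, the 200-step evaluation interval is read off the results table, and `output_dir` is a placeholder.

```python
from transformers import TrainingArguments

# Sketch mirroring the hyperparameters listed in this card (Transformers 4.38).
args = TrainingArguments(
    output_dir="GUE_EMP_H3K14ac-seqsight_32768_512_30M-L8_f",  # placeholder
    learning_rate=5e-4,
    per_device_train_batch_size=128,
    per_device_eval_batch_size=128,
    seed=42,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
    max_steps=10_000,
    evaluation_strategy="steps",
    eval_steps=200,   # matches the 200-step cadence in the results table
    logging_steps=200,
)
```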
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_EMP_H3K14ac-seqsight_32768_512_30M-L8_f", "results": []}]}
mahdibaghbanzadeh/GUE_EMP_H3K14ac-seqsight_32768_512_30M-L8_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T04:04:45+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_EMP\_H3K14ac-seqsight\_32768\_512\_30M-L8\_f ================================================= This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_EMP\_H3K14ac dataset. It achieves the following results on the evaluation set: * Loss: 0.5169 * F1 Score: 0.7454 * Accuracy: 0.7446 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
text-classification
transformers
## TextAttack Model Card This `distilbert` model was fine-tuned using TextAttack. The model was fine-tuned for 3 epochs with a batch size of 8, a maximum sequence length of 512, and an initial learning rate of 3e-05. Since this was a classification task, the model was trained with a cross-entropy loss function. The best score the model achieved on this task was 0.937, as measured by the eval set accuracy, found after 3 epochs. For more information, check out [TextAttack on Github](https://github.com/QData/TextAttack).
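For inference, the fine-tuned checkpoint can presumably be used through the standard `transformers` text-classification pipeline. A minimal sketch is below; the example sentence is a placeholder, and the meaning of the returned labels depends on the checkpoint's config, which this card does not document.

```python
from transformers import pipeline

classifier = pipeline("text-classification", model="WangA/distilbert-base-finetuned-jd")

# Placeholder Chinese review; the label names returned (e.g. LABEL_0 / LABEL_1)
# are not documented in this card.
print(classifier("这个商品质量很好,物流也很快。"))
```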
{"language": ["zh"], "license": "apache-2.0", "metrics": ["accuracy"], "pipeline_tag": "text-classification"}
WangA/distilbert-base-finetuned-jd
null
[ "transformers", "safetensors", "distilbert", "text-classification", "zh", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-30T04:05:11+00:00
[]
[ "zh" ]
TAGS #transformers #safetensors #distilbert #text-classification #zh #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us
## TextAttack Model Card This 'distilbert' model was fine-tuned using TextAttack. The model was fine-tuned for 3 epochs with a batch size of 8, a maximum sequence length of 512, and an initial learning rate of 3e-05. Since this was a classification task, the model was trained with a cross-entropy loss function. The best score the model achieved on this task was 0.937, as measured by the eval set accuracy, found after 3 epochs. For more information, check out TextAttack on Github.
[ "## TextAttack Model Card\n\n This 'distilbert' model was fine-tuned using TextAttack. The model was fine-tuned\n for 3 epochs with a batch size of 8,\n a maximum sequence length of 512, and an initial learning rate of 3e-05.\n Since this was a classification task, the model was trained with a cross-entropy loss function.\n The best score the model achieved on this task was 0.937, as measured by the\n eval set accuracy, found after 3 epochs.\n\n For more information, check out TextAttack on Github." ]
[ "TAGS\n#transformers #safetensors #distilbert #text-classification #zh #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n", "## TextAttack Model Card\n\n This 'distilbert' model was fine-tuned using TextAttack. The model was fine-tuned\n for 3 epochs with a batch size of 8,\n a maximum sequence length of 512, and an initial learning rate of 3e-05.\n Since this was a classification task, the model was trained with a cross-entropy loss function.\n The best score the model achieved on this task was 0.937, as measured by the\n eval set accuracy, found after 3 epochs.\n\n For more information, check out TextAttack on Github." ]
[ 40, 121 ]
[ "TAGS\n#transformers #safetensors #distilbert #text-classification #zh #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n## TextAttack Model Card\n\n This 'distilbert' model was fine-tuned using TextAttack. The model was fine-tuned\n for 3 epochs with a batch size of 8,\n a maximum sequence length of 512, and an initial learning rate of 3e-05.\n Since this was a classification task, the model was trained with a cross-entropy loss function.\n The best score the model achieved on this task was 0.937, as measured by the\n eval set accuracy, found after 3 epochs.\n\n For more information, check out TextAttack on Github." ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_EMP_H3K14ac-seqsight_32768_512_30M-L32_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_EMP_H3K14ac](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_EMP_H3K14ac) dataset. It achieves the following results on the evaluation set: - Loss: 0.5324 - F1 Score: 0.7516 - Accuracy: 0.7504 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:| | 0.5995 | 0.97 | 200 | 0.5710 | 0.7127 | 0.7107 | | 0.5654 | 1.93 | 400 | 0.5500 | 0.7193 | 0.7177 | | 0.5486 | 2.9 | 600 | 0.5834 | 0.6900 | 0.6905 | | 0.5401 | 3.86 | 800 | 0.5375 | 0.7422 | 0.7404 | | 0.5259 | 4.83 | 1000 | 0.5876 | 0.7014 | 0.7023 | | 0.521 | 5.8 | 1200 | 0.5590 | 0.7225 | 0.7216 | | 0.5147 | 6.76 | 1400 | 0.5281 | 0.7434 | 0.7416 | | 0.5094 | 7.73 | 1600 | 0.5283 | 0.7437 | 0.7419 | | 0.5015 | 8.7 | 1800 | 0.5503 | 0.7247 | 0.7238 | | 0.4981 | 9.66 | 2000 | 0.5451 | 0.7333 | 0.7319 | | 0.4975 | 10.63 | 2200 | 0.5551 | 0.7293 | 0.7286 | | 0.4858 | 11.59 | 2400 | 0.5316 | 0.7425 | 0.7407 | | 0.4862 | 12.56 | 2600 | 0.5598 | 0.7285 | 0.7274 | | 0.4828 | 13.53 | 2800 | 0.5533 | 0.7315 | 0.7304 | | 0.4771 | 14.49 | 3000 | 0.5429 | 0.7427 | 0.7410 | | 0.4717 | 15.46 | 3200 | 0.5272 | 0.7488 | 0.7470 | | 0.4684 | 16.43 | 3400 | 0.5541 | 0.7354 | 0.7340 | | 0.4673 | 17.39 | 3600 | 0.5392 | 0.7413 | 0.7395 | | 0.4619 | 18.36 | 3800 | 0.5252 | 0.7443 | 0.7434 | | 0.4598 | 19.32 | 4000 | 0.5366 | 0.7507 | 0.7492 | | 0.4536 | 20.29 | 4200 | 0.5497 | 0.7350 | 0.7331 | | 0.4546 | 21.26 | 4400 | 0.5486 | 0.7365 | 0.7346 | | 0.45 | 22.22 | 4600 | 0.5716 | 0.7334 | 0.7322 | | 0.4466 | 23.19 | 4800 | 0.5486 | 0.7357 | 0.7340 | | 0.444 | 24.15 | 5000 | 0.5740 | 0.7280 | 0.7265 | | 0.437 | 25.12 | 5200 | 0.5811 | 0.7251 | 0.7244 | | 0.4377 | 26.09 | 5400 | 0.5717 | 0.7311 | 0.7295 | | 0.4379 | 27.05 | 5600 | 0.6015 | 0.7226 | 0.7219 | | 0.4321 | 28.02 | 5800 | 0.5653 | 0.7304 | 0.7286 | | 0.4275 | 28.99 | 6000 | 0.5599 | 0.7352 | 0.7334 | | 0.4268 | 29.95 | 6200 | 0.5907 | 0.7252 | 0.7241 | | 0.423 | 30.92 | 6400 | 0.5557 | 0.7391 | 0.7374 | | 0.4214 | 31.88 | 6600 | 0.5636 | 0.7364 | 0.7346 | | 0.4147 | 32.85 | 6800 | 0.5935 | 0.7273 | 0.7259 | | 0.4206 | 33.82 | 7000 | 0.5936 | 0.7247 | 0.7238 | | 0.4144 | 34.78 | 7200 | 0.5917 | 0.7253 | 0.7241 | | 0.4164 | 35.75 | 7400 | 0.5744 | 0.7347 | 0.7328 | | 0.4071 | 36.71 | 7600 | 0.5950 | 0.7295 | 0.7280 | | 0.4095 | 37.68 | 7800 | 0.5915 | 0.7254 | 0.7238 | | 0.4024 | 38.65 | 8000 | 0.5928 | 0.7303 | 0.7286 | | 0.4133 | 39.61 | 8200 | 0.5809 | 0.7229 | 0.7210 | | 0.399 | 40.58 | 8400 | 0.5939 | 0.7310 | 0.7292 | | 0.399 | 41.55 | 8600 | 0.5873 | 0.7320 | 0.7301 | | 0.4036 | 42.51 | 8800 | 0.5890 | 0.7307 | 0.7289 | | 0.4041 | 
43.48 | 9000 | 0.6023 | 0.7283 | 0.7268 | | 0.3987 | 44.44 | 9200 | 0.5997 | 0.7263 | 0.7247 | | 0.4011 | 45.41 | 9400 | 0.5952 | 0.7288 | 0.7271 | | 0.3971 | 46.38 | 9600 | 0.5951 | 0.7300 | 0.7283 | | 0.4 | 47.34 | 9800 | 0.5905 | 0.7259 | 0.7241 | | 0.3954 | 48.31 | 10000 | 0.5908 | 0.7277 | 0.7259 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
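The card identifies this run as a PEFT fine-tune of the base checkpoint but does not say which adapter method, rank, or target modules were used. The sketch below shows one way such an adapter could be set up with LoRA; every value in `LoraConfig`, including the target module names, is an illustrative placeholder rather than the configuration actually used here.

```python
from transformers import AutoModelForSequenceClassification
from peft import LoraConfig, TaskType, get_peft_model

base = AutoModelForSequenceClassification.from_pretrained(
    "mahdibaghbanzadeh/seqsight_32768_512_30M", num_labels=2  # assumption: binary task head
)

# Purely illustrative adapter settings; the card does not document the PEFT method used.
peft_config = LoraConfig(
    task_type=TaskType.SEQ_CLS,
    r=8,
    lora_alpha=16,
    lora_dropout=0.1,
    target_modules=["query", "value"],  # placeholder module names
)
model = get_peft_model(base, peft_config)
model.print_trainable_parameters()
```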
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_EMP_H3K14ac-seqsight_32768_512_30M-L32_f", "results": []}]}
mahdibaghbanzadeh/GUE_EMP_H3K14ac-seqsight_32768_512_30M-L32_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T04:05:16+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_EMP\_H3K14ac-seqsight\_32768\_512\_30M-L32\_f ================================================== This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_EMP\_H3K14ac dataset. It achieves the following results on the evaluation set: * Loss: 0.5324 * F1 Score: 0.7516 * Accuracy: 0.7504 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
null
null
# Model Card for Model ID This code implements Support Vector Machines (SVMs) with two different kernels: linear and RBF. A model card should mention that the model is an SVM and potentially specify the available kernels. ## Model Details The code demonstrates how the model is trained using the SVC class from scikit-learn. A model card's training details section might mention scikit-learn as a training framework. ### Model Description This model is a Support Vector Machine (SVM) classifier implemented using scikit-learn. It can be used for binary classification tasks where the data can be separated by a hyperplane in a high-dimensional space. The model offers two kernel choices: linear and RBF (Radial Basis Function). The linear kernel is suitable for data that is already linearly separable, while the RBF kernel can handle non-linearly separable data by mapping it to a higher-dimensional space. Here are some key aspects of this model: Classification Task: Binary classification (separating data points into two classes). Kernel Choices: Linear and RBF. Implementation Library: scikit-learn. Additionally, consider including these details if relevant: Limitations of SVMs, such as potentially high computational cost for training large datasets or difficulty interpreting the model's decisions. Specific use cases where this type of SVM might be suitable (e.g., image classification with low-dimensional data for linear kernel, or text classification for RBF kernel). Remember to replace or adjust the details based on your specific implementation and use case. ### Model Sources [optional] Akif ## Uses Direct Use This SVM model can be directly used for binary classification tasks where the data can be separated by a hyperplane. Here are some potential applications: Spam filtering: Classifying emails as spam or not spam based on features like sender address, keywords, and content. Image categorization: Classifying images into two categories, such as cat vs. dog or handwritten digit recognition (classifying digits 0-9). Sentiment analysis: Classifying text data as positive or negative sentiment. General requirements for direct use: The data needs to be well-defined with clear features that distinguish the two classes. The data should be balanced, meaning there are roughly equal numbers of data points for each class. Downstream Use [optional] This SVM model can also be a building block for more complex machine learning pipelines. Here's an example: You could use this model as a first stage filter in a multi-class classification problem. The SVM could classify data points into broad categories, and then a separate model could handle further classification within those categories. General requirements for downstream use: The downstream task should benefit from the binary classification performed by the SVM. The data used downstream should be compatible with the output of the SVM. Out-of-Scope Use While this SVM can be a powerful tool, it's essential to consider limitations: High dimensionality: The SVM might not perform well with very high-dimensional data due to the curse of dimensionality. Non-linear data: The linear kernel might not be suitable for data that is not linearly separable. In such cases, the RBF kernel or other kernel functions might be needed. Imbalanced data: The model's performance can be skewed if the data has a significant class imbalance (one class having many more data points than the other). 
It's important to avoid using this model for tasks where these limitations could significantly impact its effectiveness. ### Direct Use This SVM model can be directly applied to binary classification tasks where the data can be well-represented in a high-dimensional space and separated by a hyperplane. Here are some potential applications: Spam Filtering: Classifying emails as spam or not spam based on features like sender address, keywords, and content. This could be useful for personal email filtering or as a building block in more sophisticated spam filtering systems. Image Categorization: Classifying images into two broad categories, such as cat vs. dog or handwritten digit recognition (classifying digits 0-9). This could be used for simple image sorting tasks or as a preliminary step in more complex image recognition pipelines. Sentiment Analysis: Classifying text data as positive or negative sentiment. This could be helpful for analyzing customer reviews, social media posts, or other textual data to understand overall sentiment. General requirements for direct use: Data Suitability: The data should have clear features that effectively distinguish the two classes the model is designed to separate. Features might be numerical or categorical, depending on the task. Data Balance: Ideally, the data should be balanced, meaning there are roughly equal numbers of data points for each class (positive and negative). Imbalanced data can bias the model towards the majority class. Interpretability Needs: If you need to understand the model's reasoning behind its classifications, a linear kernel SVM might be preferable as it offers more interpretability compared to the RBF kernel. Additional Considerations: SVMs can be computationally expensive to train for very large datasets. Consider this when dealing with massive amounts of data. While SVMs are powerful classifiers, they might not be the best choice for all binary classification problems. Explore other algorithms like decision trees or random forests if the data is highly complex or not easily separable by a hyperplane. [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations Bias, Risks, and Limitations Here's a possible description for the "Bias, Risks, and Limitations" section of your model card: Bias: Training Data Bias: Like any machine learning model, this SVM is susceptible to bias present in the training data. If the training data is skewed towards one class or if certain features are not representative of the real world, the model's predictions can be biased. Algorithmic Bias: SVMs themselves might exhibit bias depending on the kernel used. For instance, linear SVMs can struggle with non-linear data distributions, potentially favoring certain regions of the feature space. Risks: Misclassification: The model might misclassify data points, especially if the data is noisy or not well-separated. This can lead to errors in downstream applications. Overfitting: If the model is trained on a small dataset or with overly complex hyperparameters, it might overfit the training data and perform poorly on unseen data. Limitations: High Dimensionality: SVMs can become computationally expensive and less effective when dealing with very high-dimensional data due to the "curse of dimensionality." Non-linear Data: The linear kernel SVM is limited to linearly separable data. 
For more complex, non-linear relationships, the RBF kernel might be necessary, but it can be less interpretable. Imbalanced Data: The model's performance can be skewed if the data has a significant class imbalance (one class having many more data points than the other). General Mitigation Strategies: Use high-quality, balanced training data that represents the real-world distribution of the target variable. Carefully select and tune hyperparameters to avoid overfitting. Consider using techniques like cross-validation to evaluate the model's generalizability. Be aware of the limitations of SVMs and choose alternative algorithms if the data is high-dimensional, non-linear, or imbalanced. It's important to understand these potential biases, risks, and limitations before deploying this SVM model in real-world applications. [More Information Needed] ### Recommendations Recommendations To mitigate the potential biases, risks, and limitations discussed in the previous section, here are some recommendations for users of this SVM model: Data Considerations: Data Quality and Balance: Ensure the training data used for the SVM is high-quality, free from errors, and balanced between the two classes. Techniques like data cleaning and oversampling/undersampling can be used to address imbalances. Data Representativeness: The training data should accurately represent the real-world distribution of data the model will encounter during deployment. Consider potential biases in data collection processes and explore mitigating strategies. Model Training and Evaluation: Hyperparameter Tuning: Carefully tune the hyperparameters of the SVM (e.g., regularization parameter, kernel parameters) to achieve a good balance between training accuracy and generalization performance. Techniques like grid search or randomized search can be helpful. Cross-Validation: Evaluate the model's performance using techniques like k-fold cross-validation to get a more robust estimate of its generalizability to unseen data. Alternative Models: Consider Alternatives: If the data is high-dimensional, non-linear, or imbalanced, explore alternative classification algorithms like decision trees, random forests, or gradient boosting that might be more suitable for such scenarios. Monitoring and Improvement: Monitor Performance: Continuously monitor the model's performance in deployment and retrain it with new data or adjusted hyperparameters if its accuracy degrades over time. Additionally: Document Biases: Document any identified biases in the training data or the model itself. This transparency is crucial for responsible model development and deployment. Responsible Use: Be aware of the potential societal impacts of using this model and ensure its application aligns with ethical considerations. By following these recommendations, users can help mitigate the risks and limitations associated with this SVM model and promote its fair and effective use. ## How to Get Started with the Model Use the code below to get started with the model. 
import numpy as np import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.svm import SVC from sklearn.datasets import make_classification # Generate synthetic dataset X, y = make_classification(n_samples=100, n_features=2, n_informative=2, n_redundant=0, n_classes=2, n_clusters_per_class=1, random_state=42) # Split the dataset into training and testing sets X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) # Support Vector Machine without kernel (linear kernel) svm_linear = SVC(kernel='linear') svm_linear.fit(X_train, y_train) linear_train_acc = svm_linear.score(X_train, y_train) linear_test_acc = svm_linear.score(X_test, y_test) # Support Vector Machine with radial basis function (RBF) kernel svm_rbf = SVC(kernel='rbf') svm_rbf.fit(X_train, y_train) rbf_train_acc = svm_rbf.score(X_train, y_train) rbf_test_acc = svm_rbf.score(X_test, y_test) # Visualize decision boundary for linear SVM plt.figure(figsize=(10, 5)) plt.subplot(1, 2, 1) plt.scatter(X[:, 0], X[:, 1], c=y, cmap='coolwarm', edgecolors='k', s=100) plt.title("Linear SVM") plt.xlabel("Feature 1") plt.ylabel("Feature 2") # Plot decision boundary ax = plt.gca() xlim = ax.get_xlim() ylim = ax.get_ylim() # Create grid to evaluate model xx = np.linspace(xlim[0], xlim[1], 30) yy = np.linspace(ylim[0], ylim[1], 30) YY, XX = np.meshgrid(yy, xx) xy = np.vstack([XX.ravel(), YY.ravel()]).T Z = svm_linear.decision_function(xy).reshape(XX.shape) # Plot decision boundary and margins ax.contour(XX, YY, Z, colors='k', levels=[-1, 0, 1], alpha=0.5, linestyles=['--', '-', '--']) ax.scatter(svm_linear.support_vectors_[:, 0], svm_linear.support_vectors_[:, 1], s=100, linewidth=1, facecolors='none', edgecolors='k') plt.subplot(1, 2, 2) plt.scatter(X[:, 0], X[:, 1], c=y, cmap='coolwarm', edgecolors='k', s=100) plt.title("RBF SVM") plt.xlabel("Feature 1") plt.ylabel("Feature 2") # Plot decision boundary ax = plt.gca() xlim = ax.get_xlim() ylim = ax.get_ylim() # Create grid to evaluate model xx = np.linspace(xlim[0], xlim[1], 30) yy = np.linspace(ylim[0], ylim[1], 30) YY, XX = np.meshgrid(yy, xx) xy = np.vstack([XX.ravel(), YY.ravel()]).T Z = svm_rbf.decision_function(xy).reshape(XX.shape) # Plot decision boundary and margins ax.contour(XX, YY, Z, colors='k', levels=[-1, 0, 1], alpha=0.5, linestyles=['--', '-', '--']) ax.scatter(svm_rbf.support_vectors_[:, 0], svm_rbf.support_vectors_[:, 1], s=100, linewidth=1, facecolors='none', edgecolors='k') plt.tight_layout() plt.show() # Print accuracy scores print("Linear SVM - Training Accuracy: {:.2f}, Test Accuracy: {:.2f}".format(linear_train_acc, linear_test_acc)) print("RBF SVM - Training Accuracy: {:.2f}, Test Accuracy: {:.2f}".format(rbf_train_acc, rbf_test_acc)) # Example usage after training the model (replace with your specific logic) def predict_new_data(X_new): predictions = svm_model.predict(X_new) return predictions # Example usage X_new = np.array([[1.5, 2.0]]) # Replace with your new data point predictions = predict_new_data(X_new) print("Predicted class:", predictions[0]) ### Training Data Electric_Vehicle_Population_Data.csv [More Information Needed] ### Testing Data, Factors & Metrics #### Testing Hyperparameters The code trains two SVMs: Linear SVM: Uses the 'linear' kernel. RBF SVM: Uses the 'rbf' kernel. [More Information Needed] #### Software Visual Studio - Python ## Model Card Contact [email protected]
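The recommendations in this card point to hyperparameter tuning with grid search and k-fold cross-validation but do not show how to do either. A short scikit-learn sketch is given below; the parameter grid, fold count, and synthetic data are illustrative choices, not settings used by this model.

```python
from sklearn.datasets import make_classification
from sklearn.model_selection import GridSearchCV, StratifiedKFold
from sklearn.svm import SVC

# Synthetic stand-in data; replace with the actual feature matrix and labels.
X, y = make_classification(n_samples=200, n_features=2, n_informative=2,
                           n_redundant=0, random_state=42)

# Illustrative grid; gamma is ignored when the linear kernel is selected.
param_grid = {
    "kernel": ["linear", "rbf"],
    "C": [0.1, 1, 10],
    "gamma": ["scale", 0.1, 1],
}
search = GridSearchCV(
    SVC(),
    param_grid,
    cv=StratifiedKFold(n_splits=5, shuffle=True, random_state=42),
    scoring="accuracy",
)
search.fit(X, y)
print("Best parameters:", search.best_params_)
print("Best cross-validated accuracy: {:.3f}".format(search.best_score_))
```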
{}
Ironclad313/SVM
null
[ "region:us" ]
null
2024-04-30T04:06:21+00:00
[]
[]
TAGS #region-us
# Model Card for Model ID This code implements Support Vector Machines (SVMs) with two different kernels: linear and RBF. A model card should mention that the model is an SVM and potentially specify the available kernels. ## Model Details The code demonstrates how the model is trained using the SVC class from scikit-learn. A model card's training details section might mention scikit-learn as a training framework. ### Model Description This model is a Support Vector Machine (SVM) classifier implemented using scikit-learn. It can be used for binary classification tasks where the data can be separated by a hyperplane in a high-dimensional space. The model offers two kernel choices: linear and RBF (Radial Basis Function). The linear kernel is suitable for data that is already linearly separable, while the RBF kernel can handle non-linearly separable data by mapping it to a higher-dimensional space. Here are some key aspects of this model: Classification Task: Binary classification (separating data points into two classes). Kernel Choices: Linear and RBF. Implementation Library: scikit-learn. Additionally, consider including these details if relevant: Limitations of SVMs, such as potentially high computational cost for training large datasets or difficulty interpreting the model's decisions. Specific use cases where this type of SVM might be suitable (e.g., image classification with low-dimensional data for linear kernel, or text classification for RBF kernel). Remember to replace or adjust the details based on your specific implementation and use case. ### Model Sources [optional] Akif ## Uses Direct Use This SVM model can be directly used for binary classification tasks where the data can be separated by a hyperplane. Here are some potential applications: Spam filtering: Classifying emails as spam or not spam based on features like sender address, keywords, and content. Image categorization: Classifying images into two categories, such as cat vs. dog or handwritten digit recognition (classifying digits 0-9). Sentiment analysis: Classifying text data as positive or negative sentiment. General requirements for direct use: The data needs to be well-defined with clear features that distinguish the two classes. The data should be balanced, meaning there are roughly equal numbers of data points for each class. Downstream Use [optional] This SVM model can also be a building block for more complex machine learning pipelines. Here's an example: You could use this model as a first stage filter in a multi-class classification problem. The SVM could classify data points into broad categories, and then a separate model could handle further classification within those categories. General requirements for downstream use: The downstream task should benefit from the binary classification performed by the SVM. The data used downstream should be compatible with the output of the SVM. Out-of-Scope Use While this SVM can be a powerful tool, it's essential to consider limitations: High dimensionality: The SVM might not perform well with very high-dimensional data due to the curse of dimensionality. Non-linear data: The linear kernel might not be suitable for data that is not linearly separable. In such cases, the RBF kernel or other kernel functions might be needed. Imbalanced data: The model's performance can be skewed if the data has a significant class imbalance (one class having many more data points than the other). 
It's important to avoid using this model for tasks where these limitations could significantly impact its effectiveness. ### Direct Use This SVM model can be directly applied to binary classification tasks where the data can be well-represented in a high-dimensional space and separated by a hyperplane. Here are some potential applications: Spam Filtering: Classifying emails as spam or not spam based on features like sender address, keywords, and content. This could be useful for personal email filtering or as a building block in more sophisticated spam filtering systems. Image Categorization: Classifying images into two broad categories, such as cat vs. dog or handwritten digit recognition (classifying digits 0-9). This could be used for simple image sorting tasks or as a preliminary step in more complex image recognition pipelines. Sentiment Analysis: Classifying text data as positive or negative sentiment. This could be helpful for analyzing customer reviews, social media posts, or other textual data to understand overall sentiment. General requirements for direct use: Data Suitability: The data should have clear features that effectively distinguish the two classes the model is designed to separate. Features might be numerical or categorical, depending on the task. Data Balance: Ideally, the data should be balanced, meaning there are roughly equal numbers of data points for each class (positive and negative). Imbalanced data can bias the model towards the majority class. Interpretability Needs: If you need to understand the model's reasoning behind its classifications, a linear kernel SVM might be preferable as it offers more interpretability compared to the RBF kernel. Additional Considerations: SVMs can be computationally expensive to train for very large datasets. Consider this when dealing with massive amounts of data. While SVMs are powerful classifiers, they might not be the best choice for all binary classification problems. Explore other algorithms like decision trees or random forests if the data is highly complex or not easily separable by a hyperplane. ### Out-of-Scope Use ## Bias, Risks, and Limitations Bias, Risks, and Limitations Here's a possible description for the "Bias, Risks, and Limitations" section of your model card: Bias: Training Data Bias: Like any machine learning model, this SVM is susceptible to bias present in the training data. If the training data is skewed towards one class or if certain features are not representative of the real world, the model's predictions can be biased. Algorithmic Bias: SVMs themselves might exhibit bias depending on the kernel used. For instance, linear SVMs can struggle with non-linear data distributions, potentially favoring certain regions of the feature space. Risks: Misclassification: The model might misclassify data points, especially if the data is noisy or not well-separated. This can lead to errors in downstream applications. Overfitting: If the model is trained on a small dataset or with overly complex hyperparameters, it might overfit the training data and perform poorly on unseen data. Limitations: High Dimensionality: SVMs can become computationally expensive and less effective when dealing with very high-dimensional data due to the "curse of dimensionality." Non-linear Data: The linear kernel SVM is limited to linearly separable data. For more complex, non-linear relationships, the RBF kernel might be necessary, but it can be less interpretable. 
Imbalanced Data: The model's performance can be skewed if the data has a significant class imbalance (one class having many more data points than the other). General Mitigation Strategies: Use high-quality, balanced training data that represents the real-world distribution of the target variable. Carefully select and tune hyperparameters to avoid overfitting. Consider using techniques like cross-validation to evaluate the model's generalizability. Be aware of the limitations of SVMs and choose alternative algorithms if the data is high-dimensional, non-linear, or imbalanced. It's important to understand these potential biases, risks, and limitations before deploying this SVM model in real-world applications. ### Recommendations Recommendations To mitigate the potential biases, risks, and limitations discussed in the previous section, here are some recommendations for users of this SVM model: Data Considerations: Data Quality and Balance: Ensure the training data used for the SVM is high-quality, free from errors, and balanced between the two classes. Techniques like data cleaning and oversampling/undersampling can be used to address imbalances. Data Representativeness: The training data should accurately represent the real-world distribution of data the model will encounter during deployment. Consider potential biases in data collection processes and explore mitigating strategies. Model Training and Evaluation: Hyperparameter Tuning: Carefully tune the hyperparameters of the SVM (e.g., regularization parameter, kernel parameters) to achieve a good balance between training accuracy and generalization performance. Techniques like grid search or randomized search can be helpful. Cross-Validation: Evaluate the model's performance using techniques like k-fold cross-validation to get a more robust estimate of its generalizability to unseen data. Alternative Models: Consider Alternatives: If the data is high-dimensional, non-linear, or imbalanced, explore alternative classification algorithms like decision trees, random forests, or gradient boosting that might be more suitable for such scenarios. Monitoring and Improvement: Monitor Performance: Continuously monitor the model's performance in deployment and retrain it with new data or adjusted hyperparameters if its accuracy degrades over time. Additionally: Document Biases: Document any identified biases in the training data or the model itself. This transparency is crucial for responsible model development and deployment. Responsible Use: Be aware of the potential societal impacts of using this model and ensure its application aligns with ethical considerations. By following these recommendations, users can help mitigate the risks and limitations associated with this SVM model and promote its fair and effective use. ## How to Get Started with the Model Use the code below to get started with the model. 
import numpy as np import URL as plt from sklearn.model_selection import train_test_split from URL import SVC from sklearn.datasets import make_classification # Generate synthetic dataset X, y = make_classification(n_samples=100, n_features=2, n_informative=2, n_redundant=0, n_classes=2, n_clusters_per_class=1, random_state=42) # Split the dataset into training and testing sets X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) # Support Vector Machine without kernel (linear kernel) svm_linear = SVC(kernel='linear') svm_linear.fit(X_train, y_train) linear_train_acc = svm_linear.score(X_train, y_train) linear_test_acc = svm_linear.score(X_test, y_test) # Support Vector Machine with radial basis function (RBF) kernel svm_rbf = SVC(kernel='rbf') svm_rbf.fit(X_train, y_train) rbf_train_acc = svm_rbf.score(X_train, y_train) rbf_test_acc = svm_rbf.score(X_test, y_test) # Visualize decision boundary for linear SVM URL(figsize=(10, 5)) plt.subplot(1, 2, 1) plt.scatter(X[:, 0], X[:, 1], c=y, cmap='coolwarm', edgecolors='k', s=100) URL("Linear SVM") URL("Feature 1") URL("Feature 2") # Plot decision boundary ax = URL() xlim = ax.get_xlim() ylim = ax.get_ylim() # Create grid to evaluate model xx = np.linspace(xlim[0], xlim[1], 30) yy = np.linspace(ylim[0], ylim[1], 30) YY, XX = np.meshgrid(yy, xx) xy = URL([URL(), URL()]).T Z = svm_linear.decision_function(xy).reshape(URL) # Plot decision boundary and margins ax.contour(XX, YY, Z, colors='k', levels=[-1, 0, 1], alpha=0.5, linestyles=['--', '-', '--']) ax.scatter(svm_linear.support_vectors_[:, 0], svm_linear.support_vectors_[:, 1], s=100, linewidth=1, facecolors='none', edgecolors='k') plt.subplot(1, 2, 2) plt.scatter(X[:, 0], X[:, 1], c=y, cmap='coolwarm', edgecolors='k', s=100) URL("RBF SVM") URL("Feature 1") URL("Feature 2") # Plot decision boundary ax = URL() xlim = ax.get_xlim() ylim = ax.get_ylim() # Create grid to evaluate model xx = np.linspace(xlim[0], xlim[1], 30) yy = np.linspace(ylim[0], ylim[1], 30) YY, XX = np.meshgrid(yy, xx) xy = URL([URL(), URL()]).T Z = svm_rbf.decision_function(xy).reshape(URL) # Plot decision boundary and margins ax.contour(XX, YY, Z, colors='k', levels=[-1, 0, 1], alpha=0.5, linestyles=['--', '-', '--']) ax.scatter(svm_rbf.support_vectors_[:, 0], svm_rbf.support_vectors_[:, 1], s=100, linewidth=1, facecolors='none', edgecolors='k') plt.tight_layout() URL() # Print accuracy scores print("Linear SVM - Training Accuracy: {:.2f}, Test Accuracy: {:.2f}".format(linear_train_acc, linear_test_acc)) print("RBF SVM - Training Accuracy: {:.2f}, Test Accuracy: {:.2f}".format(rbf_train_acc, rbf_test_acc)) # Example usage after training the model (replace with your specific logic) def predict_new_data(X_new): predictions = svm_model.predict(X_new) return predictions # Example usage X_new = URL([[1.5, 2.0]]) # Replace with your new data point predictions = predict_new_data(X_new) print("Predicted class:", predictions[0]) ### Training Data Electric_Vehicle_Population_Data.csv ### Testing Data, Factors & Metrics #### Testing Hyperparameters The code trains two SVMs: Linear SVM: Uses the 'linear' kernel. RBF SVM: Uses the 'rbf' kernel. #### Software Visual Studio - Python ## Model Card Contact Akiff313@URL
[ "# Model Card for Model ID\nThis code implements Support Vector Machines (SVMs) with two different kernels: linear and RBF. A model card should mention that the model is an SVM and potentially specify the available kernels.", "## Model Details\nThe code demonstrates how the model is trained using the SVC class from scikit-learn. A model card's training details section might mention scikit-learn as a training framework.", "### Model Description\nThis model is a Support Vector Machine (SVM) classifier implemented using scikit-learn. It can be used for binary classification tasks where the data can be separated by a hyperplane in a high-dimensional space. The model offers two kernel choices: linear and RBF (Radial Basis Function). The linear kernel is suitable for data that is already linearly separable, while the RBF kernel can handle non-linearly separable data by mapping it to a higher-dimensional space.\n\nHere are some key aspects of this model:\n\nClassification Task: Binary classification (separating data points into two classes).\nKernel Choices: Linear and RBF.\nImplementation Library: scikit-learn.\nAdditionally, consider including these details if relevant:\n\nLimitations of SVMs, such as potentially high computational cost for training large datasets or difficulty interpreting the model's decisions.\nSpecific use cases where this type of SVM might be suitable (e.g., image classification with low-dimensional data for linear kernel, or text classification for RBF kernel).\nRemember to replace or adjust the details based on your specific implementation and use case.", "### Model Sources [optional]\nAkif", "## Uses\nDirect Use\nThis SVM model can be directly used for binary classification tasks where the data can be separated by a hyperplane. Here are some potential applications:\nSpam filtering: Classifying emails as spam or not spam based on features like sender address, keywords, and content.\nImage categorization: Classifying images into two categories, such as cat vs. dog or handwritten digit recognition (classifying digits 0-9).\nSentiment analysis: Classifying text data as positive or negative sentiment.\nGeneral requirements for direct use:\n\nThe data needs to be well-defined with clear features that distinguish the two classes.\nThe data should be balanced, meaning there are roughly equal numbers of data points for each class.\nDownstream Use [optional]\n\nThis SVM model can also be a building block for more complex machine learning pipelines. Here's an example:\n\nYou could use this model as a first stage filter in a multi-class classification problem. The SVM could classify data points into broad categories, and then a separate model could handle further classification within those categories.\nGeneral requirements for downstream use:\n\nThe downstream task should benefit from the binary classification performed by the SVM.\nThe data used downstream should be compatible with the output of the SVM.\nOut-of-Scope Use\n\nWhile this SVM can be a powerful tool, it's essential to consider limitations:\n\nHigh dimensionality: The SVM might not perform well with very high-dimensional data due to the curse of dimensionality.\nNon-linear data: The linear kernel might not be suitable for data that is not linearly separable. 
In such cases, the RBF kernel or other kernel functions might be needed.\nImbalanced data: The model's performance can be skewed if the data has a significant class imbalance (one class having many more data points than the other).\nIt's important to avoid using this model for tasks where these limitations could significantly impact its effectiveness.", "### Direct Use\n\nThis SVM model can be directly applied to binary classification tasks where the data can be well-represented in a high-dimensional space and separated by a hyperplane. Here are some potential applications:\n\nSpam Filtering: Classifying emails as spam or not spam based on features like sender address, keywords, and content. This could be useful for personal email filtering or as a building block in more sophisticated spam filtering systems.\n\nImage Categorization: Classifying images into two broad categories, such as cat vs. dog or handwritten digit recognition (classifying digits 0-9). This could be used for simple image sorting tasks or as a preliminary step in more complex image recognition pipelines.\n\nSentiment Analysis: Classifying text data as positive or negative sentiment. This could be helpful for analyzing customer reviews, social media posts, or other textual data to understand overall sentiment.\n\nGeneral requirements for direct use:\n\nData Suitability: The data should have clear features that effectively distinguish the two classes the model is designed to separate. Features might be numerical or categorical, depending on the task.\nData Balance: Ideally, the data should be balanced, meaning there are roughly equal numbers of data points for each class (positive and negative). Imbalanced data can bias the model towards the majority class.\nInterpretability Needs: If you need to understand the model's reasoning behind its classifications, a linear kernel SVM might be preferable as it offers more interpretability compared to the RBF kernel.\nAdditional Considerations:\n\nSVMs can be computationally expensive to train for very large datasets. Consider this when dealing with massive amounts of data.\nWhile SVMs are powerful classifiers, they might not be the best choice for all binary classification problems. Explore other algorithms like decision trees or random forests if the data is highly complex or not easily separable by a hyperplane.", "### Out-of-Scope Use", "## Bias, Risks, and Limitations\n\nBias, Risks, and Limitations\n\nHere's a possible description for the \"Bias, Risks, and Limitations\" section of your model card:\n\nBias:\n\nTraining Data Bias: Like any machine learning model, this SVM is susceptible to bias present in the training data. If the training data is skewed towards one class or if certain features are not representative of the real world, the model's predictions can be biased.\nAlgorithmic Bias: SVMs themselves might exhibit bias depending on the kernel used. For instance, linear SVMs can struggle with non-linear data distributions, potentially favoring certain regions of the feature space.\nRisks:\n\nMisclassification: The model might misclassify data points, especially if the data is noisy or not well-separated. 
This can lead to errors in downstream applications.\nOverfitting: If the model is trained on a small dataset or with overly complex hyperparameters, it might overfit the training data and perform poorly on unseen data.\nLimitations:\n\nHigh Dimensionality: SVMs can become computationally expensive and less effective when dealing with very high-dimensional data due to the \"curse of dimensionality.\"\nNon-linear Data: The linear kernel SVM is limited to linearly separable data. For more complex, non-linear relationships, the RBF kernel might be necessary, but it can be less interpretable.\nImbalanced Data: The model's performance can be skewed if the data has a significant class imbalance (one class having many more data points than the other).\nGeneral Mitigation Strategies:\n\nUse high-quality, balanced training data that represents the real-world distribution of the target variable.\nCarefully select and tune hyperparameters to avoid overfitting.\nConsider using techniques like cross-validation to evaluate the model's generalizability.\nBe aware of the limitations of SVMs and choose alternative algorithms if the data is high-dimensional, non-linear, or imbalanced.\nIt's important to understand these potential biases, risks, and limitations before deploying this SVM model in real-world applications.", "### Recommendations\n\nRecommendations\n\nTo mitigate the potential biases, risks, and limitations discussed in the previous section, here are some recommendations for users of this SVM model:\n\nData Considerations:\n\nData Quality and Balance: Ensure the training data used for the SVM is high-quality, free from errors, and balanced between the two classes. Techniques like data cleaning and oversampling/undersampling can be used to address imbalances.\nData Representativeness: The training data should accurately represent the real-world distribution of data the model will encounter during deployment. Consider potential biases in data collection processes and explore mitigating strategies.\nModel Training and Evaluation:\n\nHyperparameter Tuning: Carefully tune the hyperparameters of the SVM (e.g., regularization parameter, kernel parameters) to achieve a good balance between training accuracy and generalization performance. Techniques like grid search or randomized search can be helpful.\nCross-Validation: Evaluate the model's performance using techniques like k-fold cross-validation to get a more robust estimate of its generalizability to unseen data.\nAlternative Models:\n\nConsider Alternatives: If the data is high-dimensional, non-linear, or imbalanced, explore alternative classification algorithms like decision trees, random forests, or gradient boosting that might be more suitable for such scenarios.\nMonitoring and Improvement:\n\nMonitor Performance: Continuously monitor the model's performance in deployment and retrain it with new data or adjusted hyperparameters if its accuracy degrades over time.\nAdditionally:\n\nDocument Biases: Document any identified biases in the training data or the model itself. 
This transparency is crucial for responsible model development and deployment.\nResponsible Use: Be aware of the potential societal impacts of using this model and ensure its application aligns with ethical considerations.\nBy following these recommendations, users can help mitigate the risks and limitations associated with this SVM model and promote its fair and effective use.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.\nimport numpy as np\nimport URL as plt\nfrom sklearn.model_selection import train_test_split\nfrom URL import SVC\nfrom sklearn.datasets import make_classification", "# Generate synthetic dataset\nX, y = make_classification(n_samples=100, n_features=2, n_informative=2, n_redundant=0, n_classes=2, n_clusters_per_class=1, random_state=42)", "# Split the dataset into training and testing sets\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)", "# Support Vector Machine without kernel (linear kernel)\nsvm_linear = SVC(kernel='linear')\nsvm_linear.fit(X_train, y_train)\nlinear_train_acc = svm_linear.score(X_train, y_train)\nlinear_test_acc = svm_linear.score(X_test, y_test)", "# Support Vector Machine with radial basis function (RBF) kernel\nsvm_rbf = SVC(kernel='rbf')\nsvm_rbf.fit(X_train, y_train)\nrbf_train_acc = svm_rbf.score(X_train, y_train)\nrbf_test_acc = svm_rbf.score(X_test, y_test)", "# Visualize decision boundary for linear SVM\nURL(figsize=(10, 5))\nplt.subplot(1, 2, 1)\nplt.scatter(X[:, 0], X[:, 1], c=y, cmap='coolwarm', edgecolors='k', s=100)\nURL(\"Linear SVM\")\nURL(\"Feature 1\")\nURL(\"Feature 2\")", "# Plot decision boundary\nax = URL()\nxlim = ax.get_xlim()\nylim = ax.get_ylim()", "# Create grid to evaluate model\nxx = np.linspace(xlim[0], xlim[1], 30)\nyy = np.linspace(ylim[0], ylim[1], 30)\nYY, XX = np.meshgrid(yy, xx)\nxy = URL([URL(), URL()]).T\nZ = svm_linear.decision_function(xy).reshape(URL)", "# Plot decision boundary and margins\nax.contour(XX, YY, Z, colors='k', levels=[-1, 0, 1], alpha=0.5, linestyles=['--', '-', '--'])\nax.scatter(svm_linear.support_vectors_[:, 0], svm_linear.support_vectors_[:, 1], s=100,\n linewidth=1, facecolors='none', edgecolors='k')\n\nplt.subplot(1, 2, 2)\nplt.scatter(X[:, 0], X[:, 1], c=y, cmap='coolwarm', edgecolors='k', s=100)\nURL(\"RBF SVM\")\nURL(\"Feature 1\")\nURL(\"Feature 2\")", "# Plot decision boundary\nax = URL()\nxlim = ax.get_xlim()\nylim = ax.get_ylim()", "# Create grid to evaluate model\nxx = np.linspace(xlim[0], xlim[1], 30)\nyy = np.linspace(ylim[0], ylim[1], 30)\nYY, XX = np.meshgrid(yy, xx)\nxy = URL([URL(), URL()]).T\nZ = svm_rbf.decision_function(xy).reshape(URL)", "# Plot decision boundary and margins\nax.contour(XX, YY, Z, colors='k', levels=[-1, 0, 1], alpha=0.5, linestyles=['--', '-', '--'])\nax.scatter(svm_rbf.support_vectors_[:, 0], svm_rbf.support_vectors_[:, 1], s=100,\n linewidth=1, facecolors='none', edgecolors='k')\n\nplt.tight_layout()\nURL()", "# Print accuracy scores\nprint(\"Linear SVM - Training Accuracy: {:.2f}, Test Accuracy: {:.2f}\".format(linear_train_acc, linear_test_acc))\nprint(\"RBF SVM - Training Accuracy: {:.2f}, Test Accuracy: {:.2f}\".format(rbf_train_acc, rbf_test_acc))", "# Example usage after training the model (replace with your specific logic)\ndef predict_new_data(X_new):\n predictions = svm_model.predict(X_new)\n return predictions", "# Example usage\nX_new = URL([[1.5, 2.0]]) # Replace with your new data point\npredictions = predict_new_data(X_new)\nprint(\"Predicted 
class:\", predictions[0])", "### Training Data\n\nElectric_Vehicle_Population_Data.csv", "### Testing Data, Factors & Metrics", "#### Testing Hyperparameters\n\nThe code trains two SVMs:\n\nLinear SVM: Uses the 'linear' kernel.\nRBF SVM: Uses the 'rbf' kernel.", "#### Software\n\nVisual Studio - Python", "## Model Card Contact\n\nAkiff313@URL" ]
[ "TAGS\n#region-us \n", "# Model Card for Model ID\nThis code implements Support Vector Machines (SVMs) with two different kernels: linear and RBF. A model card should mention that the model is an SVM and potentially specify the available kernels.", "## Model Details\nThe code demonstrates how the model is trained using the SVC class from scikit-learn. A model card's training details section might mention scikit-learn as a training framework.", "### Model Description\nThis model is a Support Vector Machine (SVM) classifier implemented using scikit-learn. It can be used for binary classification tasks where the data can be separated by a hyperplane in a high-dimensional space. The model offers two kernel choices: linear and RBF (Radial Basis Function). The linear kernel is suitable for data that is already linearly separable, while the RBF kernel can handle non-linearly separable data by mapping it to a higher-dimensional space.\n\nHere are some key aspects of this model:\n\nClassification Task: Binary classification (separating data points into two classes).\nKernel Choices: Linear and RBF.\nImplementation Library: scikit-learn.\nAdditionally, consider including these details if relevant:\n\nLimitations of SVMs, such as potentially high computational cost for training large datasets or difficulty interpreting the model's decisions.\nSpecific use cases where this type of SVM might be suitable (e.g., image classification with low-dimensional data for linear kernel, or text classification for RBF kernel).\nRemember to replace or adjust the details based on your specific implementation and use case.", "### Model Sources [optional]\nAkif", "## Uses\nDirect Use\nThis SVM model can be directly used for binary classification tasks where the data can be separated by a hyperplane. Here are some potential applications:\nSpam filtering: Classifying emails as spam or not spam based on features like sender address, keywords, and content.\nImage categorization: Classifying images into two categories, such as cat vs. dog or handwritten digit recognition (classifying digits 0-9).\nSentiment analysis: Classifying text data as positive or negative sentiment.\nGeneral requirements for direct use:\n\nThe data needs to be well-defined with clear features that distinguish the two classes.\nThe data should be balanced, meaning there are roughly equal numbers of data points for each class.\nDownstream Use [optional]\n\nThis SVM model can also be a building block for more complex machine learning pipelines. Here's an example:\n\nYou could use this model as a first stage filter in a multi-class classification problem. The SVM could classify data points into broad categories, and then a separate model could handle further classification within those categories.\nGeneral requirements for downstream use:\n\nThe downstream task should benefit from the binary classification performed by the SVM.\nThe data used downstream should be compatible with the output of the SVM.\nOut-of-Scope Use\n\nWhile this SVM can be a powerful tool, it's essential to consider limitations:\n\nHigh dimensionality: The SVM might not perform well with very high-dimensional data due to the curse of dimensionality.\nNon-linear data: The linear kernel might not be suitable for data that is not linearly separable. 
In such cases, the RBF kernel or other kernel functions might be needed.\nImbalanced data: The model's performance can be skewed if the data has a significant class imbalance (one class having many more data points than the other).\nIt's important to avoid using this model for tasks where these limitations could significantly impact its effectiveness.", "### Direct Use\n\nThis SVM model can be directly applied to binary classification tasks where the data can be well-represented in a high-dimensional space and separated by a hyperplane. Here are some potential applications:\n\nSpam Filtering: Classifying emails as spam or not spam based on features like sender address, keywords, and content. This could be useful for personal email filtering or as a building block in more sophisticated spam filtering systems.\n\nImage Categorization: Classifying images into two broad categories, such as cat vs. dog or handwritten digit recognition (classifying digits 0-9). This could be used for simple image sorting tasks or as a preliminary step in more complex image recognition pipelines.\n\nSentiment Analysis: Classifying text data as positive or negative sentiment. This could be helpful for analyzing customer reviews, social media posts, or other textual data to understand overall sentiment.\n\nGeneral requirements for direct use:\n\nData Suitability: The data should have clear features that effectively distinguish the two classes the model is designed to separate. Features might be numerical or categorical, depending on the task.\nData Balance: Ideally, the data should be balanced, meaning there are roughly equal numbers of data points for each class (positive and negative). Imbalanced data can bias the model towards the majority class.\nInterpretability Needs: If you need to understand the model's reasoning behind its classifications, a linear kernel SVM might be preferable as it offers more interpretability compared to the RBF kernel.\nAdditional Considerations:\n\nSVMs can be computationally expensive to train for very large datasets. Consider this when dealing with massive amounts of data.\nWhile SVMs are powerful classifiers, they might not be the best choice for all binary classification problems. Explore other algorithms like decision trees or random forests if the data is highly complex or not easily separable by a hyperplane.", "### Out-of-Scope Use", "## Bias, Risks, and Limitations\n\nBias, Risks, and Limitations\n\nHere's a possible description for the \"Bias, Risks, and Limitations\" section of your model card:\n\nBias:\n\nTraining Data Bias: Like any machine learning model, this SVM is susceptible to bias present in the training data. If the training data is skewed towards one class or if certain features are not representative of the real world, the model's predictions can be biased.\nAlgorithmic Bias: SVMs themselves might exhibit bias depending on the kernel used. For instance, linear SVMs can struggle with non-linear data distributions, potentially favoring certain regions of the feature space.\nRisks:\n\nMisclassification: The model might misclassify data points, especially if the data is noisy or not well-separated. 
This can lead to errors in downstream applications.\nOverfitting: If the model is trained on a small dataset or with overly complex hyperparameters, it might overfit the training data and perform poorly on unseen data.\nLimitations:\n\nHigh Dimensionality: SVMs can become computationally expensive and less effective when dealing with very high-dimensional data due to the \"curse of dimensionality.\"\nNon-linear Data: The linear kernel SVM is limited to linearly separable data. For more complex, non-linear relationships, the RBF kernel might be necessary, but it can be less interpretable.\nImbalanced Data: The model's performance can be skewed if the data has a significant class imbalance (one class having many more data points than the other).\nGeneral Mitigation Strategies:\n\nUse high-quality, balanced training data that represents the real-world distribution of the target variable.\nCarefully select and tune hyperparameters to avoid overfitting.\nConsider using techniques like cross-validation to evaluate the model's generalizability.\nBe aware of the limitations of SVMs and choose alternative algorithms if the data is high-dimensional, non-linear, or imbalanced.\nIt's important to understand these potential biases, risks, and limitations before deploying this SVM model in real-world applications.", "### Recommendations\n\nRecommendations\n\nTo mitigate the potential biases, risks, and limitations discussed in the previous section, here are some recommendations for users of this SVM model:\n\nData Considerations:\n\nData Quality and Balance: Ensure the training data used for the SVM is high-quality, free from errors, and balanced between the two classes. Techniques like data cleaning and oversampling/undersampling can be used to address imbalances.\nData Representativeness: The training data should accurately represent the real-world distribution of data the model will encounter during deployment. Consider potential biases in data collection processes and explore mitigating strategies.\nModel Training and Evaluation:\n\nHyperparameter Tuning: Carefully tune the hyperparameters of the SVM (e.g., regularization parameter, kernel parameters) to achieve a good balance between training accuracy and generalization performance. Techniques like grid search or randomized search can be helpful.\nCross-Validation: Evaluate the model's performance using techniques like k-fold cross-validation to get a more robust estimate of its generalizability to unseen data.\nAlternative Models:\n\nConsider Alternatives: If the data is high-dimensional, non-linear, or imbalanced, explore alternative classification algorithms like decision trees, random forests, or gradient boosting that might be more suitable for such scenarios.\nMonitoring and Improvement:\n\nMonitor Performance: Continuously monitor the model's performance in deployment and retrain it with new data or adjusted hyperparameters if its accuracy degrades over time.\nAdditionally:\n\nDocument Biases: Document any identified biases in the training data or the model itself. 
This transparency is crucial for responsible model development and deployment.\nResponsible Use: Be aware of the potential societal impacts of using this model and ensure its application aligns with ethical considerations.\nBy following these recommendations, users can help mitigate the risks and limitations associated with this SVM model and promote its fair and effective use.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.\nimport numpy as np\nimport URL as plt\nfrom sklearn.model_selection import train_test_split\nfrom URL import SVC\nfrom sklearn.datasets import make_classification", "# Generate synthetic dataset\nX, y = make_classification(n_samples=100, n_features=2, n_informative=2, n_redundant=0, n_classes=2, n_clusters_per_class=1, random_state=42)", "# Split the dataset into training and testing sets\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)", "# Support Vector Machine without kernel (linear kernel)\nsvm_linear = SVC(kernel='linear')\nsvm_linear.fit(X_train, y_train)\nlinear_train_acc = svm_linear.score(X_train, y_train)\nlinear_test_acc = svm_linear.score(X_test, y_test)", "# Support Vector Machine with radial basis function (RBF) kernel\nsvm_rbf = SVC(kernel='rbf')\nsvm_rbf.fit(X_train, y_train)\nrbf_train_acc = svm_rbf.score(X_train, y_train)\nrbf_test_acc = svm_rbf.score(X_test, y_test)", "# Visualize decision boundary for linear SVM\nURL(figsize=(10, 5))\nplt.subplot(1, 2, 1)\nplt.scatter(X[:, 0], X[:, 1], c=y, cmap='coolwarm', edgecolors='k', s=100)\nURL(\"Linear SVM\")\nURL(\"Feature 1\")\nURL(\"Feature 2\")", "# Plot decision boundary\nax = URL()\nxlim = ax.get_xlim()\nylim = ax.get_ylim()", "# Create grid to evaluate model\nxx = np.linspace(xlim[0], xlim[1], 30)\nyy = np.linspace(ylim[0], ylim[1], 30)\nYY, XX = np.meshgrid(yy, xx)\nxy = URL([URL(), URL()]).T\nZ = svm_linear.decision_function(xy).reshape(URL)", "# Plot decision boundary and margins\nax.contour(XX, YY, Z, colors='k', levels=[-1, 0, 1], alpha=0.5, linestyles=['--', '-', '--'])\nax.scatter(svm_linear.support_vectors_[:, 0], svm_linear.support_vectors_[:, 1], s=100,\n linewidth=1, facecolors='none', edgecolors='k')\n\nplt.subplot(1, 2, 2)\nplt.scatter(X[:, 0], X[:, 1], c=y, cmap='coolwarm', edgecolors='k', s=100)\nURL(\"RBF SVM\")\nURL(\"Feature 1\")\nURL(\"Feature 2\")", "# Plot decision boundary\nax = URL()\nxlim = ax.get_xlim()\nylim = ax.get_ylim()", "# Create grid to evaluate model\nxx = np.linspace(xlim[0], xlim[1], 30)\nyy = np.linspace(ylim[0], ylim[1], 30)\nYY, XX = np.meshgrid(yy, xx)\nxy = URL([URL(), URL()]).T\nZ = svm_rbf.decision_function(xy).reshape(URL)", "# Plot decision boundary and margins\nax.contour(XX, YY, Z, colors='k', levels=[-1, 0, 1], alpha=0.5, linestyles=['--', '-', '--'])\nax.scatter(svm_rbf.support_vectors_[:, 0], svm_rbf.support_vectors_[:, 1], s=100,\n linewidth=1, facecolors='none', edgecolors='k')\n\nplt.tight_layout()\nURL()", "# Print accuracy scores\nprint(\"Linear SVM - Training Accuracy: {:.2f}, Test Accuracy: {:.2f}\".format(linear_train_acc, linear_test_acc))\nprint(\"RBF SVM - Training Accuracy: {:.2f}, Test Accuracy: {:.2f}\".format(rbf_train_acc, rbf_test_acc))", "# Example usage after training the model (replace with your specific logic)\ndef predict_new_data(X_new):\n predictions = svm_model.predict(X_new)\n return predictions", "# Example usage\nX_new = URL([[1.5, 2.0]]) # Replace with your new data point\npredictions = predict_new_data(X_new)\nprint(\"Predicted 
class:\", predictions[0])", "### Training Data\n\nElectric_Vehicle_Population_Data.csv", "### Testing Data, Factors & Metrics", "#### Testing Hyperparameters\n\nThe code trains two SVMs:\n\nLinear SVM: Uses the 'linear' kernel.\nRBF SVM: Uses the 'rbf' kernel.", "#### Software\n\nVisual Studio - Python", "## Model Card Contact\n\nAkiff313@URL" ]
[ 5, 47, 42, 234, 10, 400, 382, 9, 437, 389, 64, 60, 50, 81, 91, 105, 32, 107, 207, 32, 108, 137, 93, 41, 54, 15, 10, 40, 9, 12 ]
[ "TAGS\n#region-us \n# Model Card for Model ID\nThis code implements Support Vector Machines (SVMs) with two different kernels: linear and RBF. A model card should mention that the model is an SVM and potentially specify the available kernels.## Model Details\nThe code demonstrates how the model is trained using the SVC class from scikit-learn. A model card's training details section might mention scikit-learn as a training framework.### Model Description\nThis model is a Support Vector Machine (SVM) classifier implemented using scikit-learn. It can be used for binary classification tasks where the data can be separated by a hyperplane in a high-dimensional space. The model offers two kernel choices: linear and RBF (Radial Basis Function). The linear kernel is suitable for data that is already linearly separable, while the RBF kernel can handle non-linearly separable data by mapping it to a higher-dimensional space.\n\nHere are some key aspects of this model:\n\nClassification Task: Binary classification (separating data points into two classes).\nKernel Choices: Linear and RBF.\nImplementation Library: scikit-learn.\nAdditionally, consider including these details if relevant:\n\nLimitations of SVMs, such as potentially high computational cost for training large datasets or difficulty interpreting the model's decisions.\nSpecific use cases where this type of SVM might be suitable (e.g., image classification with low-dimensional data for linear kernel, or text classification for RBF kernel).\nRemember to replace or adjust the details based on your specific implementation and use case.### Model Sources [optional]\nAkif## Uses\nDirect Use\nThis SVM model can be directly used for binary classification tasks where the data can be separated by a hyperplane. Here are some potential applications:\nSpam filtering: Classifying emails as spam or not spam based on features like sender address, keywords, and content.\nImage categorization: Classifying images into two categories, such as cat vs. dog or handwritten digit recognition (classifying digits 0-9).\nSentiment analysis: Classifying text data as positive or negative sentiment.\nGeneral requirements for direct use:\n\nThe data needs to be well-defined with clear features that distinguish the two classes.\nThe data should be balanced, meaning there are roughly equal numbers of data points for each class.\nDownstream Use [optional]\n\nThis SVM model can also be a building block for more complex machine learning pipelines. Here's an example:\n\nYou could use this model as a first stage filter in a multi-class classification problem. The SVM could classify data points into broad categories, and then a separate model could handle further classification within those categories.\nGeneral requirements for downstream use:\n\nThe downstream task should benefit from the binary classification performed by the SVM.\nThe data used downstream should be compatible with the output of the SVM.\nOut-of-Scope Use\n\nWhile this SVM can be a powerful tool, it's essential to consider limitations:\n\nHigh dimensionality: The SVM might not perform well with very high-dimensional data due to the curse of dimensionality.\nNon-linear data: The linear kernel might not be suitable for data that is not linearly separable. 
In such cases, the RBF kernel or other kernel functions might be needed.\nImbalanced data: The model's performance can be skewed if the data has a significant class imbalance (one class having many more data points than the other).\nIt's important to avoid using this model for tasks where these limitations could significantly impact its effectiveness.### Direct Use\n\nThis SVM model can be directly applied to binary classification tasks where the data can be well-represented in a high-dimensional space and separated by a hyperplane. Here are some potential applications:\n\nSpam Filtering: Classifying emails as spam or not spam based on features like sender address, keywords, and content. This could be useful for personal email filtering or as a building block in more sophisticated spam filtering systems.\n\nImage Categorization: Classifying images into two broad categories, such as cat vs. dog or handwritten digit recognition (classifying digits 0-9). This could be used for simple image sorting tasks or as a preliminary step in more complex image recognition pipelines.\n\nSentiment Analysis: Classifying text data as positive or negative sentiment. This could be helpful for analyzing customer reviews, social media posts, or other textual data to understand overall sentiment.\n\nGeneral requirements for direct use:\n\nData Suitability: The data should have clear features that effectively distinguish the two classes the model is designed to separate. Features might be numerical or categorical, depending on the task.\nData Balance: Ideally, the data should be balanced, meaning there are roughly equal numbers of data points for each class (positive and negative). Imbalanced data can bias the model towards the majority class.\nInterpretability Needs: If you need to understand the model's reasoning behind its classifications, a linear kernel SVM might be preferable as it offers more interpretability compared to the RBF kernel.\nAdditional Considerations:\n\nSVMs can be computationally expensive to train for very large datasets. Consider this when dealing with massive amounts of data.\nWhile SVMs are powerful classifiers, they might not be the best choice for all binary classification problems. Explore other algorithms like decision trees or random forests if the data is highly complex or not easily separable by a hyperplane.### Out-of-Scope Use## Bias, Risks, and Limitations\n\nBias, Risks, and Limitations\n\nHere's a possible description for the \"Bias, Risks, and Limitations\" section of your model card:\n\nBias:\n\nTraining Data Bias: Like any machine learning model, this SVM is susceptible to bias present in the training data. If the training data is skewed towards one class or if certain features are not representative of the real world, the model's predictions can be biased.\nAlgorithmic Bias: SVMs themselves might exhibit bias depending on the kernel used. For instance, linear SVMs can struggle with non-linear data distributions, potentially favoring certain regions of the feature space.\nRisks:\n\nMisclassification: The model might misclassify data points, especially if the data is noisy or not well-separated. 
This can lead to errors in downstream applications.\nOverfitting: If the model is trained on a small dataset or with overly complex hyperparameters, it might overfit the training data and perform poorly on unseen data.\nLimitations:\n\nHigh Dimensionality: SVMs can become computationally expensive and less effective when dealing with very high-dimensional data due to the \"curse of dimensionality.\"\nNon-linear Data: The linear kernel SVM is limited to linearly separable data. For more complex, non-linear relationships, the RBF kernel might be necessary, but it can be less interpretable.\nImbalanced Data: The model's performance can be skewed if the data has a significant class imbalance (one class having many more data points than the other).\nGeneral Mitigation Strategies:\n\nUse high-quality, balanced training data that represents the real-world distribution of the target variable.\nCarefully select and tune hyperparameters to avoid overfitting.\nConsider using techniques like cross-validation to evaluate the model's generalizability.\nBe aware of the limitations of SVMs and choose alternative algorithms if the data is high-dimensional, non-linear, or imbalanced.\nIt's important to understand these potential biases, risks, and limitations before deploying this SVM model in real-world applications.### Recommendations\n\nRecommendations\n\nTo mitigate the potential biases, risks, and limitations discussed in the previous section, here are some recommendations for users of this SVM model:\n\nData Considerations:\n\nData Quality and Balance: Ensure the training data used for the SVM is high-quality, free from errors, and balanced between the two classes. Techniques like data cleaning and oversampling/undersampling can be used to address imbalances.\nData Representativeness: The training data should accurately represent the real-world distribution of data the model will encounter during deployment. Consider potential biases in data collection processes and explore mitigating strategies.\nModel Training and Evaluation:\n\nHyperparameter Tuning: Carefully tune the hyperparameters of the SVM (e.g., regularization parameter, kernel parameters) to achieve a good balance between training accuracy and generalization performance. Techniques like grid search or randomized search can be helpful.\nCross-Validation: Evaluate the model's performance using techniques like k-fold cross-validation to get a more robust estimate of its generalizability to unseen data.\nAlternative Models:\n\nConsider Alternatives: If the data is high-dimensional, non-linear, or imbalanced, explore alternative classification algorithms like decision trees, random forests, or gradient boosting that might be more suitable for such scenarios.\nMonitoring and Improvement:\n\nMonitor Performance: Continuously monitor the model's performance in deployment and retrain it with new data or adjusted hyperparameters if its accuracy degrades over time.\nAdditionally:\n\nDocument Biases: Document any identified biases in the training data or the model itself. 
This transparency is crucial for responsible model development and deployment.\nResponsible Use: Be aware of the potential societal impacts of using this model and ensure its application aligns with ethical considerations.\nBy following these recommendations, users can help mitigate the risks and limitations associated with this SVM model and promote its fair and effective use.## How to Get Started with the Model\n\nUse the code below to get started with the model.\nimport numpy as np\nimport URL as plt\nfrom sklearn.model_selection import train_test_split\nfrom URL import SVC\nfrom sklearn.datasets import make_classification# Generate synthetic dataset\nX, y = make_classification(n_samples=100, n_features=2, n_informative=2, n_redundant=0, n_classes=2, n_clusters_per_class=1, random_state=42)# Split the dataset into training and testing sets\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)# Support Vector Machine without kernel (linear kernel)\nsvm_linear = SVC(kernel='linear')\nsvm_linear.fit(X_train, y_train)\nlinear_train_acc = svm_linear.score(X_train, y_train)\nlinear_test_acc = svm_linear.score(X_test, y_test)# Support Vector Machine with radial basis function (RBF) kernel\nsvm_rbf = SVC(kernel='rbf')\nsvm_rbf.fit(X_train, y_train)\nrbf_train_acc = svm_rbf.score(X_train, y_train)\nrbf_test_acc = svm_rbf.score(X_test, y_test)# Visualize decision boundary for linear SVM\nURL(figsize=(10, 5))\nplt.subplot(1, 2, 1)\nplt.scatter(X[:, 0], X[:, 1], c=y, cmap='coolwarm', edgecolors='k', s=100)\nURL(\"Linear SVM\")\nURL(\"Feature 1\")\nURL(\"Feature 2\")# Plot decision boundary\nax = URL()\nxlim = ax.get_xlim()\nylim = ax.get_ylim()# Create grid to evaluate model\nxx = np.linspace(xlim[0], xlim[1], 30)\nyy = np.linspace(ylim[0], ylim[1], 30)\nYY, XX = np.meshgrid(yy, xx)\nxy = URL([URL(), URL()]).T\nZ = svm_linear.decision_function(xy).reshape(URL)# Plot decision boundary and margins\nax.contour(XX, YY, Z, colors='k', levels=[-1, 0, 1], alpha=0.5, linestyles=['--', '-', '--'])\nax.scatter(svm_linear.support_vectors_[:, 0], svm_linear.support_vectors_[:, 1], s=100,\n linewidth=1, facecolors='none', edgecolors='k')\n\nplt.subplot(1, 2, 2)\nplt.scatter(X[:, 0], X[:, 1], c=y, cmap='coolwarm', edgecolors='k', s=100)\nURL(\"RBF SVM\")\nURL(\"Feature 1\")\nURL(\"Feature 2\")# Plot decision boundary\nax = URL()\nxlim = ax.get_xlim()\nylim = ax.get_ylim()# Create grid to evaluate model\nxx = np.linspace(xlim[0], xlim[1], 30)\nyy = np.linspace(ylim[0], ylim[1], 30)\nYY, XX = np.meshgrid(yy, xx)\nxy = URL([URL(), URL()]).T\nZ = svm_rbf.decision_function(xy).reshape(URL)# Plot decision boundary and margins\nax.contour(XX, YY, Z, colors='k', levels=[-1, 0, 1], alpha=0.5, linestyles=['--', '-', '--'])\nax.scatter(svm_rbf.support_vectors_[:, 0], svm_rbf.support_vectors_[:, 1], s=100,\n linewidth=1, facecolors='none', edgecolors='k')\n\nplt.tight_layout()\nURL()# Print accuracy scores\nprint(\"Linear SVM - Training Accuracy: {:.2f}, Test Accuracy: {:.2f}\".format(linear_train_acc, linear_test_acc))\nprint(\"RBF SVM - Training Accuracy: {:.2f}, Test Accuracy: {:.2f}\".format(rbf_train_acc, rbf_test_acc))# Example usage after training the model (replace with your specific logic)\ndef predict_new_data(X_new):\n predictions = svm_model.predict(X_new)\n return predictions# Example usage\nX_new = URL([[1.5, 2.0]]) # Replace with your new data point\npredictions = predict_new_data(X_new)\nprint(\"Predicted class:\", predictions[0])### Training 
Data\n\nElectric_Vehicle_Population_Data.csv### Testing Data, Factors & Metrics#### Testing Hyperparameters\n\nThe code trains two SVMs:\n\nLinear SVM: Uses the 'linear' kernel.\nRBF SVM: Uses the 'rbf' kernel.#### Software\n\nVisual Studio - Python## Model Card Contact\n\nAkiff313@URL" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_EMP_H3K4me2-seqsight_32768_512_30M-L1_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_EMP_H3K4me2](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_EMP_H3K4me2) dataset. It achieves the following results on the evaluation set: - Loss: 0.6031 - F1 Score: 0.6608 - Accuracy: 0.6631 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:| | 0.6575 | 1.04 | 200 | 0.6330 | 0.6290 | 0.6494 | | 0.6292 | 2.08 | 400 | 0.6402 | 0.6381 | 0.6354 | | 0.6232 | 3.12 | 600 | 0.6226 | 0.6552 | 0.6644 | | 0.6211 | 4.17 | 800 | 0.6234 | 0.6564 | 0.6588 | | 0.6211 | 5.21 | 1000 | 0.6321 | 0.6407 | 0.6383 | | 0.6172 | 6.25 | 1200 | 0.6235 | 0.6562 | 0.6569 | | 0.6115 | 7.29 | 1400 | 0.6385 | 0.6413 | 0.6386 | | 0.6146 | 8.33 | 1600 | 0.6308 | 0.6486 | 0.6471 | | 0.613 | 9.38 | 1800 | 0.6341 | 0.6422 | 0.6396 | | 0.6114 | 10.42 | 2000 | 0.6247 | 0.6487 | 0.6481 | | 0.6126 | 11.46 | 2200 | 0.6182 | 0.6578 | 0.6601 | | 0.6066 | 12.5 | 2400 | 0.6252 | 0.6533 | 0.6533 | | 0.6057 | 13.54 | 2600 | 0.6179 | 0.6597 | 0.6637 | | 0.6096 | 14.58 | 2800 | 0.6155 | 0.6599 | 0.6654 | | 0.609 | 15.62 | 3000 | 0.6201 | 0.6517 | 0.6536 | | 0.6048 | 16.67 | 3200 | 0.6248 | 0.6520 | 0.6514 | | 0.6053 | 17.71 | 3400 | 0.6165 | 0.6619 | 0.6654 | | 0.6042 | 18.75 | 3600 | 0.6202 | 0.6526 | 0.6533 | | 0.6058 | 19.79 | 3800 | 0.6191 | 0.6552 | 0.6556 | | 0.5999 | 20.83 | 4000 | 0.6295 | 0.6545 | 0.6523 | | 0.603 | 21.88 | 4200 | 0.6291 | 0.6500 | 0.6481 | | 0.602 | 22.92 | 4400 | 0.6283 | 0.6527 | 0.6507 | | 0.6012 | 23.96 | 4600 | 0.6303 | 0.6523 | 0.6500 | | 0.6001 | 25.0 | 4800 | 0.6210 | 0.6581 | 0.6579 | | 0.6001 | 26.04 | 5000 | 0.6215 | 0.6582 | 0.6575 | | 0.6007 | 27.08 | 5200 | 0.6239 | 0.6571 | 0.6559 | | 0.5995 | 28.12 | 5400 | 0.6180 | 0.6592 | 0.6592 | | 0.6006 | 29.17 | 5600 | 0.6224 | 0.6565 | 0.6549 | | 0.5955 | 30.21 | 5800 | 0.6266 | 0.6581 | 0.6566 | | 0.599 | 31.25 | 6000 | 0.6228 | 0.6594 | 0.6582 | | 0.5979 | 32.29 | 6200 | 0.6203 | 0.6604 | 0.6595 | | 0.5957 | 33.33 | 6400 | 0.6253 | 0.6591 | 0.6575 | | 0.5987 | 34.38 | 6600 | 0.6141 | 0.6647 | 0.6667 | | 0.5951 | 35.42 | 6800 | 0.6181 | 0.6620 | 0.6621 | | 0.5965 | 36.46 | 7000 | 0.6154 | 0.6632 | 0.6644 | | 0.5937 | 37.5 | 7200 | 0.6227 | 0.6565 | 0.6553 | | 0.5943 | 38.54 | 7400 | 0.6190 | 0.6629 | 0.6628 | | 0.5991 | 39.58 | 7600 | 0.6188 | 0.6627 | 0.6624 | | 0.596 | 40.62 | 7800 | 0.6263 | 0.6566 | 0.6546 | | 0.5932 | 41.67 | 8000 | 0.6229 | 0.6598 | 0.6582 | | 0.5933 | 42.71 | 8200 | 0.6159 | 0.6631 | 0.6647 | | 0.5952 | 43.75 | 8400 | 0.6189 | 0.6620 | 0.6618 | | 0.5955 | 44.79 | 8600 | 0.6236 | 0.6585 | 0.6572 | | 0.5914 | 45.83 | 8800 | 0.6193 | 0.6607 | 0.6605 | | 0.595 | 
46.88 | 9000 | 0.6241 | 0.6624 | 0.6608 | | 0.5925 | 47.92 | 9200 | 0.6216 | 0.6605 | 0.6595 | | 0.5937 | 48.96 | 9400 | 0.6204 | 0.6612 | 0.6605 | | 0.5929 | 50.0 | 9600 | 0.6214 | 0.6611 | 0.6601 | | 0.5922 | 51.04 | 9800 | 0.6216 | 0.6608 | 0.6598 | | 0.5916 | 52.08 | 10000 | 0.6214 | 0.6604 | 0.6595 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_EMP_H3K4me2-seqsight_32768_512_30M-L1_f", "results": []}]}
mahdibaghbanzadeh/GUE_EMP_H3K4me2-seqsight_32768_512_30M-L1_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T04:07:25+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_EMP\_H3K4me2-seqsight\_32768\_512\_30M-L1\_f ================================================= This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_EMP\_H3K4me2 dataset. It achieves the following results on the evaluation set: * Loss: 0.6031 * F1 Score: 0.6608 * Accuracy: 0.6631 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_EMP_H3K4me2-seqsight_32768_512_30M-L8_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_EMP_H3K4me2](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_EMP_H3K4me2) dataset. It achieves the following results on the evaluation set: - Loss: 0.5980 - F1 Score: 0.6814 - Accuracy: 0.6820 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:| | 0.651 | 1.04 | 200 | 0.6255 | 0.6438 | 0.6553 | | 0.622 | 2.08 | 400 | 0.6356 | 0.6361 | 0.6334 | | 0.6165 | 3.12 | 600 | 0.6170 | 0.6501 | 0.6660 | | 0.6148 | 4.17 | 800 | 0.6253 | 0.6527 | 0.6530 | | 0.6136 | 5.21 | 1000 | 0.6172 | 0.6597 | 0.6618 | | 0.6088 | 6.25 | 1200 | 0.6159 | 0.6636 | 0.6686 | | 0.6019 | 7.29 | 1400 | 0.6237 | 0.6572 | 0.6559 | | 0.604 | 8.33 | 1600 | 0.6284 | 0.6570 | 0.6553 | | 0.5998 | 9.38 | 1800 | 0.6439 | 0.6466 | 0.6442 | | 0.5968 | 10.42 | 2000 | 0.6108 | 0.6591 | 0.6611 | | 0.5993 | 11.46 | 2200 | 0.6101 | 0.6627 | 0.6660 | | 0.5902 | 12.5 | 2400 | 0.6159 | 0.6649 | 0.6660 | | 0.5879 | 13.54 | 2600 | 0.6134 | 0.6637 | 0.6650 | | 0.59 | 14.58 | 2800 | 0.6106 | 0.6691 | 0.6722 | | 0.5897 | 15.62 | 3000 | 0.6170 | 0.6682 | 0.6680 | | 0.584 | 16.67 | 3200 | 0.6184 | 0.6671 | 0.6657 | | 0.5818 | 17.71 | 3400 | 0.6140 | 0.6724 | 0.6722 | | 0.5827 | 18.75 | 3600 | 0.6075 | 0.6756 | 0.6777 | | 0.5826 | 19.79 | 3800 | 0.6120 | 0.6771 | 0.6774 | | 0.576 | 20.83 | 4000 | 0.6182 | 0.6697 | 0.6680 | | 0.579 | 21.88 | 4200 | 0.6227 | 0.6610 | 0.6588 | | 0.5766 | 22.92 | 4400 | 0.6199 | 0.6693 | 0.6676 | | 0.574 | 23.96 | 4600 | 0.6246 | 0.6611 | 0.6588 | | 0.5733 | 25.0 | 4800 | 0.6145 | 0.6750 | 0.6745 | | 0.5718 | 26.04 | 5000 | 0.6181 | 0.6714 | 0.6706 | | 0.5735 | 27.08 | 5200 | 0.6164 | 0.6725 | 0.6712 | | 0.571 | 28.12 | 5400 | 0.6126 | 0.6756 | 0.6748 | | 0.5714 | 29.17 | 5600 | 0.6112 | 0.6778 | 0.6774 | | 0.5653 | 30.21 | 5800 | 0.6211 | 0.6701 | 0.6686 | | 0.5691 | 31.25 | 6000 | 0.6191 | 0.6703 | 0.6686 | | 0.5659 | 32.29 | 6200 | 0.6093 | 0.6780 | 0.6787 | | 0.5674 | 33.33 | 6400 | 0.6177 | 0.6727 | 0.6716 | | 0.5646 | 34.38 | 6600 | 0.6116 | 0.6733 | 0.6735 | | 0.5638 | 35.42 | 6800 | 0.6152 | 0.6729 | 0.6722 | | 0.564 | 36.46 | 7000 | 0.6129 | 0.6750 | 0.6748 | | 0.5601 | 37.5 | 7200 | 0.6183 | 0.6719 | 0.6703 | | 0.5607 | 38.54 | 7400 | 0.6166 | 0.6745 | 0.6735 | | 0.566 | 39.58 | 7600 | 0.6112 | 0.6797 | 0.6804 | | 0.5596 | 40.62 | 7800 | 0.6226 | 0.6673 | 0.6654 | | 0.5599 | 41.67 | 8000 | 0.6182 | 0.6694 | 0.6676 | | 0.5576 | 42.71 | 8200 | 0.6144 | 0.6782 | 0.6787 | | 0.5594 | 43.75 | 8400 | 0.6165 | 0.6723 | 0.6716 | | 0.5617 | 44.79 | 8600 | 0.6195 | 0.6689 | 0.6673 | | 0.556 | 45.83 | 8800 | 0.6149 | 0.6732 | 0.6729 | | 0.5588 | 46.88 | 
9000 | 0.6242 | 0.6629 | 0.6608 | | 0.5548 | 47.92 | 9200 | 0.6218 | 0.6665 | 0.6647 | | 0.5566 | 48.96 | 9400 | 0.6186 | 0.6722 | 0.6712 | | 0.5551 | 50.0 | 9600 | 0.6230 | 0.6675 | 0.6657 | | 0.5542 | 51.04 | 9800 | 0.6213 | 0.6676 | 0.6660 | | 0.5549 | 52.08 | 10000 | 0.6214 | 0.6679 | 0.6663 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_EMP_H3K4me2-seqsight_32768_512_30M-L8_f", "results": []}]}
mahdibaghbanzadeh/GUE_EMP_H3K4me2-seqsight_32768_512_30M-L8_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T04:08:40+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_EMP\_H3K4me2-seqsight\_32768\_512\_30M-L8\_f ================================================= This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_EMP\_H3K4me2 dataset. It achieves the following results on the evaluation set: * Loss: 0.5980 * F1 Score: 0.6814 * Accuracy: 0.6820 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
null
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
fruk19/hidaka_model
null
[ "transformers", "safetensors", "vision-encoder-decoder", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
2024-04-30T04:09:05+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #vision-encoder-decoder #arxiv-1910.09700 #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #vision-encoder-decoder #arxiv-1910.09700 #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 35, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #vision-encoder-decoder #arxiv-1910.09700 #endpoints_compatible #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # aligner-v1-llama3-01 This model is a fine-tuned version of [meta-llama/Meta-Llama-3-8B](https://huggingface.co/meta-llama/Meta-Llama-3-8B) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.4361 - Rewards/chosen: -0.0331 - Rewards/rejected: -0.0349 - Rewards/accuracies: 0.8333 - Rewards/margins: 0.0018 - Logps/rejected: -0.3493 - Logps/chosen: -0.3313 - Logits/rejected: -1.5592 - Logits/chosen: -1.5485 - Nll Loss: 1.3699 - Log Odds Ratio: -0.6618 - Log Odds Chosen: 0.0646 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 8e-06 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 8 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 10 - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen | Nll Loss | Log Odds Ratio | Log Odds Chosen | |:-------------:|:------:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|:--------:|:--------------:|:---------------:| | 2.8628 | 0.2105 | 15 | 2.7068 | -0.1219 | -0.1183 | 0.0 | -0.0036 | -1.1830 | -1.2191 | -1.8565 | -1.8340 | 2.6349 | -0.7190 | -0.0509 | | 2.1044 | 0.4211 | 30 | 2.0553 | -0.0702 | -0.0687 | 0.1667 | -0.0015 | -0.6871 | -0.7024 | -1.6352 | -1.6218 | 1.9845 | -0.7082 | -0.0296 | | 1.6915 | 0.6316 | 45 | 1.6323 | -0.0431 | -0.0436 | 0.8333 | 0.0006 | -0.4364 | -0.4305 | -1.6833 | -1.6715 | 1.5639 | -0.6842 | 0.0185 | | 1.4279 | 0.8421 | 60 | 1.4361 | -0.0331 | -0.0349 | 0.8333 | 0.0018 | -0.3493 | -0.3313 | -1.5592 | -1.5485 | 1.3699 | -0.6618 | 0.0646 | ### Framework versions - PEFT 0.10.0 - Transformers 4.40.1 - Pytorch 2.2.1+cu121 - Datasets 2.19.0 - Tokenizers 0.19.1
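The card records the ORPO hyperparameters and framework versions but not the training script itself. A minimal sketch of how such a run could look with TRL's `ORPOTrainer` plus a LoRA adapter (suggested by the `trl`, `orpo`, and `peft` tags) follows; the preference-pair file, LoRA rank/alpha, and padding setup are placeholders, not values from the card.

```python
# Sketch of an ORPO + LoRA run mirroring the hyperparameters listed above
# (dataset path and LoRA settings are illustrative placeholders).
from datasets import load_dataset
from peft import LoraConfig
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import ORPOConfig, ORPOTrainer

base_id = "meta-llama/Meta-Llama-3-8B"
model = AutoModelForCausalLM.from_pretrained(base_id)
tokenizer = AutoTokenizer.from_pretrained(base_id)
tokenizer.pad_token = tokenizer.eos_token

# Expects columns: prompt / chosen / rejected
train_dataset = load_dataset("json", data_files="preference_pairs.json", split="train")

args = ORPOConfig(
    output_dir="aligner-v1-llama3-01",
    learning_rate=8e-6,
    per_device_train_batch_size=2,
    per_device_eval_batch_size=2,
    gradient_accumulation_steps=4,
    lr_scheduler_type="linear",
    warmup_steps=10,
    num_train_epochs=1,
    seed=42,
)
peft_config = LoraConfig(r=16, lora_alpha=32, task_type="CAUSAL_LM")  # placeholder rank/alpha

trainer = ORPOTrainer(
    model=model,
    args=args,
    train_dataset=train_dataset,
    tokenizer=tokenizer,
    peft_config=peft_config,
)
trainer.train()
```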
{"license": "other", "library_name": "peft", "tags": ["trl", "orpo", "generated_from_trainer"], "base_model": "meta-llama/Meta-Llama-3-8B", "model-index": [{"name": "aligner-v1-llama3-01", "results": []}]}
Ksgk-fy/aligner-v1-llama3-01
null
[ "peft", "safetensors", "trl", "orpo", "generated_from_trainer", "base_model:meta-llama/Meta-Llama-3-8B", "license:other", "region:us" ]
null
2024-04-30T04:14:00+00:00
[]
[]
TAGS #peft #safetensors #trl #orpo #generated_from_trainer #base_model-meta-llama/Meta-Llama-3-8B #license-other #region-us
aligner-v1-llama3-01 ==================== This model is a fine-tuned version of meta-llama/Meta-Llama-3-8B on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 1.4361 * Rewards/chosen: -0.0331 * Rewards/rejected: -0.0349 * Rewards/accuracies: 0.8333 * Rewards/margins: 0.0018 * Logps/rejected: -0.3493 * Logps/chosen: -0.3313 * Logits/rejected: -1.5592 * Logits/chosen: -1.5485 * Nll Loss: 1.3699 * Log Odds Ratio: -0.6618 * Log Odds Chosen: 0.0646 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 8e-06 * train\_batch\_size: 2 * eval\_batch\_size: 2 * seed: 42 * gradient\_accumulation\_steps: 4 * total\_train\_batch\_size: 8 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * lr\_scheduler\_warmup\_steps: 10 * num\_epochs: 1 ### Training results ### Framework versions * PEFT 0.10.0 * Transformers 4.40.1 * Pytorch 2.2.1+cu121 * Datasets 2.19.0 * Tokenizers 0.19.1
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 8e-06\n* train\\_batch\\_size: 2\n* eval\\_batch\\_size: 2\n* seed: 42\n* gradient\\_accumulation\\_steps: 4\n* total\\_train\\_batch\\_size: 8\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* lr\\_scheduler\\_warmup\\_steps: 10\n* num\\_epochs: 1", "### Training results", "### Framework versions\n\n\n* PEFT 0.10.0\n* Transformers 4.40.1\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
[ "TAGS\n#peft #safetensors #trl #orpo #generated_from_trainer #base_model-meta-llama/Meta-Llama-3-8B #license-other #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 8e-06\n* train\\_batch\\_size: 2\n* eval\\_batch\\_size: 2\n* seed: 42\n* gradient\\_accumulation\\_steps: 4\n* total\\_train\\_batch\\_size: 8\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* lr\\_scheduler\\_warmup\\_steps: 10\n* num\\_epochs: 1", "### Training results", "### Framework versions\n\n\n* PEFT 0.10.0\n* Transformers 4.40.1\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
[ 47, 140, 5, 52 ]
[ "TAGS\n#peft #safetensors #trl #orpo #generated_from_trainer #base_model-meta-llama/Meta-Llama-3-8B #license-other #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 8e-06\n* train\\_batch\\_size: 2\n* eval\\_batch\\_size: 2\n* seed: 42\n* gradient\\_accumulation\\_steps: 4\n* total\\_train\\_batch\\_size: 8\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* lr\\_scheduler\\_warmup\\_steps: 10\n* num\\_epochs: 1### Training results### Framework versions\n\n\n* PEFT 0.10.0\n* Transformers 4.40.1\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_EMP_H3K4me2-seqsight_32768_512_30M-L32_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_EMP_H3K4me2](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_EMP_H3K4me2) dataset. It achieves the following results on the evaluation set: - Loss: 0.5996 - F1 Score: 0.6706 - Accuracy: 0.6735 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:| | 0.6466 | 1.04 | 200 | 0.6215 | 0.6422 | 0.6605 | | 0.6183 | 2.08 | 400 | 0.6391 | 0.6355 | 0.6328 | | 0.6117 | 3.12 | 600 | 0.6127 | 0.6578 | 0.6657 | | 0.6078 | 4.17 | 800 | 0.6438 | 0.6425 | 0.6399 | | 0.603 | 5.21 | 1000 | 0.6093 | 0.6701 | 0.6751 | | 0.5969 | 6.25 | 1200 | 0.6119 | 0.6672 | 0.6683 | | 0.5883 | 7.29 | 1400 | 0.6109 | 0.6679 | 0.6686 | | 0.5884 | 8.33 | 1600 | 0.6236 | 0.6656 | 0.6637 | | 0.5821 | 9.38 | 1800 | 0.6329 | 0.6546 | 0.6520 | | 0.5761 | 10.42 | 2000 | 0.6073 | 0.6728 | 0.6777 | | 0.5791 | 11.46 | 2200 | 0.6121 | 0.6784 | 0.6804 | | 0.5684 | 12.5 | 2400 | 0.6158 | 0.6737 | 0.6745 | | 0.5641 | 13.54 | 2600 | 0.6354 | 0.6682 | 0.6663 | | 0.5652 | 14.58 | 2800 | 0.6163 | 0.6714 | 0.6722 | | 0.5626 | 15.62 | 3000 | 0.6323 | 0.6655 | 0.6637 | | 0.5534 | 16.67 | 3200 | 0.6317 | 0.6589 | 0.6569 | | 0.5497 | 17.71 | 3400 | 0.6289 | 0.6551 | 0.6527 | | 0.5498 | 18.75 | 3600 | 0.6250 | 0.6695 | 0.6680 | | 0.549 | 19.79 | 3800 | 0.6511 | 0.6484 | 0.6458 | | 0.5396 | 20.83 | 4000 | 0.6248 | 0.6676 | 0.6660 | | 0.541 | 21.88 | 4200 | 0.6431 | 0.6582 | 0.6556 | | 0.535 | 22.92 | 4400 | 0.6522 | 0.6578 | 0.6553 | | 0.5304 | 23.96 | 4600 | 0.6437 | 0.6591 | 0.6566 | | 0.5291 | 25.0 | 4800 | 0.6536 | 0.6485 | 0.6458 | | 0.5236 | 26.04 | 5000 | 0.6384 | 0.6638 | 0.6618 | | 0.5239 | 27.08 | 5200 | 0.6368 | 0.6631 | 0.6608 | | 0.5184 | 28.12 | 5400 | 0.6363 | 0.6645 | 0.6624 | | 0.5166 | 29.17 | 5600 | 0.6427 | 0.6558 | 0.6533 | | 0.5082 | 30.21 | 5800 | 0.6549 | 0.6571 | 0.6549 | | 0.512 | 31.25 | 6000 | 0.6498 | 0.6568 | 0.6543 | | 0.5049 | 32.29 | 6200 | 0.6523 | 0.6551 | 0.6527 | | 0.5058 | 33.33 | 6400 | 0.6637 | 0.6555 | 0.6530 | | 0.5015 | 34.38 | 6600 | 0.6583 | 0.6577 | 0.6556 | | 0.5006 | 35.42 | 6800 | 0.6677 | 0.6462 | 0.6435 | | 0.4944 | 36.46 | 7000 | 0.6683 | 0.6509 | 0.6484 | | 0.4917 | 37.5 | 7200 | 0.6583 | 0.6577 | 0.6553 | | 0.4896 | 38.54 | 7400 | 0.6636 | 0.6519 | 0.6494 | | 0.4953 | 39.58 | 7600 | 0.6492 | 0.6679 | 0.6676 | | 0.4872 | 40.62 | 7800 | 0.6830 | 0.6417 | 0.6393 | | 0.4851 | 41.67 | 8000 | 0.6629 | 0.6530 | 0.6504 | | 0.4814 | 42.71 | 8200 | 0.6695 | 0.6476 | 0.6452 | | 0.4805 | 43.75 | 8400 | 0.6657 | 0.6546 | 0.6520 | | 0.4817 | 44.79 | 8600 | 0.6747 | 0.6540 | 0.6514 | | 0.4768 | 45.83 | 8800 | 0.6611 | 0.6608 | 0.6588 | | 0.4798 
| 46.88 | 9000 | 0.6750 | 0.6527 | 0.6500 | | 0.4736 | 47.92 | 9200 | 0.6768 | 0.6533 | 0.6507 | | 0.4757 | 48.96 | 9400 | 0.6713 | 0.6570 | 0.6546 | | 0.4723 | 50.0 | 9600 | 0.6803 | 0.6533 | 0.6507 | | 0.4724 | 51.04 | 9800 | 0.6813 | 0.6530 | 0.6504 | | 0.4699 | 52.08 | 10000 | 0.6809 | 0.6540 | 0.6514 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
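The card gives the adapter's evaluation scores but no usage snippet. A sketch of loading this LoRA adapter on top of the base seqsight checkpoint for inference follows; it assumes the base model is compatible with `AutoModelForSequenceClassification`, uses a binary label head, and ships a tokenizer for raw DNA strings.

```python
# Sketch of adapter inference (num_labels=2 and the toy DNA sequence are assumptions).
import torch
from peft import PeftModel
from transformers import AutoModelForSequenceClassification, AutoTokenizer

base_id = "mahdibaghbanzadeh/seqsight_32768_512_30M"
adapter_id = "mahdibaghbanzadeh/GUE_EMP_H3K4me2-seqsight_32768_512_30M-L32_f"

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForSequenceClassification.from_pretrained(base_id, num_labels=2)
model = PeftModel.from_pretrained(base, adapter_id).eval()

inputs = tokenizer("ACGTACGTACGTACGT", return_tensors="pt")
with torch.no_grad():
    probs = model(**inputs).logits.softmax(-1)
print(probs)
```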
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_EMP_H3K4me2-seqsight_32768_512_30M-L32_f", "results": []}]}
mahdibaghbanzadeh/GUE_EMP_H3K4me2-seqsight_32768_512_30M-L32_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T04:15:12+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_EMP\_H3K4me2-seqsight\_32768\_512\_30M-L32\_f ================================================== This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_EMP\_H3K4me2 dataset. It achieves the following results on the evaluation set: * Loss: 0.5996 * F1 Score: 0.6706 * Accuracy: 0.6735 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_EMP_H3K9ac-seqsight_32768_512_30M-L1_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_EMP_H3K9ac](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_EMP_H3K9ac) dataset. It achieves the following results on the evaluation set: - Loss: 0.5056 - F1 Score: 0.7602 - Accuracy: 0.7600 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:| | 0.6135 | 1.15 | 200 | 0.6051 | 0.6809 | 0.6837 | | 0.568 | 2.3 | 400 | 0.6276 | 0.6593 | 0.6693 | | 0.549 | 3.45 | 600 | 0.6638 | 0.6411 | 0.6556 | | 0.5469 | 4.6 | 800 | 0.6112 | 0.6717 | 0.6797 | | 0.5401 | 5.75 | 1000 | 0.5847 | 0.7014 | 0.7046 | | 0.5366 | 6.9 | 1200 | 0.5835 | 0.7028 | 0.7053 | | 0.5323 | 8.05 | 1400 | 0.5841 | 0.6957 | 0.6992 | | 0.5291 | 9.2 | 1600 | 0.6242 | 0.6636 | 0.6743 | | 0.5265 | 10.34 | 1800 | 0.5708 | 0.7085 | 0.7100 | | 0.5274 | 11.49 | 2000 | 0.5831 | 0.6970 | 0.7003 | | 0.5156 | 12.64 | 2200 | 0.6086 | 0.6808 | 0.6884 | | 0.5236 | 13.79 | 2400 | 0.5860 | 0.6945 | 0.6992 | | 0.5169 | 14.94 | 2600 | 0.5761 | 0.6992 | 0.7028 | | 0.5151 | 16.09 | 2800 | 0.5545 | 0.7202 | 0.7208 | | 0.5141 | 17.24 | 3000 | 0.5720 | 0.7112 | 0.7136 | | 0.5113 | 18.39 | 3200 | 0.5723 | 0.7048 | 0.7082 | | 0.5117 | 19.54 | 3400 | 0.5575 | 0.7116 | 0.7132 | | 0.5076 | 20.69 | 3600 | 0.5581 | 0.7156 | 0.7172 | | 0.504 | 21.84 | 3800 | 0.5462 | 0.7226 | 0.7233 | | 0.5049 | 22.99 | 4000 | 0.5607 | 0.7121 | 0.7139 | | 0.5039 | 24.14 | 4200 | 0.5326 | 0.7287 | 0.7283 | | 0.4962 | 25.29 | 4400 | 0.5532 | 0.7228 | 0.7236 | | 0.5032 | 26.44 | 4600 | 0.5572 | 0.7174 | 0.7190 | | 0.4971 | 27.59 | 4800 | 0.5615 | 0.7163 | 0.7182 | | 0.498 | 28.74 | 5000 | 0.5526 | 0.7201 | 0.7218 | | 0.5028 | 29.89 | 5200 | 0.5424 | 0.7264 | 0.7269 | | 0.4958 | 31.03 | 5400 | 0.5537 | 0.7191 | 0.7208 | | 0.4966 | 32.18 | 5600 | 0.5343 | 0.7258 | 0.7254 | | 0.49 | 33.33 | 5800 | 0.5416 | 0.7284 | 0.7283 | | 0.5015 | 34.48 | 6000 | 0.5405 | 0.7266 | 0.7269 | | 0.491 | 35.63 | 6200 | 0.5331 | 0.7275 | 0.7272 | | 0.4951 | 36.78 | 6400 | 0.5474 | 0.7234 | 0.7244 | | 0.4922 | 37.93 | 6600 | 0.5390 | 0.7253 | 0.7254 | | 0.4898 | 39.08 | 6800 | 0.5376 | 0.7285 | 0.7287 | | 0.4914 | 40.23 | 7000 | 0.5362 | 0.7279 | 0.7280 | | 0.4917 | 41.38 | 7200 | 0.5389 | 0.7275 | 0.7280 | | 0.4898 | 42.53 | 7400 | 0.5419 | 0.7280 | 0.7287 | | 0.4915 | 43.68 | 7600 | 0.5351 | 0.7296 | 0.7298 | | 0.4878 | 44.83 | 7800 | 0.5439 | 0.7257 | 0.7265 | | 0.4901 | 45.98 | 8000 | 0.5424 | 0.7277 | 0.7283 | | 0.4884 | 47.13 | 8200 | 0.5406 | 0.7268 | 0.7272 | | 0.4885 | 48.28 | 8400 | 0.5398 | 0.7269 | 0.7272 | | 0.4862 | 49.43 | 8600 | 0.5342 | 0.7296 | 0.7294 | | 0.4879 | 50.57 | 8800 | 0.5392 | 0.7285 | 0.7287 | | 0.488 | 51.72 
| 9000 | 0.5415 | 0.7299 | 0.7305 | | 0.484 | 52.87 | 9200 | 0.5435 | 0.7290 | 0.7298 | | 0.4876 | 54.02 | 9400 | 0.5411 | 0.7292 | 0.7298 | | 0.4855 | 55.17 | 9600 | 0.5401 | 0.7289 | 0.7294 | | 0.4856 | 56.32 | 9800 | 0.5371 | 0.7309 | 0.7312 | | 0.4865 | 57.47 | 10000 | 0.5389 | 0.7301 | 0.7305 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
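The F1 Score and Accuracy columns reported above can be produced by a standard `compute_metrics` hook passed to the Trainer. A short sketch is shown below; binary labels and macro-averaged F1 are assumptions, since the card does not state how F1 was averaged.

```python
# Sketch of the metric computation behind the F1 Score / Accuracy columns (averaging is assumed).
import numpy as np
from sklearn.metrics import accuracy_score, f1_score

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=-1)
    return {
        "f1": f1_score(labels, preds, average="macro"),
        "accuracy": accuracy_score(labels, preds),
    }

# Passed to the Trainer as: Trainer(..., compute_metrics=compute_metrics)
```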
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_EMP_H3K9ac-seqsight_32768_512_30M-L1_f", "results": []}]}
mahdibaghbanzadeh/GUE_EMP_H3K9ac-seqsight_32768_512_30M-L1_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T04:16:01+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_EMP\_H3K9ac-seqsight\_32768\_512\_30M-L1\_f ================================================ This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_EMP\_H3K9ac dataset. It achieves the following results on the evaluation set: * Loss: 0.5056 * F1 Score: 0.7602 * Accuracy: 0.7600 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
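The card's quick-start section is empty. Given the `text-generation`, `conversational`, `custom_code`, and `4-bit` tags on this repository, a hedged loading sketch might look like the following; the quantization settings and the Korean prompt are illustrative, and a chat template on the tokenizer is assumed.

```python
# Hypothetical quick-start for this 4-bit conversational checkpoint (settings are illustrative).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "kyounghyun/EEVE-Korean-Instruct-2.8B-v1.0-20240430-2"
bnb = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16)

tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id, quantization_config=bnb, device_map="auto", trust_remote_code=True
)

messages = [{"role": "user", "content": "안녕하세요, 간단히 자기소개를 해 주세요."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
output = model.generate(input_ids, max_new_tokens=128)
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
```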
{"library_name": "transformers", "tags": []}
kyounghyun/EEVE-Korean-Instruct-2.8B-v1.0-20240430-2
null
[ "transformers", "safetensors", "phi", "text-generation", "conversational", "custom_code", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "4-bit", "region:us" ]
null
2024-04-30T04:16:37+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #phi #text-generation #conversational #custom_code #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #phi #text-generation #conversational #custom_code #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 54, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #phi #text-generation #conversational #custom_code #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_EMP_H3K9ac-seqsight_32768_512_30M-L8_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_EMP_H3K9ac](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_EMP_H3K9ac) dataset. It achieves the following results on the evaluation set: - Loss: 0.4849 - F1 Score: 0.7717 - Accuracy: 0.7711 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:| | 0.5939 | 1.15 | 200 | 0.5881 | 0.6942 | 0.6963 | | 0.5479 | 2.3 | 400 | 0.6719 | 0.6133 | 0.6351 | | 0.5277 | 3.45 | 600 | 0.6097 | 0.6835 | 0.6902 | | 0.5221 | 4.6 | 800 | 0.5649 | 0.7057 | 0.7082 | | 0.5142 | 5.75 | 1000 | 0.5473 | 0.7279 | 0.7283 | | 0.508 | 6.9 | 1200 | 0.5458 | 0.7246 | 0.7247 | | 0.5044 | 8.05 | 1400 | 0.5579 | 0.7147 | 0.7164 | | 0.4986 | 9.2 | 1600 | 0.5739 | 0.6996 | 0.7049 | | 0.4961 | 10.34 | 1800 | 0.5555 | 0.7246 | 0.7258 | | 0.4955 | 11.49 | 2000 | 0.5400 | 0.7335 | 0.7337 | | 0.4864 | 12.64 | 2200 | 0.5728 | 0.7042 | 0.7092 | | 0.491 | 13.79 | 2400 | 0.5279 | 0.7362 | 0.7362 | | 0.485 | 14.94 | 2600 | 0.5321 | 0.7306 | 0.7312 | | 0.4823 | 16.09 | 2800 | 0.5368 | 0.7319 | 0.7330 | | 0.4823 | 17.24 | 3000 | 0.5420 | 0.7306 | 0.7319 | | 0.476 | 18.39 | 3200 | 0.5328 | 0.7314 | 0.7316 | | 0.479 | 19.54 | 3400 | 0.5289 | 0.7342 | 0.7348 | | 0.4729 | 20.69 | 3600 | 0.5216 | 0.7380 | 0.7377 | | 0.4698 | 21.84 | 3800 | 0.5466 | 0.7280 | 0.7301 | | 0.4707 | 22.99 | 4000 | 0.5451 | 0.7244 | 0.7269 | | 0.4704 | 24.14 | 4200 | 0.5234 | 0.7425 | 0.7424 | | 0.4603 | 25.29 | 4400 | 0.5452 | 0.7390 | 0.7398 | | 0.467 | 26.44 | 4600 | 0.5324 | 0.7397 | 0.7398 | | 0.4605 | 27.59 | 4800 | 0.5406 | 0.7382 | 0.7391 | | 0.4615 | 28.74 | 5000 | 0.5333 | 0.7400 | 0.7406 | | 0.4659 | 29.89 | 5200 | 0.5364 | 0.7409 | 0.7413 | | 0.46 | 31.03 | 5400 | 0.5299 | 0.7435 | 0.7438 | | 0.4581 | 32.18 | 5600 | 0.5224 | 0.7462 | 0.7460 | | 0.4506 | 33.33 | 5800 | 0.5321 | 0.7459 | 0.7456 | | 0.4614 | 34.48 | 6000 | 0.5286 | 0.7471 | 0.7470 | | 0.451 | 35.63 | 6200 | 0.5194 | 0.7483 | 0.7478 | | 0.4568 | 36.78 | 6400 | 0.5335 | 0.7438 | 0.7442 | | 0.4509 | 37.93 | 6600 | 0.5496 | 0.7319 | 0.7330 | | 0.4498 | 39.08 | 6800 | 0.5295 | 0.7479 | 0.7478 | | 0.4497 | 40.23 | 7000 | 0.5345 | 0.7447 | 0.7449 | | 0.4491 | 41.38 | 7200 | 0.5461 | 0.7390 | 0.7398 | | 0.4512 | 42.53 | 7400 | 0.5352 | 0.7394 | 0.7398 | | 0.4487 | 43.68 | 7600 | 0.5305 | 0.7478 | 0.7478 | | 0.4472 | 44.83 | 7800 | 0.5382 | 0.7427 | 0.7431 | | 0.4482 | 45.98 | 8000 | 0.5231 | 0.7488 | 0.7485 | | 0.4459 | 47.13 | 8200 | 0.5408 | 0.7394 | 0.7398 | | 0.4456 | 48.28 | 8400 | 0.5319 | 0.7460 | 0.7460 | | 0.4418 | 49.43 | 8600 | 0.5314 | 0.7468 | 0.7467 | | 0.4449 | 50.57 | 8800 | 0.5351 | 0.7463 | 0.7463 | | 0.4438 | 51.72 
| 9000 | 0.5454 | 0.7432 | 0.7438 | | 0.4416 | 52.87 | 9200 | 0.5444 | 0.7409 | 0.7416 | | 0.4431 | 54.02 | 9400 | 0.5426 | 0.7393 | 0.7398 | | 0.441 | 55.17 | 9600 | 0.5405 | 0.7413 | 0.7416 | | 0.4411 | 56.32 | 9800 | 0.5347 | 0.7437 | 0.7438 | | 0.4409 | 57.47 | 10000 | 0.5362 | 0.7444 | 0.7445 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
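For this adapter the card again lists only hyperparameters and scores. A sketch of `TrainingArguments` mirroring those hyperparameters is shown below; the 200-step evaluation interval is inferred from the results table, and dataset loading plus the LoRA wrapping are deliberately elided.

```python
# Sketch of TrainingArguments matching the hyperparameters above (eval cadence inferred from the table).
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="GUE_EMP_H3K9ac-seqsight_32768_512_30M-L8_f",
    learning_rate=5e-4,
    per_device_train_batch_size=128,
    per_device_eval_batch_size=128,
    seed=42,
    lr_scheduler_type="linear",
    max_steps=10_000,              # "training_steps: 10000"
    evaluation_strategy="steps",
    eval_steps=200,                # inferred: validation rows appear every 200 steps
    logging_steps=200,
)
```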
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_EMP_H3K9ac-seqsight_32768_512_30M-L8_f", "results": []}]}
mahdibaghbanzadeh/GUE_EMP_H3K9ac-seqsight_32768_512_30M-L8_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T04:16:47+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_EMP\_H3K9ac-seqsight\_32768\_512\_30M-L8\_f ================================================ This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_EMP\_H3K9ac dataset. It achieves the following results on the evaluation set: * Loss: 0.4849 * F1 Score: 0.7717 * Accuracy: 0.7711 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
automatic-speech-recognition
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-base-vivos This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.5366 - Wer: 0.3320 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 1000 - num_epochs: 30 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:-----:|:---------------:|:------:| | 7.987 | 0.66 | 500 | 3.5460 | 1.0 | | 3.4026 | 1.31 | 1000 | 3.0685 | 1.0 | | 1.6402 | 1.97 | 1500 | 0.7959 | 0.7082 | | 0.8229 | 2.62 | 2000 | 0.5581 | 0.5326 | | 0.6392 | 3.28 | 2500 | 0.4779 | 0.4738 | | 0.5532 | 3.94 | 3000 | 0.4415 | 0.4491 | | 0.4937 | 4.59 | 3500 | 0.4318 | 0.4312 | | 0.4506 | 5.25 | 4000 | 0.4284 | 0.4134 | | 0.4099 | 5.91 | 4500 | 0.4405 | 0.4267 | | 0.3848 | 6.56 | 5000 | 0.4097 | 0.3987 | | 0.3683 | 7.22 | 5500 | 0.4239 | 0.4031 | | 0.3485 | 7.87 | 6000 | 0.4383 | 0.3926 | | 0.3313 | 8.53 | 6500 | 0.4779 | 0.3846 | | 0.321 | 9.19 | 7000 | 0.4623 | 0.3895 | | 0.3058 | 9.84 | 7500 | 0.4668 | 0.3906 | | 0.2869 | 10.5 | 8000 | 0.4817 | 0.3749 | | 0.2828 | 11.15 | 8500 | 0.4777 | 0.3789 | | 0.2724 | 11.81 | 9000 | 0.4915 | 0.3649 | | 0.2527 | 12.47 | 9500 | 0.4671 | 0.3670 | | 0.2588 | 13.12 | 10000 | 0.4693 | 0.3612 | | 0.2405 | 13.78 | 10500 | 0.4375 | 0.3579 | | 0.2409 | 14.44 | 11000 | 0.4643 | 0.3595 | | 0.2247 | 15.09 | 11500 | 0.5445 | 0.3626 | | 0.2257 | 15.75 | 12000 | 0.4474 | 0.3513 | | 0.2101 | 16.4 | 12500 | 0.4327 | 0.3502 | | 0.2118 | 17.06 | 13000 | 0.4830 | 0.3534 | | 0.1991 | 17.72 | 13500 | 0.4832 | 0.3454 | | 0.193 | 18.37 | 14000 | 0.4878 | 0.3547 | | 0.1909 | 19.03 | 14500 | 0.4777 | 0.3506 | | 0.1869 | 19.69 | 15000 | 0.4722 | 0.3455 | | 0.1801 | 20.34 | 15500 | 0.4891 | 0.3477 | | 0.1749 | 21.0 | 16000 | 0.5065 | 0.3446 | | 0.1715 | 21.65 | 16500 | 0.5381 | 0.3447 | | 0.1669 | 22.31 | 17000 | 0.4946 | 0.3459 | | 0.1674 | 22.97 | 17500 | 0.4968 | 0.3425 | | 0.1579 | 23.62 | 18000 | 0.5210 | 0.3370 | | 0.1566 | 24.28 | 18500 | 0.5318 | 0.3385 | | 0.1565 | 24.93 | 19000 | 0.4959 | 0.3381 | | 0.1517 | 25.59 | 19500 | 0.5181 | 0.3393 | | 0.1452 | 26.25 | 20000 | 0.5222 | 0.3359 | | 0.1419 | 26.9 | 20500 | 0.5316 | 0.3333 | | 0.1389 | 27.56 | 21000 | 0.5094 | 0.3302 | | 0.1422 | 28.22 | 21500 | 0.5327 | 0.3346 | | 0.1365 | 28.87 | 22000 | 0.5436 | 0.3320 | | 0.1337 | 29.53 | 22500 | 0.5366 | 0.3320 | ### Framework versions - Transformers 4.39.3 - Pytorch 2.1.2 - Datasets 2.18.0 - Tokenizers 0.15.2
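The card reports WER but no inference snippet. A sketch of transcribing a clip with this checkpoint and scoring it with WER follows; the audio path and reference transcript are placeholders, 16 kHz mono input is assumed (the XLS-R sampling rate), and the repository is assumed to ship its processor and vocabulary.

```python
# Sketch of CTC transcription + WER scoring (file names and reference text are placeholders).
import torch
import evaluate
import librosa
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

model_id = "Lasion/wav2vec2-xls-r-300m-vivos"
processor = Wav2Vec2Processor.from_pretrained(model_id)
model = Wav2Vec2ForCTC.from_pretrained(model_id).eval()

speech, _ = librosa.load("sample.wav", sr=16_000)
inputs = processor(speech, sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(inputs.input_values).logits
prediction = processor.batch_decode(torch.argmax(logits, dim=-1))[0]

wer = evaluate.load("wer").compute(predictions=[prediction], references=["reference transcript here"])
print(prediction, wer)
```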
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "metrics": ["wer"], "base_model": "facebook/wav2vec2-xls-r-300m", "model-index": [{"name": "wav2vec2-base-vivos", "results": []}]}
Lasion/wav2vec2-xls-r-300m-vivos
null
[ "transformers", "tensorboard", "safetensors", "wav2vec2", "automatic-speech-recognition", "generated_from_trainer", "base_model:facebook/wav2vec2-xls-r-300m", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-04-30T04:19:12+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #wav2vec2 #automatic-speech-recognition #generated_from_trainer #base_model-facebook/wav2vec2-xls-r-300m #license-apache-2.0 #endpoints_compatible #region-us
wav2vec2-base-vivos =================== This model is a fine-tuned version of facebook/wav2vec2-xls-r-300m on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 0.5366 * Wer: 0.3320 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0001 * train\_batch\_size: 8 * eval\_batch\_size: 8 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * lr\_scheduler\_warmup\_steps: 1000 * num\_epochs: 30 * mixed\_precision\_training: Native AMP ### Training results ### Framework versions * Transformers 4.39.3 * Pytorch 2.1.2 * Datasets 2.18.0 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0001\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* lr\\_scheduler\\_warmup\\_steps: 1000\n* num\\_epochs: 30\n* mixed\\_precision\\_training: Native AMP", "### Training results", "### Framework versions\n\n\n* Transformers 4.39.3\n* Pytorch 2.1.2\n* Datasets 2.18.0\n* Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #tensorboard #safetensors #wav2vec2 #automatic-speech-recognition #generated_from_trainer #base_model-facebook/wav2vec2-xls-r-300m #license-apache-2.0 #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0001\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* lr\\_scheduler\\_warmup\\_steps: 1000\n* num\\_epochs: 30\n* mixed\\_precision\\_training: Native AMP", "### Training results", "### Framework versions\n\n\n* Transformers 4.39.3\n* Pytorch 2.1.2\n* Datasets 2.18.0\n* Tokenizers 0.15.2" ]
[ 67, 128, 5, 40 ]
[ "TAGS\n#transformers #tensorboard #safetensors #wav2vec2 #automatic-speech-recognition #generated_from_trainer #base_model-facebook/wav2vec2-xls-r-300m #license-apache-2.0 #endpoints_compatible #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0001\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* lr\\_scheduler\\_warmup\\_steps: 1000\n* num\\_epochs: 30\n* mixed\\_precision\\_training: Native AMP### Training results### Framework versions\n\n\n* Transformers 4.39.3\n* Pytorch 2.1.2\n* Datasets 2.18.0\n* Tokenizers 0.15.2" ]
null
transformers
# Uploaded model - **Developed by:** arvnoodle - **License:** apache-2.0 - **Finetuned from model:** unsloth/Phi-3-mini-4k-instruct This Phi-3 model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Hugging Face's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
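A sketch of reloading the uploaded weights with Unsloth for generation follows; the sequence length and 4-bit flag are illustrative choices, and the XML-to-JSON prompt is only a guess based on the repository name, not a documented use case.

```python
# Hypothetical reload-and-generate sketch with Unsloth (settings and prompt are assumptions).
from unsloth import FastLanguageModel

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="arvnoodle/hcl-phi3-it-3b-xml-json",
    max_seq_length=4096,
    load_in_4bit=True,
)
FastLanguageModel.for_inference(model)  # switch to faster inference kernels

prompt = 'Convert this XML to JSON: <item id="1"><name>widget</name></item>'
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=128)[0], skip_special_tokens=True))
```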
{"language": ["en"], "license": "apache-2.0", "tags": ["text-generation-inference", "transformers", "unsloth", "mistral", "trl"], "base_model": "unsloth/Phi-3-mini-4k-instruct"}
arvnoodle/hcl-phi3-it-3b-xml-json
null
[ "transformers", "safetensors", "text-generation-inference", "unsloth", "mistral", "trl", "en", "base_model:unsloth/Phi-3-mini-4k-instruct", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-04-30T04:19:50+00:00
[]
[ "en" ]
TAGS #transformers #safetensors #text-generation-inference #unsloth #mistral #trl #en #base_model-unsloth/Phi-3-mini-4k-instruct #license-apache-2.0 #endpoints_compatible #region-us
# Uploaded model - Developed by: arvnoodle - License: apache-2.0 - Finetuned from model: unsloth/Phi-3-mini-4k-instruct This Phi-3 model was trained 2x faster with Unsloth and Hugging Face's TRL library. <img src="URL width="200"/>
[ "# Uploaded model\n\n- Developed by: arvnoodle\n- License: apache-2.0\n- Finetuned from model : unsloth/Phi-3-mini-4k-instruct\n\nThis mistral model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ "TAGS\n#transformers #safetensors #text-generation-inference #unsloth #mistral #trl #en #base_model-unsloth/Phi-3-mini-4k-instruct #license-apache-2.0 #endpoints_compatible #region-us \n", "# Uploaded model\n\n- Developed by: arvnoodle\n- License: apache-2.0\n- Finetuned from model : unsloth/Phi-3-mini-4k-instruct\n\nThis mistral model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ 62, 79 ]
[ "TAGS\n#transformers #safetensors #text-generation-inference #unsloth #mistral #trl #en #base_model-unsloth/Phi-3-mini-4k-instruct #license-apache-2.0 #endpoints_compatible #region-us \n# Uploaded model\n\n- Developed by: arvnoodle\n- License: apache-2.0\n- Finetuned from model : unsloth/Phi-3-mini-4k-instruct\n\nThis mistral model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
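The "How to Get Started" section above is left as [More Information Needed]; a minimal, hedged sketch follows, assuming the checkpoint loads as a standard Llama causal LM (only the repo id is taken from this record, everything else is an assumption).

```python
# Hedged getting-started sketch; the card itself provides no usage code.
import torch
from transformers import pipeline

pipe = pipeline(
    "text-generation",
    model="cilantro9246/7chhuyp",
    torch_dtype=torch.bfloat16,  # assumption
    device_map="auto",
)
print(pipe("What is a model card?", max_new_tokens=64)[0]["generated_text"])
```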
{"library_name": "transformers", "tags": []}
cilantro9246/7chhuyp
null
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T04:19:50+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 47, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
shallow6414/2p8jfrv
null
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T04:20:10+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 47, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
text-generation
transformers
# TooManyMix_LLM TooManyMix_LLM is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing): * [jdqwoi/TooManyMixed-LLM_01](https://huggingface.co/jdqwoi/TooManyMixed-LLM_01) * [jdqwoi/TooManyMixed-LLM_02](https://huggingface.co/jdqwoi/TooManyMixed-LLM_02) ## 🧩 Configuration ```yaml slices: - sources: - model: jdqwoi/TooManyMixed-LLM_01 layer_range: [0, 32] - model: jdqwoi/TooManyMixed-LLM_02 layer_range: [0, 32] merge_method: slerp base_model: jdqwoi/TooManyMixed-LLM_01 parameters: t: - filter: self_attn value: [0, 0.5, 0.3, 0.7, 1] - filter: mlp value: [1, 0.5, 0.7, 0.3, 0] - value: 0.5 dtype: bfloat16 ``` ## 💻 Usage ```python !pip install -qU transformers accelerate from transformers import AutoTokenizer import transformers import torch model = "jdqwoi/TooManyMix_LLM" messages = [{"role": "user", "content": "What is a large language model?"}] tokenizer = AutoTokenizer.from_pretrained(model) prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) pipeline = transformers.pipeline( "text-generation", model=model, torch_dtype=torch.float16, device_map="auto", ) outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95) print(outputs[0]["generated_text"]) ```
{"tags": ["merge", "mergekit", "lazymergekit", "jdqwoi/TooManyMixed-LLM_01", "jdqwoi/TooManyMixed-LLM_02"], "base_model": ["jdqwoi/TooManyMixed-LLM_01", "jdqwoi/TooManyMixed-LLM_02"]}
jdqwoi/TooManyMix_LLM
null
[ "transformers", "safetensors", "mistral", "text-generation", "merge", "mergekit", "lazymergekit", "jdqwoi/TooManyMixed-LLM_01", "jdqwoi/TooManyMixed-LLM_02", "base_model:jdqwoi/TooManyMixed-LLM_01", "base_model:jdqwoi/TooManyMixed-LLM_02", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T04:21:02+00:00
[]
[]
TAGS #transformers #safetensors #mistral #text-generation #merge #mergekit #lazymergekit #jdqwoi/TooManyMixed-LLM_01 #jdqwoi/TooManyMixed-LLM_02 #base_model-jdqwoi/TooManyMixed-LLM_01 #base_model-jdqwoi/TooManyMixed-LLM_02 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# TooManyMix_LLM TooManyMix_LLM is a merge of the following models using LazyMergekit: * jdqwoi/TooManyMixed-LLM_01 * jdqwoi/TooManyMixed-LLM_02 ## Configuration ## Usage
[ "# TooManyMix_LLM\n\nTooManyMix_LLM is a merge of the following models using LazyMergekit:\n* jdqwoi/TooManyMixed-LLM_01\n* jdqwoi/TooManyMixed-LLM_02", "## Configuration", "## Usage" ]
[ "TAGS\n#transformers #safetensors #mistral #text-generation #merge #mergekit #lazymergekit #jdqwoi/TooManyMixed-LLM_01 #jdqwoi/TooManyMixed-LLM_02 #base_model-jdqwoi/TooManyMixed-LLM_01 #base_model-jdqwoi/TooManyMixed-LLM_02 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# TooManyMix_LLM\n\nTooManyMix_LLM is a merge of the following models using LazyMergekit:\n* jdqwoi/TooManyMixed-LLM_01\n* jdqwoi/TooManyMixed-LLM_02", "## Configuration", "## Usage" ]
[ 116, 60, 3, 3 ]
[ "TAGS\n#transformers #safetensors #mistral #text-generation #merge #mergekit #lazymergekit #jdqwoi/TooManyMixed-LLM_01 #jdqwoi/TooManyMixed-LLM_02 #base_model-jdqwoi/TooManyMixed-LLM_01 #base_model-jdqwoi/TooManyMixed-LLM_02 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# TooManyMix_LLM\n\nTooManyMix_LLM is a merge of the following models using LazyMergekit:\n* jdqwoi/TooManyMixed-LLM_01\n* jdqwoi/TooManyMixed-LLM_02## Configuration## Usage" ]
text-classification
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
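The card gives no usage code or label map; a minimal, hedged sketch assuming a standard BERT sequence-classification head:

```python
# Hedged sketch: labels come from whatever the repo's config defines;
# the example input is a placeholder sentence, not from the training data.
from transformers import pipeline

classifier = pipeline("text-classification", model="atsizelti/turkish_org_classifier_16k")
print(classifier("Örnek bir Türkçe cümle."))  # placeholder Turkish input
```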
{"library_name": "transformers", "tags": []}
atsizelti/turkish_org_classifier_16k
null
[ "transformers", "safetensors", "bert", "text-classification", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-30T04:21:43+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #bert #text-classification #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #bert #text-classification #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 37, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #bert #text-classification #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
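The getting-started section above is unfilled; a minimal, hedged sketch follows, assuming the repository ships a chat template in its tokenizer config (not confirmed by the card):

```python
# Hedged sketch: loading the DPO-tuned chat model with plain transformers.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "MohammadOthman/OpenHermes-2.5-Mistral-7B-Orca-DPO"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Explain DPO fine-tuning in two sentences."},
]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
output = model.generate(input_ids, max_new_tokens=128)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```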
{"library_name": "transformers", "tags": []}
MohammadOthman/OpenHermes-2.5-Mistral-7B-Orca-DPO
null
[ "transformers", "safetensors", "mistral", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T04:21:48+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #mistral #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #mistral #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 47, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #mistral #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_EMP_H3K9ac-seqsight_32768_512_30M-L32_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_EMP_H3K9ac](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_EMP_H3K9ac) dataset. It achieves the following results on the evaluation set: - Loss: 0.5096 - F1 Score: 0.7734 - Accuracy: 0.7729 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:| | 0.5817 | 1.15 | 200 | 0.5913 | 0.6887 | 0.6927 | | 0.5339 | 2.3 | 400 | 0.6205 | 0.6522 | 0.6657 | | 0.5132 | 3.45 | 600 | 0.5668 | 0.7166 | 0.7186 | | 0.5061 | 4.6 | 800 | 0.5335 | 0.7329 | 0.7334 | | 0.4999 | 5.75 | 1000 | 0.5319 | 0.7328 | 0.7330 | | 0.4901 | 6.9 | 1200 | 0.5360 | 0.7338 | 0.7341 | | 0.487 | 8.05 | 1400 | 0.5536 | 0.7241 | 0.7262 | | 0.4783 | 9.2 | 1600 | 0.5589 | 0.7149 | 0.7186 | | 0.4752 | 10.34 | 1800 | 0.5505 | 0.7303 | 0.7319 | | 0.4725 | 11.49 | 2000 | 0.5447 | 0.7361 | 0.7370 | | 0.4628 | 12.64 | 2200 | 0.5480 | 0.7317 | 0.7334 | | 0.4647 | 13.79 | 2400 | 0.5368 | 0.7442 | 0.7452 | | 0.4587 | 14.94 | 2600 | 0.5183 | 0.7536 | 0.7531 | | 0.4518 | 16.09 | 2800 | 0.5482 | 0.7373 | 0.7388 | | 0.4483 | 17.24 | 3000 | 0.5362 | 0.7485 | 0.7492 | | 0.4448 | 18.39 | 3200 | 0.5329 | 0.7519 | 0.7521 | | 0.4423 | 19.54 | 3400 | 0.5261 | 0.7483 | 0.7481 | | 0.4362 | 20.69 | 3600 | 0.5187 | 0.7569 | 0.7564 | | 0.4329 | 21.84 | 3800 | 0.5539 | 0.7347 | 0.7370 | | 0.4273 | 22.99 | 4000 | 0.5805 | 0.7244 | 0.7280 | | 0.4263 | 24.14 | 4200 | 0.5338 | 0.7522 | 0.7521 | | 0.4153 | 25.29 | 4400 | 0.5495 | 0.7534 | 0.7535 | | 0.4194 | 26.44 | 4600 | 0.5493 | 0.7572 | 0.7571 | | 0.4125 | 27.59 | 4800 | 0.5311 | 0.7544 | 0.7546 | | 0.4093 | 28.74 | 5000 | 0.5474 | 0.7483 | 0.7488 | | 0.4153 | 29.89 | 5200 | 0.5588 | 0.7436 | 0.7438 | | 0.4062 | 31.03 | 5400 | 0.5699 | 0.7401 | 0.7413 | | 0.4034 | 32.18 | 5600 | 0.5563 | 0.7472 | 0.7478 | | 0.3941 | 33.33 | 5800 | 0.5614 | 0.7547 | 0.7546 | | 0.4054 | 34.48 | 6000 | 0.5466 | 0.7500 | 0.7499 | | 0.3897 | 35.63 | 6200 | 0.5369 | 0.7565 | 0.7560 | | 0.3964 | 36.78 | 6400 | 0.5498 | 0.7498 | 0.7499 | | 0.3841 | 37.93 | 6600 | 0.5737 | 0.7442 | 0.7449 | | 0.3878 | 39.08 | 6800 | 0.5691 | 0.7422 | 0.7424 | | 0.3843 | 40.23 | 7000 | 0.5700 | 0.7392 | 0.7398 | | 0.3824 | 41.38 | 7200 | 0.5768 | 0.7391 | 0.7398 | | 0.3807 | 42.53 | 7400 | 0.5628 | 0.7473 | 0.7474 | | 0.3792 | 43.68 | 7600 | 0.5603 | 0.7478 | 0.7478 | | 0.3783 | 44.83 | 7800 | 0.5697 | 0.7431 | 0.7434 | | 0.3768 | 45.98 | 8000 | 0.5539 | 0.7477 | 0.7474 | | 0.3742 | 47.13 | 8200 | 0.5758 | 0.7421 | 0.7424 | | 0.3746 | 48.28 | 8400 | 0.5785 | 0.7392 | 0.7395 | | 0.3716 | 49.43 | 8600 | 0.5693 | 0.7489 | 0.7488 | | 0.3702 | 50.57 | 8800 | 0.5805 | 0.7424 | 0.7427 | | 0.3675 
| 51.72 | 9000 | 0.5923 | 0.7381 | 0.7388 | | 0.369 | 52.87 | 9200 | 0.5896 | 0.7385 | 0.7391 | | 0.3655 | 54.02 | 9400 | 0.5891 | 0.7405 | 0.7409 | | 0.3646 | 55.17 | 9600 | 0.5869 | 0.7422 | 0.7427 | | 0.3627 | 56.32 | 9800 | 0.5785 | 0.7466 | 0.7467 | | 0.3617 | 57.47 | 10000 | 0.5803 | 0.7440 | 0.7442 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
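The card above only documents the adapter, its base model, and the training configuration; as a rough sketch of how such a PEFT sequence-classification checkpoint is typically loaded for inference, the snippet below uses `peft` and `transformers`. It assumes the base repository ships a compatible tokenizer, that the task is binary classification (`num_labels=2`), and that no `trust_remote_code` flag is needed — none of this is confirmed by the card.

```python
# Hypothetical loading sketch; repo layout, num_labels and tokenizer availability are assumptions.
import torch
from peft import PeftModel
from transformers import AutoModelForSequenceClassification, AutoTokenizer

base_id = "mahdibaghbanzadeh/seqsight_32768_512_30M"
adapter_id = "mahdibaghbanzadeh/GUE_EMP_H3K9ac-seqsight_32768_512_30M-L32_f"

tokenizer = AutoTokenizer.from_pretrained(base_id)  # assumes the base repo includes a tokenizer
base_model = AutoModelForSequenceClassification.from_pretrained(base_id, num_labels=2)
model = PeftModel.from_pretrained(base_model, adapter_id)  # attach the fine-tuned adapter weights
model.eval()

inputs = tokenizer("ACGTACGTACGTACGT", return_tensors="pt")  # toy DNA sequence, for illustration only
with torch.no_grad():
    probs = model(**inputs).logits.softmax(dim=-1)
print(probs)
```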
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_EMP_H3K9ac-seqsight_32768_512_30M-L32_f", "results": []}]}
mahdibaghbanzadeh/GUE_EMP_H3K9ac-seqsight_32768_512_30M-L32_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T04:21:52+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_EMP\_H3K9ac-seqsight\_32768\_512\_30M-L32\_f ================================================= This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_EMP\_H3K9ac dataset. It achieves the following results on the evaluation set: * Loss: 0.5096 * F1 Score: 0.7734 * Accuracy: 0.7729 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_EMP_H3K4me3-seqsight_32768_512_30M-L1_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_EMP_H3K4me3](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_EMP_H3K4me3) dataset. It achieves the following results on the evaluation set: - Loss: 0.6279 - F1 Score: 0.6600 - Accuracy: 0.6614 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:| | 0.6724 | 0.87 | 200 | 0.6653 | 0.6048 | 0.6076 | | 0.6521 | 1.74 | 400 | 0.6557 | 0.6151 | 0.6158 | | 0.6529 | 2.61 | 600 | 0.6530 | 0.6176 | 0.6177 | | 0.6433 | 3.48 | 800 | 0.6474 | 0.6226 | 0.6223 | | 0.645 | 4.35 | 1000 | 0.6444 | 0.6231 | 0.6239 | | 0.6426 | 5.22 | 1200 | 0.6666 | 0.5973 | 0.6065 | | 0.6386 | 6.09 | 1400 | 0.6468 | 0.6261 | 0.6266 | | 0.6351 | 6.96 | 1600 | 0.6556 | 0.6185 | 0.6204 | | 0.6332 | 7.83 | 1800 | 0.6383 | 0.6358 | 0.6359 | | 0.6323 | 8.7 | 2000 | 0.6411 | 0.6274 | 0.6274 | | 0.6294 | 9.57 | 2200 | 0.6424 | 0.6283 | 0.6296 | | 0.6282 | 10.43 | 2400 | 0.6412 | 0.6303 | 0.6310 | | 0.6257 | 11.3 | 2600 | 0.6514 | 0.6153 | 0.6198 | | 0.6248 | 12.17 | 2800 | 0.6429 | 0.6282 | 0.6291 | | 0.6216 | 13.04 | 3000 | 0.6408 | 0.6285 | 0.6304 | | 0.6217 | 13.91 | 3200 | 0.6472 | 0.6253 | 0.6299 | | 0.6203 | 14.78 | 3400 | 0.6342 | 0.6275 | 0.6283 | | 0.6178 | 15.65 | 3600 | 0.6449 | 0.6316 | 0.6340 | | 0.6169 | 16.52 | 3800 | 0.6425 | 0.6311 | 0.6334 | | 0.6175 | 17.39 | 4000 | 0.6414 | 0.6335 | 0.6356 | | 0.6187 | 18.26 | 4200 | 0.6366 | 0.6324 | 0.6334 | | 0.6142 | 19.13 | 4400 | 0.6372 | 0.6350 | 0.6364 | | 0.6144 | 20.0 | 4600 | 0.6373 | 0.6328 | 0.6345 | | 0.6143 | 20.87 | 4800 | 0.6336 | 0.6365 | 0.6367 | | 0.6121 | 21.74 | 5000 | 0.6438 | 0.6295 | 0.6329 | | 0.6126 | 22.61 | 5200 | 0.6392 | 0.6326 | 0.6359 | | 0.6123 | 23.48 | 5400 | 0.6446 | 0.6300 | 0.6348 | | 0.6108 | 24.35 | 5600 | 0.6339 | 0.6372 | 0.6383 | | 0.6109 | 25.22 | 5800 | 0.6554 | 0.6262 | 0.6345 | | 0.6076 | 26.09 | 6000 | 0.6478 | 0.6272 | 0.6329 | | 0.6098 | 26.96 | 6200 | 0.6392 | 0.6312 | 0.6351 | | 0.6086 | 27.83 | 6400 | 0.6554 | 0.6260 | 0.6351 | | 0.6064 | 28.7 | 6600 | 0.6385 | 0.6337 | 0.6364 | | 0.6092 | 29.57 | 6800 | 0.6343 | 0.6386 | 0.6410 | | 0.6032 | 30.43 | 7000 | 0.6460 | 0.6329 | 0.6386 | | 0.6104 | 31.3 | 7200 | 0.6428 | 0.6317 | 0.6372 | | 0.6078 | 32.17 | 7400 | 0.6475 | 0.6331 | 0.6402 | | 0.6052 | 33.04 | 7600 | 0.6336 | 0.6376 | 0.6405 | | 0.6053 | 33.91 | 7800 | 0.6369 | 0.6342 | 0.6378 | | 0.6076 | 34.78 | 8000 | 0.6369 | 0.6351 | 0.6389 | | 0.6026 | 35.65 | 8200 | 0.6360 | 0.6346 | 0.6380 | | 0.6033 | 36.52 | 8400 | 0.6402 | 0.6370 | 0.6416 | | 0.605 | 37.39 | 8600 | 0.6368 | 0.6354 | 0.6391 | | 0.601 | 38.26 | 8800 | 0.6395 | 0.6352 | 0.6397 | | 0.6053 | 
39.13 | 9000 | 0.6395 | 0.6378 | 0.6429 | | 0.6045 | 40.0 | 9200 | 0.6345 | 0.6374 | 0.6405 | | 0.6022 | 40.87 | 9400 | 0.6321 | 0.6417 | 0.6440 | | 0.604 | 41.74 | 9600 | 0.6334 | 0.6407 | 0.6435 | | 0.6062 | 42.61 | 9800 | 0.6341 | 0.6374 | 0.6408 | | 0.6016 | 43.48 | 10000 | 0.6344 | 0.6380 | 0.6413 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
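The results table above reports an F1 score and accuracy at each evaluation step but does not show how they were computed; one conventional way to produce such numbers during training is a `compute_metrics` callback passed to `transformers.Trainer`, sketched below (the choice of macro-averaged F1 here is an assumption, not something stated in the card).

```python
# Illustrative metrics callback; macro averaging for F1 is an assumption.
import numpy as np
from sklearn.metrics import accuracy_score, f1_score

def compute_metrics(eval_pred):
    logits, labels = eval_pred          # Trainer passes (predictions, label_ids)
    preds = np.argmax(logits, axis=-1)  # pick the highest-scoring class per example
    return {
        "f1": f1_score(labels, preds, average="macro"),
        "accuracy": accuracy_score(labels, preds),
    }
```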
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_EMP_H3K4me3-seqsight_32768_512_30M-L1_f", "results": []}]}
mahdibaghbanzadeh/GUE_EMP_H3K4me3-seqsight_32768_512_30M-L1_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T04:22:22+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_EMP\_H3K4me3-seqsight\_32768\_512\_30M-L1\_f ================================================= This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_EMP\_H3K4me3 dataset. It achieves the following results on the evaluation set: * Loss: 0.6279 * F1 Score: 0.6600 * Accuracy: 0.6614 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_EMP_H3K4me3-seqsight_32768_512_30M-L8_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_EMP_H3K4me3](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_EMP_H3K4me3) dataset. It achieves the following results on the evaluation set: - Loss: 0.6199 - F1 Score: 0.6667 - Accuracy: 0.6698 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:| | 0.6672 | 0.87 | 200 | 0.6615 | 0.6132 | 0.6152 | | 0.6439 | 1.74 | 400 | 0.6459 | 0.6231 | 0.6231 | | 0.6412 | 2.61 | 600 | 0.6482 | 0.6209 | 0.6234 | | 0.6295 | 3.48 | 800 | 0.6386 | 0.6372 | 0.6372 | | 0.6282 | 4.35 | 1000 | 0.6344 | 0.6327 | 0.6323 | | 0.6234 | 5.22 | 1200 | 0.6662 | 0.6025 | 0.6160 | | 0.6176 | 6.09 | 1400 | 0.6671 | 0.6179 | 0.6291 | | 0.6138 | 6.96 | 1600 | 0.6507 | 0.6316 | 0.6370 | | 0.6085 | 7.83 | 1800 | 0.6236 | 0.6435 | 0.6432 | | 0.6093 | 8.7 | 2000 | 0.6283 | 0.6425 | 0.6446 | | 0.6027 | 9.57 | 2200 | 0.6225 | 0.6476 | 0.6492 | | 0.6013 | 10.43 | 2400 | 0.6278 | 0.6439 | 0.6457 | | 0.5974 | 11.3 | 2600 | 0.6335 | 0.6411 | 0.6465 | | 0.5978 | 12.17 | 2800 | 0.6289 | 0.6457 | 0.6497 | | 0.5916 | 13.04 | 3000 | 0.6242 | 0.6445 | 0.6476 | | 0.5914 | 13.91 | 3200 | 0.6235 | 0.6489 | 0.6524 | | 0.5896 | 14.78 | 3400 | 0.6243 | 0.6465 | 0.6505 | | 0.5864 | 15.65 | 3600 | 0.6296 | 0.6491 | 0.6538 | | 0.5849 | 16.52 | 3800 | 0.6174 | 0.6572 | 0.6592 | | 0.5837 | 17.39 | 4000 | 0.6279 | 0.6473 | 0.6503 | | 0.585 | 18.26 | 4200 | 0.6204 | 0.6579 | 0.6611 | | 0.5802 | 19.13 | 4400 | 0.6223 | 0.6572 | 0.6598 | | 0.5784 | 20.0 | 4600 | 0.6207 | 0.6516 | 0.6554 | | 0.5788 | 20.87 | 4800 | 0.6239 | 0.6594 | 0.6630 | | 0.5772 | 21.74 | 5000 | 0.6308 | 0.6471 | 0.6519 | | 0.5765 | 22.61 | 5200 | 0.6179 | 0.6564 | 0.6590 | | 0.5741 | 23.48 | 5400 | 0.6391 | 0.6392 | 0.6495 | | 0.5735 | 24.35 | 5600 | 0.6255 | 0.6541 | 0.6582 | | 0.5715 | 25.22 | 5800 | 0.6391 | 0.6390 | 0.6481 | | 0.5686 | 26.09 | 6000 | 0.6380 | 0.6459 | 0.6527 | | 0.5695 | 26.96 | 6200 | 0.6258 | 0.6469 | 0.6541 | | 0.5671 | 27.83 | 6400 | 0.6481 | 0.6306 | 0.6435 | | 0.5667 | 28.7 | 6600 | 0.6278 | 0.6508 | 0.6554 | | 0.567 | 29.57 | 6800 | 0.6250 | 0.6557 | 0.6598 | | 0.5628 | 30.43 | 7000 | 0.6341 | 0.6460 | 0.6533 | | 0.5685 | 31.3 | 7200 | 0.6270 | 0.6499 | 0.6546 | | 0.5663 | 32.17 | 7400 | 0.6295 | 0.6484 | 0.6546 | | 0.5633 | 33.04 | 7600 | 0.6262 | 0.6493 | 0.6546 | | 0.5621 | 33.91 | 7800 | 0.6226 | 0.6564 | 0.6606 | | 0.5644 | 34.78 | 8000 | 0.6256 | 0.6548 | 0.6587 | | 0.5589 | 35.65 | 8200 | 0.6265 | 0.6565 | 0.6614 | | 0.5588 | 36.52 | 8400 | 0.6334 | 0.6470 | 0.6543 | | 0.5623 | 37.39 | 8600 | 0.6259 | 0.6523 | 0.6571 | | 0.5561 | 38.26 | 8800 | 0.6353 | 0.6506 | 0.6573 | | 0.5623 | 
39.13 | 9000 | 0.6298 | 0.6524 | 0.6582 | | 0.5584 | 40.0 | 9200 | 0.6240 | 0.6541 | 0.6582 | | 0.5577 | 40.87 | 9400 | 0.6227 | 0.6544 | 0.6579 | | 0.5591 | 41.74 | 9600 | 0.6240 | 0.6546 | 0.6584 | | 0.563 | 42.61 | 9800 | 0.6244 | 0.6533 | 0.6576 | | 0.5566 | 43.48 | 10000 | 0.6258 | 0.6543 | 0.6587 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_EMP_H3K4me3-seqsight_32768_512_30M-L8_f", "results": []}]}
mahdibaghbanzadeh/GUE_EMP_H3K4me3-seqsight_32768_512_30M-L8_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T04:22:46+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_EMP\_H3K4me3-seqsight\_32768\_512\_30M-L8\_f ================================================= This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_EMP\_H3K4me3 dataset. It achieves the following results on the evaluation set: * Loss: 0.6199 * F1 Score: 0.6667 * Accuracy: 0.6698 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_EMP_H3K4me3-seqsight_32768_512_30M-L32_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_EMP_H3K4me3](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_EMP_H3K4me3) dataset. It achieves the following results on the evaluation set: - Loss: 0.6248 - F1 Score: 0.6761 - Accuracy: 0.6777 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:| | 0.6637 | 0.87 | 200 | 0.6550 | 0.6195 | 0.6209 | | 0.6376 | 1.74 | 400 | 0.6438 | 0.6310 | 0.6326 | | 0.6322 | 2.61 | 600 | 0.6353 | 0.6408 | 0.6416 | | 0.6198 | 3.48 | 800 | 0.6305 | 0.6444 | 0.6448 | | 0.6141 | 4.35 | 1000 | 0.6260 | 0.6433 | 0.6435 | | 0.6064 | 5.22 | 1200 | 0.6410 | 0.6384 | 0.6448 | | 0.5977 | 6.09 | 1400 | 0.6605 | 0.6232 | 0.6375 | | 0.5927 | 6.96 | 1600 | 0.6323 | 0.6466 | 0.6524 | | 0.5833 | 7.83 | 1800 | 0.6178 | 0.6596 | 0.6606 | | 0.5848 | 8.7 | 2000 | 0.6235 | 0.6560 | 0.6592 | | 0.5737 | 9.57 | 2200 | 0.6151 | 0.6564 | 0.6582 | | 0.5733 | 10.43 | 2400 | 0.6241 | 0.6578 | 0.6601 | | 0.5658 | 11.3 | 2600 | 0.6331 | 0.6475 | 0.6546 | | 0.5669 | 12.17 | 2800 | 0.6178 | 0.6548 | 0.6576 | | 0.5576 | 13.04 | 3000 | 0.6209 | 0.6604 | 0.6628 | | 0.5577 | 13.91 | 3200 | 0.6188 | 0.6523 | 0.6560 | | 0.5507 | 14.78 | 3400 | 0.6287 | 0.6479 | 0.6533 | | 0.5483 | 15.65 | 3600 | 0.6358 | 0.6585 | 0.6625 | | 0.5452 | 16.52 | 3800 | 0.6206 | 0.6618 | 0.6633 | | 0.5422 | 17.39 | 4000 | 0.6360 | 0.6548 | 0.6579 | | 0.5416 | 18.26 | 4200 | 0.6539 | 0.6452 | 0.6533 | | 0.5367 | 19.13 | 4400 | 0.6351 | 0.6593 | 0.6617 | | 0.5333 | 20.0 | 4600 | 0.6371 | 0.6540 | 0.6598 | | 0.5308 | 20.87 | 4800 | 0.6404 | 0.6613 | 0.6639 | | 0.5284 | 21.74 | 5000 | 0.6435 | 0.6561 | 0.6598 | | 0.5246 | 22.61 | 5200 | 0.6354 | 0.6653 | 0.6671 | | 0.5236 | 23.48 | 5400 | 0.6640 | 0.6461 | 0.6541 | | 0.5195 | 24.35 | 5600 | 0.6482 | 0.6523 | 0.6568 | | 0.5162 | 25.22 | 5800 | 0.6601 | 0.6504 | 0.6552 | | 0.5139 | 26.09 | 6000 | 0.6628 | 0.6573 | 0.6620 | | 0.5121 | 26.96 | 6200 | 0.6513 | 0.6523 | 0.6571 | | 0.5083 | 27.83 | 6400 | 0.6792 | 0.6408 | 0.65 | | 0.5081 | 28.7 | 6600 | 0.6416 | 0.6605 | 0.6628 | | 0.5059 | 29.57 | 6800 | 0.6477 | 0.6586 | 0.6617 | | 0.5005 | 30.43 | 7000 | 0.6573 | 0.6549 | 0.6590 | | 0.5098 | 31.3 | 7200 | 0.6490 | 0.6573 | 0.6611 | | 0.5007 | 32.17 | 7400 | 0.6486 | 0.6604 | 0.6625 | | 0.4976 | 33.04 | 7600 | 0.6524 | 0.6543 | 0.6582 | | 0.4946 | 33.91 | 7800 | 0.6502 | 0.6584 | 0.6617 | | 0.4978 | 34.78 | 8000 | 0.6525 | 0.6635 | 0.6658 | | 0.4894 | 35.65 | 8200 | 0.6656 | 0.6563 | 0.6592 | | 0.4883 | 36.52 | 8400 | 0.6661 | 0.6535 | 0.6576 | | 0.4944 | 37.39 | 8600 | 0.6607 | 0.6583 | 0.6617 | | 0.4869 | 38.26 | 8800 | 0.6732 | 0.6511 | 0.6565 | | 0.4922 | 
39.13 | 9000 | 0.6633 | 0.6551 | 0.6587 | | 0.4886 | 40.0 | 9200 | 0.6539 | 0.6564 | 0.6590 | | 0.4866 | 40.87 | 9400 | 0.6586 | 0.6583 | 0.6609 | | 0.4864 | 41.74 | 9600 | 0.6606 | 0.6561 | 0.6592 | | 0.4919 | 42.61 | 9800 | 0.6595 | 0.6565 | 0.6595 | | 0.4856 | 43.48 | 10000 | 0.6606 | 0.6585 | 0.6617 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_EMP_H3K4me3-seqsight_32768_512_30M-L32_f", "results": []}]}
mahdibaghbanzadeh/GUE_EMP_H3K4me3-seqsight_32768_512_30M-L32_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T04:24:47+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_EMP\_H3K4me3-seqsight\_32768\_512\_30M-L32\_f ================================================== This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_EMP\_H3K4me3 dataset. It achieves the following results on the evaluation set: * Loss: 0.6248 * F1 Score: 0.6761 * Accuracy: 0.6777 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
reinforcement-learning
ml-agents
# **poca** Agent playing **SoccerTwos** This is a trained model of a **poca** agent playing **SoccerTwos** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://unity-technologies.github.io/ml-agents/ML-Agents-Toolkit-Documentation/ We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: - A *short tutorial* where you teach Huggy the Dog 🐶 to fetch the stick and then play with him directly in your browser: https://huggingface.co/learn/deep-rl-course/unitbonus1/introduction - A *longer tutorial* to understand how ML-Agents works: https://huggingface.co/learn/deep-rl-course/unit5/introduction ### Resume the training ```bash mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser** 1. If the environment is part of ML-Agents official environments, go to https://huggingface.co/unity 2. Step 1: Find your model_id: Rudolph314/poca-SoccerTwos 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
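If you want the trained network files locally (for example the exported *.onnx mentioned in step 3 above), one generic option is to pull the whole repository with `huggingface_hub`; the snippet below is only a sketch and makes no assumption about the file names inside the repo.

```python
# Sketch: download every file in the model repository to a local cache directory.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(repo_id="Rudolph314/poca-SoccerTwos")
print("Model files downloaded to:", local_dir)
```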
{"library_name": "ml-agents", "tags": ["SoccerTwos", "deep-reinforcement-learning", "reinforcement-learning", "ML-Agents-SoccerTwos"]}
Rudolph314/poca-SoccerTwos
null
[ "ml-agents", "tensorboard", "onnx", "SoccerTwos", "deep-reinforcement-learning", "reinforcement-learning", "ML-Agents-SoccerTwos", "region:us" ]
null
2024-04-30T04:24:53+00:00
[]
[]
TAGS #ml-agents #tensorboard #onnx #SoccerTwos #deep-reinforcement-learning #reinforcement-learning #ML-Agents-SoccerTwos #region-us
# poca Agent playing SoccerTwos This is a trained model of a poca agent playing SoccerTwos using the Unity ML-Agents Library. ## Usage (with ML-Agents) The Documentation: URL We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: - A *short tutorial* where you teach Huggy the Dog to fetch the stick and then play with him directly in your browser: URL - A *longer tutorial* to understand how ML-Agents works: URL ### Resume the training ### Watch your Agent play You can watch your agent playing directly in your browser 1. If the environment is part of ML-Agents official environments, go to URL 2. Step 1: Find your model_id: Rudolph314/poca-SoccerTwos 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play
[ "# poca Agent playing SoccerTwos\n This is a trained model of a poca agent playing SoccerTwos\n using the Unity ML-Agents Library.\n\n ## Usage (with ML-Agents)\n The Documentation: URL\n\n We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub:\n - A *short tutorial* where you teach Huggy the Dog to fetch the stick and then play with him directly in your\n browser: URL\n - A *longer tutorial* to understand how works ML-Agents:\n URL\n\n ### Resume the training\n \n\n ### Watch your Agent play\n You can watch your agent playing directly in your browser\n\n 1. If the environment is part of ML-Agents official environments, go to URL\n 2. Step 1: Find your model_id: Rudolph314/poca-SoccerTwos\n 3. Step 2: Select your *.nn /*.onnx file\n 4. Click on Watch the agent play" ]
[ "TAGS\n#ml-agents #tensorboard #onnx #SoccerTwos #deep-reinforcement-learning #reinforcement-learning #ML-Agents-SoccerTwos #region-us \n", "# poca Agent playing SoccerTwos\n This is a trained model of a poca agent playing SoccerTwos\n using the Unity ML-Agents Library.\n\n ## Usage (with ML-Agents)\n The Documentation: URL\n\n We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub:\n - A *short tutorial* where you teach Huggy the Dog to fetch the stick and then play with him directly in your\n browser: URL\n - A *longer tutorial* to understand how works ML-Agents:\n URL\n\n ### Resume the training\n \n\n ### Watch your Agent play\n You can watch your agent playing directly in your browser\n\n 1. If the environment is part of ML-Agents official environments, go to URL\n 2. Step 1: Find your model_id: Rudolph314/poca-SoccerTwos\n 3. Step 2: Select your *.nn /*.onnx file\n 4. Click on Watch the agent play" ]
[ 39, 205 ]
[ "TAGS\n#ml-agents #tensorboard #onnx #SoccerTwos #deep-reinforcement-learning #reinforcement-learning #ML-Agents-SoccerTwos #region-us \n# poca Agent playing SoccerTwos\n This is a trained model of a poca agent playing SoccerTwos\n using the Unity ML-Agents Library.\n\n ## Usage (with ML-Agents)\n The Documentation: URL\n\n We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub:\n - A *short tutorial* where you teach Huggy the Dog to fetch the stick and then play with him directly in your\n browser: URL\n - A *longer tutorial* to understand how works ML-Agents:\n URL\n\n ### Resume the training\n \n\n ### Watch your Agent play\n You can watch your agent playing directly in your browser\n\n 1. If the environment is part of ML-Agents official environments, go to URL\n 2. Step 1: Find your model_id: Rudolph314/poca-SoccerTwos\n 3. Step 2: Select your *.nn /*.onnx file\n 4. Click on Watch the agent play" ]
text-generation
transformers
# Finetune Mistral, Gemma, Llama 2-5x faster with 70% less memory via Unsloth! Directly quantized 4bit model with `bitsandbytes`. Built with Meta Llama 3 We have a Google Colab Tesla T4 notebook for Llama-3 8b here: https://colab.research.google.com/drive/135ced7oHytdxu3N2DNe1Z0kqjyYIkDXp?usp=sharing [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/Discord%20button.png" width="200"/>](https://discord.gg/u54VK8m8tk) [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/buy%20me%20a%20coffee%20button.png" width="200"/>](https://ko-fi.com/unsloth) [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth) ## ✨ Finetune for Free All notebooks are **beginner friendly**! Add your dataset, click "Run All", and you'll get a 2x faster finetuned model which can be exported to GGUF, vLLM or uploaded to Hugging Face. | Unsloth supports | Free Notebooks | Performance | Memory use | |-----------------|--------------------------------------------------------------------------------------------------------------------------|-------------|----------| | **Llama-3 8b** | [▶️ Start on Colab](https://colab.research.google.com/drive/135ced7oHytdxu3N2DNe1Z0kqjyYIkDXp?usp=sharing) | 2.4x faster | 58% less | | **Gemma 7b** | [▶️ Start on Colab](https://colab.research.google.com/drive/10NbwlsRChbma1v55m8LAPYG15uQv6HLo?usp=sharing) | 2.4x faster | 58% less | | **Mistral 7b** | [▶️ Start on Colab](https://colab.research.google.com/drive/1Dyauq4kTZoLewQ1cApceUQVNcnnNTzg_?usp=sharing) | 2.2x faster | 62% less | | **Llama-2 7b** | [▶️ Start on Colab](https://colab.research.google.com/drive/1lBzz5KeZJKXjvivbYvmGarix9Ao6Wxe5?usp=sharing) | 2.2x faster | 43% less | | **TinyLlama** | [▶️ Start on Colab](https://colab.research.google.com/drive/1AZghoNBQaMDgWJpi4RbffGM1h6raLUj9?usp=sharing) | 3.9x faster | 74% less | | **CodeLlama 34b** A100 | [▶️ Start on Colab](https://colab.research.google.com/drive/1y7A0AxE3y8gdj4AVkl2aZX47Xu3P1wJT?usp=sharing) | 1.9x faster | 27% less | | **Mistral 7b** 1xT4 | [▶️ Start on Kaggle](https://www.kaggle.com/code/danielhanchen/kaggle-mistral-7b-unsloth-notebook) | 5x faster\* | 62% less | | **DPO - Zephyr** | [▶️ Start on Colab](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) | 1.9x faster | 19% less | - This [conversational notebook](https://colab.research.google.com/drive/1Aau3lgPzeZKQ-98h69CCu1UJcvIBLmy2?usp=sharing) is useful for ShareGPT ChatML / Vicuna templates. - This [text completion notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) is for raw text. This [DPO notebook](https://colab.research.google.com/drive/15vttTpzzVXv_tJwEk-hIcQ0S9FcEWvwP?usp=sharing) replicates Zephyr. - \* Kaggle has 2x T4s, but we use 1. Due to overhead, 1x T4 is 5x faster.
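The card describes a pre-quantized 4-bit checkpoint intended for Unsloth fine-tuning but does not include a loading call; the sketch below follows the pattern used in Unsloth's public notebooks, and the sequence length chosen here is an assumption.

```python
# Loading sketch in the style of Unsloth's notebooks; max_seq_length is an illustrative choice.
from unsloth import FastLanguageModel

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="Akirami/vanilla-llama-3-8b-bnb-4bit",  # this repository
    max_seq_length=2048,  # assumed context window for fine-tuning
    load_in_4bit=True,    # the checkpoint is already quantized with bitsandbytes
)
```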
{"language": ["en"], "license": "llama2", "library_name": "transformers", "tags": ["unsloth", "transformers", "llama", "llama-3"]}
Akirami/vanilla-llama-3-8b-bnb-4bit
null
[ "transformers", "safetensors", "llama", "text-generation", "unsloth", "llama-3", "en", "license:llama2", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "4-bit", "region:us" ]
null
2024-04-30T04:25:17+00:00
[]
[ "en" ]
TAGS #transformers #safetensors #llama #text-generation #unsloth #llama-3 #en #license-llama2 #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us
Finetune Mistral, Gemma, Llama 2-5x faster with 70% less memory via Unsloth! ============================================================================ Directly quantized 4bit model with 'bitsandbytes'. Built with Meta Llama 3 We have a Google Colab Tesla T4 notebook for Llama-3 8b here: URL <img src="URL width="200"/> <img src="URL width="200"/> <img src="URL width="200"/> Finetune for Free ----------------- All notebooks are beginner friendly! Add your dataset, click "Run All", and you'll get a 2x faster finetuned model which can be exported to GGUF, vLLM or uploaded to Hugging Face. * This conversational notebook is useful for ShareGPT ChatML / Vicuna templates. * This text completion notebook is for raw text. This DPO notebook replicates Zephyr. * \* Kaggle has 2x T4s, but we use 1. Due to overhead, 1x T4 is 5x faster.
[]
[ "TAGS\n#transformers #safetensors #llama #text-generation #unsloth #llama-3 #en #license-llama2 #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us \n" ]
[ 55 ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #unsloth #llama-3 #en #license-llama2 #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us \n" ]
automatic-speech-recognition
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # whisperFinetune This model is a fine-tuned version of [openai/whisper-tiny.en](https://huggingface.co/openai/whisper-tiny.en) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.5745 - Wer: 28.7011 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.001 - train_batch_size: 128 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 100 ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:------:|:----:|:---------------:|:-------:| | 2.8818 | 0.2778 | 10 | 1.9210 | 39.4679 | | 0.7557 | 0.5556 | 20 | 0.6926 | 27.2926 | | 0.5718 | 0.8333 | 30 | 0.5717 | 23.7559 | | 0.421 | 1.1111 | 40 | 0.5161 | 21.5023 | | 0.3088 | 1.3889 | 50 | 0.5103 | 21.0955 | | 0.3415 | 1.6667 | 60 | 0.5155 | 21.6901 | | 0.3434 | 1.9444 | 70 | 0.5176 | 30.0156 | | 0.1371 | 2.2222 | 80 | 0.5303 | 20.6886 | | 0.1349 | 2.5 | 90 | 0.5589 | 22.0344 | | 0.1461 | 2.7778 | 100 | 0.5745 | 28.7011 | ### Framework versions - Transformers 4.40.1 - Pytorch 2.2.1+cu121 - Datasets 2.19.1.dev0 - Tokenizers 0.19.1
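The card does not show an inference example; a fine-tuned Whisper checkpoint like this one can usually be exercised through the `transformers` ASR pipeline, as in the minimal sketch below, where the audio path is a placeholder rather than a file shipped with the model.

```python
# Minimal ASR sketch; "sample.wav" is a placeholder path, not part of this repository.
from transformers import pipeline

asr = pipeline("automatic-speech-recognition", model="shljessie/whisperFinetune")
print(asr("sample.wav")["text"])
```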
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "metrics": ["wer"], "base_model": "openai/whisper-tiny.en", "model-index": [{"name": "whisperFinetune", "results": []}]}
shljessie/whisperFinetune
null
[ "transformers", "tensorboard", "safetensors", "whisper", "automatic-speech-recognition", "generated_from_trainer", "base_model:openai/whisper-tiny.en", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-04-30T04:25:36+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #whisper #automatic-speech-recognition #generated_from_trainer #base_model-openai/whisper-tiny.en #license-apache-2.0 #endpoints_compatible #region-us
whisperFinetune =============== This model is a fine-tuned version of openai/URL on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 0.5745 * Wer: 28.7011 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.001 * train\_batch\_size: 128 * eval\_batch\_size: 8 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * lr\_scheduler\_warmup\_steps: 500 * training\_steps: 100 ### Training results ### Framework versions * Transformers 4.40.1 * Pytorch 2.2.1+cu121 * Datasets 2.19.1.dev0 * Tokenizers 0.19.1
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.001\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* lr\\_scheduler\\_warmup\\_steps: 500\n* training\\_steps: 100", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.1\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.1.dev0\n* Tokenizers 0.19.1" ]
[ "TAGS\n#transformers #tensorboard #safetensors #whisper #automatic-speech-recognition #generated_from_trainer #base_model-openai/whisper-tiny.en #license-apache-2.0 #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.001\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* lr\\_scheduler\\_warmup\\_steps: 500\n* training\\_steps: 100", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.1\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.1.dev0\n* Tokenizers 0.19.1" ]
[ 54, 114, 5, 47 ]
[ "TAGS\n#transformers #tensorboard #safetensors #whisper #automatic-speech-recognition #generated_from_trainer #base_model-openai/whisper-tiny.en #license-apache-2.0 #endpoints_compatible #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.001\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* lr\\_scheduler\\_warmup\\_steps: 500\n* training\\_steps: 100### Training results### Framework versions\n\n\n* Transformers 4.40.1\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.1.dev0\n* Tokenizers 0.19.1" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_EMP_H4-seqsight_32768_512_30M-L1_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_EMP_H4](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_EMP_H4) dataset. It achieves the following results on the evaluation set: - Loss: 0.2787 - F1 Score: 0.8962 - Accuracy: 0.8960 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:------:|:-----:|:---------------:|:--------:|:--------:| | 0.4361 | 2.17 | 200 | 0.3195 | 0.8804 | 0.8802 | | 0.3195 | 4.35 | 400 | 0.3192 | 0.8772 | 0.8768 | | 0.3052 | 6.52 | 600 | 0.3107 | 0.8745 | 0.8741 | | 0.3046 | 8.7 | 800 | 0.3134 | 0.8765 | 0.8761 | | 0.2905 | 10.87 | 1000 | 0.3050 | 0.8825 | 0.8823 | | 0.2882 | 13.04 | 1200 | 0.3107 | 0.8738 | 0.8734 | | 0.2853 | 15.22 | 1400 | 0.3008 | 0.8840 | 0.8836 | | 0.2813 | 17.39 | 1600 | 0.3047 | 0.8772 | 0.8768 | | 0.2758 | 19.57 | 1800 | 0.2978 | 0.8900 | 0.8898 | | 0.2777 | 21.74 | 2000 | 0.3170 | 0.8725 | 0.8720 | | 0.2722 | 23.91 | 2200 | 0.3090 | 0.8772 | 0.8768 | | 0.2693 | 26.09 | 2400 | 0.3118 | 0.8786 | 0.8782 | | 0.2696 | 28.26 | 2600 | 0.2978 | 0.8866 | 0.8864 | | 0.2645 | 30.43 | 2800 | 0.2999 | 0.8840 | 0.8836 | | 0.2651 | 32.61 | 3000 | 0.3025 | 0.8840 | 0.8836 | | 0.2593 | 34.78 | 3200 | 0.2983 | 0.8839 | 0.8836 | | 0.2579 | 36.96 | 3400 | 0.3068 | 0.8840 | 0.8836 | | 0.2566 | 39.13 | 3600 | 0.3051 | 0.8820 | 0.8816 | | 0.2533 | 41.3 | 3800 | 0.2938 | 0.8934 | 0.8932 | | 0.2536 | 43.48 | 4000 | 0.3004 | 0.8839 | 0.8836 | | 0.2569 | 45.65 | 4200 | 0.2923 | 0.8886 | 0.8884 | | 0.2477 | 47.83 | 4400 | 0.2996 | 0.8880 | 0.8877 | | 0.249 | 50.0 | 4600 | 0.2921 | 0.8900 | 0.8898 | | 0.2485 | 52.17 | 4800 | 0.2950 | 0.8846 | 0.8843 | | 0.2513 | 54.35 | 5000 | 0.3077 | 0.8807 | 0.8802 | | 0.2448 | 56.52 | 5200 | 0.3044 | 0.8827 | 0.8823 | | 0.243 | 58.7 | 5400 | 0.2998 | 0.8861 | 0.8857 | | 0.2423 | 60.87 | 5600 | 0.3085 | 0.8875 | 0.8871 | | 0.2435 | 63.04 | 5800 | 0.3065 | 0.8834 | 0.8830 | | 0.241 | 65.22 | 6000 | 0.2984 | 0.8895 | 0.8891 | | 0.24 | 67.39 | 6200 | 0.3087 | 0.8875 | 0.8871 | | 0.2387 | 69.57 | 6400 | 0.2938 | 0.8915 | 0.8912 | | 0.2418 | 71.74 | 6600 | 0.2994 | 0.8895 | 0.8891 | | 0.2411 | 73.91 | 6800 | 0.2972 | 0.8922 | 0.8919 | | 0.2367 | 76.09 | 7000 | 0.3017 | 0.8895 | 0.8891 | | 0.2353 | 78.26 | 7200 | 0.3026 | 0.8868 | 0.8864 | | 0.2358 | 80.43 | 7400 | 0.2961 | 0.8901 | 0.8898 | | 0.236 | 82.61 | 7600 | 0.2898 | 0.8928 | 0.8925 | | 0.2363 | 84.78 | 7800 | 0.3005 | 0.8902 | 0.8898 | | 0.2322 | 86.96 | 8000 | 0.2967 | 0.8881 | 0.8877 | | 0.2365 | 89.13 | 8200 | 0.2971 | 0.8895 | 0.8891 | | 0.2345 | 91.3 | 8400 | 0.2962 | 0.8888 | 0.8884 | | 0.2311 | 93.48 | 8600 | 0.2978 | 0.8902 | 0.8898 | | 0.2326 | 95.65 | 8800 | 0.2956 | 0.8915 | 0.8912 | | 0.2361 | 97.83 | 
9000 | 0.2949 | 0.8915 | 0.8912 | | 0.2363 | 100.0 | 9200 | 0.3017 | 0.8888 | 0.8884 | | 0.2318 | 102.17 | 9400 | 0.2972 | 0.8881 | 0.8877 | | 0.2341 | 104.35 | 9600 | 0.2977 | 0.8895 | 0.8891 | | 0.2316 | 106.52 | 9800 | 0.2960 | 0.8901 | 0.8898 | | 0.2336 | 108.7 | 10000 | 0.2960 | 0.8901 | 0.8898 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
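The hyperparameters listed above map fairly directly onto `transformers.TrainingArguments`; the sketch below shows one plausible way to express them, with the output directory left as a placeholder, since the actual training script is not part of the card.

```python
# One plausible TrainingArguments mapping of the listed hyperparameters; output_dir is a placeholder.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="GUE_EMP_H4-seqsight_32768_512_30M-L1_f",  # placeholder directory name
    learning_rate=5e-4,
    per_device_train_batch_size=128,
    per_device_eval_batch_size=128,
    seed=42,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
    max_steps=10_000,
)
```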
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_EMP_H4-seqsight_32768_512_30M-L1_f", "results": []}]}
mahdibaghbanzadeh/GUE_EMP_H4-seqsight_32768_512_30M-L1_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T04:26:10+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_EMP\_H4-seqsight\_32768\_512\_30M-L1\_f ============================================ This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_EMP\_H4 dataset. It achieves the following results on the evaluation set: * Loss: 0.2787 * F1 Score: 0.8962 * Accuracy: 0.8960 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
text-classification
transformers
# Model Trained Using AutoTrain - Problem type: Text Classification ## Validation Metrics loss: 1.065280795097351 f1_macro: 0.2095479509928179 f1_micro: 0.4584103512014787 f1_weighted: 0.2881768494245037 precision_macro: 0.1528034504004929 precision_micro: 0.4584103512014787 precision_weighted: 0.21014005008866307 recall_macro: 0.3333333333333333 recall_micro: 0.4584103512014787 recall_weighted: 0.4584103512014787 accuracy: 0.4584103512014787
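The card reports validation metrics only; to query the classifier directly, a standard `transformers` pipeline call such as the sketch below should work, reusing the widget example text from the card's metadata (the label names returned come from the model's own config).

```python
# Minimal inference sketch for the AutoTrain classifier.
from transformers import pipeline

classifier = pipeline("text-classification", model="AnirudhVV/autotrain-4vbeh-1p6bd")
print(classifier("I love AutoTrain"))  # widget example from the model card metadata
```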
{"tags": ["autotrain", "text-classification"], "datasets": ["autotrain-4vbeh-1p6bd/autotrain-data"], "widget": [{"text": "I love AutoTrain"}]}
AnirudhVV/autotrain-4vbeh-1p6bd
null
[ "transformers", "tensorboard", "safetensors", "xlm-roberta", "text-classification", "autotrain", "dataset:autotrain-4vbeh-1p6bd/autotrain-data", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-30T04:29:37+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #xlm-roberta #text-classification #autotrain #dataset-autotrain-4vbeh-1p6bd/autotrain-data #autotrain_compatible #endpoints_compatible #region-us
# Model Trained Using AutoTrain - Problem type: Text Classification ## Validation Metrics loss: 1.065280795097351 f1_macro: 0.2095479509928179 f1_micro: 0.4584103512014787 f1_weighted: 0.2881768494245037 precision_macro: 0.1528034504004929 precision_micro: 0.4584103512014787 precision_weighted: 0.21014005008866307 recall_macro: 0.3333333333333333 recall_micro: 0.4584103512014787 recall_weighted: 0.4584103512014787 accuracy: 0.4584103512014787
[ "# Model Trained Using AutoTrain\n\n- Problem type: Text Classification", "## Validation Metrics\nloss: 1.065280795097351\n\nf1_macro: 0.2095479509928179\n\nf1_micro: 0.4584103512014787\n\nf1_weighted: 0.2881768494245037\n\nprecision_macro: 0.1528034504004929\n\nprecision_micro: 0.4584103512014787\n\nprecision_weighted: 0.21014005008866307\n\nrecall_macro: 0.3333333333333333\n\nrecall_micro: 0.4584103512014787\n\nrecall_weighted: 0.4584103512014787\n\naccuracy: 0.4584103512014787" ]
[ "TAGS\n#transformers #tensorboard #safetensors #xlm-roberta #text-classification #autotrain #dataset-autotrain-4vbeh-1p6bd/autotrain-data #autotrain_compatible #endpoints_compatible #region-us \n", "# Model Trained Using AutoTrain\n\n- Problem type: Text Classification", "## Validation Metrics\nloss: 1.065280795097351\n\nf1_macro: 0.2095479509928179\n\nf1_micro: 0.4584103512014787\n\nf1_weighted: 0.2881768494245037\n\nprecision_macro: 0.1528034504004929\n\nprecision_micro: 0.4584103512014787\n\nprecision_weighted: 0.21014005008866307\n\nrecall_macro: 0.3333333333333333\n\nrecall_micro: 0.4584103512014787\n\nrecall_weighted: 0.4584103512014787\n\naccuracy: 0.4584103512014787" ]
[ 57, 12, 170 ]
[ "TAGS\n#transformers #tensorboard #safetensors #xlm-roberta #text-classification #autotrain #dataset-autotrain-4vbeh-1p6bd/autotrain-data #autotrain_compatible #endpoints_compatible #region-us \n# Model Trained Using AutoTrain\n\n- Problem type: Text Classification## Validation Metrics\nloss: 1.065280795097351\n\nf1_macro: 0.2095479509928179\n\nf1_micro: 0.4584103512014787\n\nf1_weighted: 0.2881768494245037\n\nprecision_macro: 0.1528034504004929\n\nprecision_micro: 0.4584103512014787\n\nprecision_weighted: 0.21014005008866307\n\nrecall_macro: 0.3333333333333333\n\nrecall_micro: 0.4584103512014787\n\nrecall_weighted: 0.4584103512014787\n\naccuracy: 0.4584103512014787" ]
null
null
# Summer Keto ACV Gummies Reviews France - Summer Keto Gummies Experiences, Price Summer Keto ACV Gummies Reviews - UK: Fitness and weight loss have become popular topics recently. People often think a great deal about how they look and carry themselves. Social media is the main reason so many people succeed in the modern world, because it underpins everything. Currently, more than 25% of people are interested in vlogging, and everyone wants to keep up with the latest trends, visit exotic places, and learn new things in general. ## **[Click here to buy now from the official Summer Keto ACV Gummies website](https://adtocart.xyz/summer-keto-fr)** ## What does the medical research say about Summer Keto ACV Gummies? The latest health survey shows that around 26% of adults in the United Kingdom suffer from obesity and weight gain. It also indicates that around 38% of adults are overweight. These figures rise every year, and obesity is becoming a serious health problem in the UK. A large share of the UK population chooses traditional weight-loss methods and products. According to the new survey, traditional weight-loss products are harmful to the body. They cause side effects such as migraines and headaches. A new product, Summer Keto ACV Gummies, is made from natural ingredients. These gummies are used by many customers in the UK to reduce obesity. Most customers who use the product daily achieve an attractive figure within a few weeks. The gummies are said to help melt stubborn fat in the body within a few weeks. Many people report higher energy levels after taking a daily dose of these gummies. This natural product helps burn fat instead of carbohydrates and gives your figure an attractive appearance. In addition, the product has received overwhelmingly positive reviews from customers. ## How much do Summer Keto ACV Gummies cost in the United Kingdom? The price for 2 bottles of Summer Keto ACV gummies is £49.95 per bottle. For additional discounts, you can try the 4-bottle pack for only £39.95 per bottle. The super-value pack of Summer Keto ACV Gummies is available at £39.95 per bottle. Every order placed on the official Summer Keto ACV Gummies website qualifies for free delivery within the UK. ## How does weight loss take place in the body? Summer Keto ACV Gummies can help burn stubborn fat around the belly, thighs, chin, and neck. They can help you lose up to 5 lbs in just 2 weeks. Beyond that, the gummies can also speed up the metabolic process and burn fat throughout the body. You can lose up to 20 lbs in 3 to 4 weeks. The gummies can stabilize appetite and reduce frequent hunger and midnight cravings. They can leave your stomach feeling full and keep you from getting hungry for hours. In addition, the gummies can bring about a sudden transformation of your body within a few weeks.
## Benefits of Summer Keto ACV gummies Summer Keto ACV Gummies are made from natural ingredients such as apple extracts and BHB ketones. They can bring benefits to both body and mind, such as: ## **[Click here to buy now from the official Summer Keto ACV Gummies website](https://adtocart.xyz/summer-keto-fr)**
{}
VKapseln475/SummerKetoACV102
null
[ "region:us" ]
null
2024-04-30T04:31:06+00:00
[]
[]
TAGS #region-us
# Summer Keto ACV Gummies Reviews France - Summer Keto Gummies Experiences, Price Summer Keto ACV Gummies Reviews - UK: Fitness and weight loss have become popular topics recently. People often think a great deal about how they look and carry themselves. Social media is the main reason so many people succeed in the modern world, because it underpins everything. Currently, more than 25% of people are interested in vlogging, and everyone wants to keep up with the latest trends, visit exotic places, and learn new things in general. ## Click here to buy now from the official Summer Keto ACV Gummies website ## What does the medical research say about Summer Keto ACV Gummies? The latest health survey shows that around 26% of adults in the United Kingdom suffer from obesity and weight gain. It also indicates that around 38% of adults are overweight. These figures rise every year, and obesity is becoming a serious health problem in the UK. A large share of the UK population chooses traditional weight-loss methods and products. According to the new survey, traditional weight-loss products are harmful to the body. They cause side effects such as migraines and headaches. A new product, Summer Keto ACV Gummies, is made from natural ingredients. These gummies are used by many customers in the UK to reduce obesity. Most customers who use the product daily achieve an attractive figure within a few weeks. The gummies are said to help melt stubborn fat in the body within a few weeks. Many people report higher energy levels after taking a daily dose of these gummies. This natural product helps burn fat instead of carbohydrates and gives your figure an attractive appearance. In addition, the product has received overwhelmingly positive reviews from customers. ## How much do Summer Keto ACV Gummies cost in the United Kingdom? The price for 2 bottles of Summer Keto ACV gummies is £49.95 per bottle. For additional discounts, you can try the 4-bottle pack for only £39.95 per bottle. The super-value pack of Summer Keto ACV Gummies is available at £39.95 per bottle. Every order placed on the official Summer Keto ACV Gummies website qualifies for free delivery within the UK. ## How does weight loss take place in the body? Summer Keto ACV Gummies can help burn stubborn fat around the belly, thighs, chin, and neck. They can help you lose up to 5 lbs in just 2 weeks. Beyond that, the gummies can also speed up the metabolic process and burn fat throughout the body. You can lose up to 20 lbs in 3 to 4 weeks. The gummies can stabilize appetite and reduce frequent hunger and midnight cravings. They can leave your stomach feeling full and keep you from getting hungry for hours. In addition, the gummies can bring about a sudden transformation of your body within a few weeks.
## Benefits of Summer Keto ACV gummies Summer Keto ACV Gummies are made from natural ingredients such as apple extracts and BHB ketones. They can bring benefits to both body and mind, such as: ## Click here to buy now from the official Summer Keto ACV Gummies website
[ "# Summer Keto ACV Gummies Avis France - Summer Keto Gummies Expériences, Prix\n\nSummer Keto ACV Gummies Avis - Uni : L’ajustement et la perte de poids sont devenus populaires récemment. Les gens réfléchissent souvent beaucoup à leur apparence et à leur comportement. Les médias sociaux sont la principale raison pour laquelle tant d’individus réussissent dans le monde moderne, car ils sous-tendent tout. Actuellement, plus de 25 % des personnes s'intéressent au vlogging, et tout le monde souhaite se tenir au courant des dernières modes, visiter des endroits exotiques et apprendre de nouvelles choses en général.", "## Cliquez ici pour acheter maintenant sur le site officiel de Summer Keto ACV Gummies", "## Que dit l’étude médicale sur les Summer Keto ACV Gummies ?\n\nLa dernière enquête sur la santé montre qu'environ 26 % des adultes au Royaume-Uni souffrent d'obésité et de prise de poids. Il indique également qu’environ 38 % des adultes sont en surpoids. Ces chiffres augmentent chaque année et l'obésité devient un grave problème de santé au Royaume-Uni. Une grande partie de la population du Royaume-Uni choisit les méthodes et produits traditionnels de perte de poids.\n\nSelon la nouvelle enquête, les produits amaigrissants traditionnels sont nocifs pour le corps. Ils provoquent des effets secondaires dans l’organisme tels que des migraines et des maux de tête.\n\nUn nouveau produit Summer Keto ACV Gummies est composé d'ingrédients naturels. Ces bonbons gélifiés sont utilisés par de nombreux clients au Royaume-Uni pour réduire l'obésité. La plupart des clients qui utilisent ce produit quotidiennement obtiennent une belle silhouette en quelques semaines. On dit que les bonbons aident à faire fondre les graisses tenaces dans le corps en quelques semaines.\n\nDe nombreuses personnes obtiennent des niveaux d’énergie plus élevés après avoir pris une dose quotidienne de ces bonbons gélifiés. Ce produit naturel aide à brûler les graisses au lieu des glucides et donne à votre silhouette une apparence attrayante. De plus, le produit a reçu un maximum de critiques positives de la part des clients.", "## Quel est le coût de vente des Summer Keto ACV Gummies au Royaume-Uni ?\n\nLe prix de 2 bouteilles de bonbons gélifiés Summer Keto ACV est de 49,95 £ par bouteille. Pour bénéficier de réductions supplémentaires, vous pouvez essayer le pack de 4 bouteilles pour seulement 39,95 £ par bouteille. Le pack super économique de bonbons gélifiés Summer Keto ACV Gummies est disponible au prix de 39,95 £ par bouteille. Chaque commande que vous passez sur le site officiel de Summer Keto ACV Gummies est éligible à la livraison gratuite au Royaume-Uni.", "## Comment s’opère la perte de poids dans le corps ?\n\nSummer Keto ACV Gummies peut aider à brûler les graisses tenaces dans le ventre, les cuisses, le menton et le cou. Ils peuvent aider à perdre du poids jusqu’à 5 livres. en seulement 2 semaines. En dehors de cela, les bonbons peuvent également accélérer le processus métabolique et brûler les graisses de tout le corps. Vous pouvez perdre jusqu'à 20 livres. de poids en 3 à 4 semaines.\n\nLes bonbons gélifiés peuvent stabiliser l’appétit et réduire la faim fréquente et les fringales de minuit. Ils peuvent donner une sensation de satiété à votre ventre et ne pas vous donner faim pendant des heures. 
De plus, les bonbons gélifiés peuvent provoquer une transformation soudaine de votre corps en quelques semaines.", "## Avantages des bonbons gélifiés Summer Keto ACV\n\nSummer Keto ACV Gummies est composé d'ingrédients naturels comme des extraits de pomme et des cétones BHB. Ils peuvent apporter des bienfaits au corps et à l’esprit tels que :", "## Cliquez ici pour acheter maintenant sur le site officiel de Summer Keto ACV Gummies" ]
[ "TAGS\n#region-us \n", "# Summer Keto ACV Gummies Avis France - Summer Keto Gummies Expériences, Prix\n\nSummer Keto ACV Gummies Avis - Uni : L’ajustement et la perte de poids sont devenus populaires récemment. Les gens réfléchissent souvent beaucoup à leur apparence et à leur comportement. Les médias sociaux sont la principale raison pour laquelle tant d’individus réussissent dans le monde moderne, car ils sous-tendent tout. Actuellement, plus de 25 % des personnes s'intéressent au vlogging, et tout le monde souhaite se tenir au courant des dernières modes, visiter des endroits exotiques et apprendre de nouvelles choses en général.", "## Cliquez ici pour acheter maintenant sur le site officiel de Summer Keto ACV Gummies", "## Que dit l’étude médicale sur les Summer Keto ACV Gummies ?\n\nLa dernière enquête sur la santé montre qu'environ 26 % des adultes au Royaume-Uni souffrent d'obésité et de prise de poids. Il indique également qu’environ 38 % des adultes sont en surpoids. Ces chiffres augmentent chaque année et l'obésité devient un grave problème de santé au Royaume-Uni. Une grande partie de la population du Royaume-Uni choisit les méthodes et produits traditionnels de perte de poids.\n\nSelon la nouvelle enquête, les produits amaigrissants traditionnels sont nocifs pour le corps. Ils provoquent des effets secondaires dans l’organisme tels que des migraines et des maux de tête.\n\nUn nouveau produit Summer Keto ACV Gummies est composé d'ingrédients naturels. Ces bonbons gélifiés sont utilisés par de nombreux clients au Royaume-Uni pour réduire l'obésité. La plupart des clients qui utilisent ce produit quotidiennement obtiennent une belle silhouette en quelques semaines. On dit que les bonbons aident à faire fondre les graisses tenaces dans le corps en quelques semaines.\n\nDe nombreuses personnes obtiennent des niveaux d’énergie plus élevés après avoir pris une dose quotidienne de ces bonbons gélifiés. Ce produit naturel aide à brûler les graisses au lieu des glucides et donne à votre silhouette une apparence attrayante. De plus, le produit a reçu un maximum de critiques positives de la part des clients.", "## Quel est le coût de vente des Summer Keto ACV Gummies au Royaume-Uni ?\n\nLe prix de 2 bouteilles de bonbons gélifiés Summer Keto ACV est de 49,95 £ par bouteille. Pour bénéficier de réductions supplémentaires, vous pouvez essayer le pack de 4 bouteilles pour seulement 39,95 £ par bouteille. Le pack super économique de bonbons gélifiés Summer Keto ACV Gummies est disponible au prix de 39,95 £ par bouteille. Chaque commande que vous passez sur le site officiel de Summer Keto ACV Gummies est éligible à la livraison gratuite au Royaume-Uni.", "## Comment s’opère la perte de poids dans le corps ?\n\nSummer Keto ACV Gummies peut aider à brûler les graisses tenaces dans le ventre, les cuisses, le menton et le cou. Ils peuvent aider à perdre du poids jusqu’à 5 livres. en seulement 2 semaines. En dehors de cela, les bonbons peuvent également accélérer le processus métabolique et brûler les graisses de tout le corps. Vous pouvez perdre jusqu'à 20 livres. de poids en 3 à 4 semaines.\n\nLes bonbons gélifiés peuvent stabiliser l’appétit et réduire la faim fréquente et les fringales de minuit. Ils peuvent donner une sensation de satiété à votre ventre et ne pas vous donner faim pendant des heures. 
De plus, les bonbons gélifiés peuvent provoquer une transformation soudaine de votre corps en quelques semaines.", "## Avantages des bonbons gélifiés Summer Keto ACV\n\nSummer Keto ACV Gummies est composé d'ingrédients naturels comme des extraits de pomme et des cétones BHB. Ils peuvent apporter des bienfaits au corps et à l’esprit tels que :", "## Cliquez ici pour acheter maintenant sur le site officiel de Summer Keto ACV Gummies" ]
[ 5, 196, 27, 444, 173, 256, 70, 27 ]
[ "TAGS\n#region-us \n# Summer Keto ACV Gummies Avis France - Summer Keto Gummies Expériences, Prix\n\nSummer Keto ACV Gummies Avis - Uni : L’ajustement et la perte de poids sont devenus populaires récemment. Les gens réfléchissent souvent beaucoup à leur apparence et à leur comportement. Les médias sociaux sont la principale raison pour laquelle tant d’individus réussissent dans le monde moderne, car ils sous-tendent tout. Actuellement, plus de 25 % des personnes s'intéressent au vlogging, et tout le monde souhaite se tenir au courant des dernières modes, visiter des endroits exotiques et apprendre de nouvelles choses en général.## Cliquez ici pour acheter maintenant sur le site officiel de Summer Keto ACV Gummies## Que dit l’étude médicale sur les Summer Keto ACV Gummies ?\n\nLa dernière enquête sur la santé montre qu'environ 26 % des adultes au Royaume-Uni souffrent d'obésité et de prise de poids. Il indique également qu’environ 38 % des adultes sont en surpoids. Ces chiffres augmentent chaque année et l'obésité devient un grave problème de santé au Royaume-Uni. Une grande partie de la population du Royaume-Uni choisit les méthodes et produits traditionnels de perte de poids.\n\nSelon la nouvelle enquête, les produits amaigrissants traditionnels sont nocifs pour le corps. Ils provoquent des effets secondaires dans l’organisme tels que des migraines et des maux de tête.\n\nUn nouveau produit Summer Keto ACV Gummies est composé d'ingrédients naturels. Ces bonbons gélifiés sont utilisés par de nombreux clients au Royaume-Uni pour réduire l'obésité. La plupart des clients qui utilisent ce produit quotidiennement obtiennent une belle silhouette en quelques semaines. On dit que les bonbons aident à faire fondre les graisses tenaces dans le corps en quelques semaines.\n\nDe nombreuses personnes obtiennent des niveaux d’énergie plus élevés après avoir pris une dose quotidienne de ces bonbons gélifiés. Ce produit naturel aide à brûler les graisses au lieu des glucides et donne à votre silhouette une apparence attrayante. De plus, le produit a reçu un maximum de critiques positives de la part des clients.## Quel est le coût de vente des Summer Keto ACV Gummies au Royaume-Uni ?\n\nLe prix de 2 bouteilles de bonbons gélifiés Summer Keto ACV est de 49,95 £ par bouteille. Pour bénéficier de réductions supplémentaires, vous pouvez essayer le pack de 4 bouteilles pour seulement 39,95 £ par bouteille. Le pack super économique de bonbons gélifiés Summer Keto ACV Gummies est disponible au prix de 39,95 £ par bouteille. Chaque commande que vous passez sur le site officiel de Summer Keto ACV Gummies est éligible à la livraison gratuite au Royaume-Uni.## Comment s’opère la perte de poids dans le corps ?\n\nSummer Keto ACV Gummies peut aider à brûler les graisses tenaces dans le ventre, les cuisses, le menton et le cou. Ils peuvent aider à perdre du poids jusqu’à 5 livres. en seulement 2 semaines. En dehors de cela, les bonbons peuvent également accélérer le processus métabolique et brûler les graisses de tout le corps. Vous pouvez perdre jusqu'à 20 livres. de poids en 3 à 4 semaines.\n\nLes bonbons gélifiés peuvent stabiliser l’appétit et réduire la faim fréquente et les fringales de minuit. Ils peuvent donner une sensation de satiété à votre ventre et ne pas vous donner faim pendant des heures. 
De plus, les bonbons gélifiés peuvent provoquer une transformation soudaine de votre corps en quelques semaines.## Avantages des bonbons gélifiés Summer Keto ACV\n\nSummer Keto ACV Gummies est composé d'ingrédients naturels comme des extraits de pomme et des cétones BHB. Ils peuvent apporter des bienfaits au corps et à l’esprit tels que :## Cliquez ici pour acheter maintenant sur le site officiel de Summer Keto ACV Gummies" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_EMP_H4-seqsight_32768_512_30M-L8_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_EMP_H4](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_EMP_H4) dataset. It achieves the following results on the evaluation set: - Loss: 0.2796 - F1 Score: 0.8974 - Accuracy: 0.8973 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:------:|:-----:|:---------------:|:--------:|:--------:| | 0.3949 | 2.17 | 200 | 0.3073 | 0.8806 | 0.8802 | | 0.2985 | 4.35 | 400 | 0.3049 | 0.8813 | 0.8809 | | 0.2837 | 6.52 | 600 | 0.2934 | 0.8898 | 0.8898 | | 0.28 | 8.7 | 800 | 0.3121 | 0.8732 | 0.8727 | | 0.2662 | 10.87 | 1000 | 0.2953 | 0.8906 | 0.8905 | | 0.2628 | 13.04 | 1200 | 0.2932 | 0.8804 | 0.8802 | | 0.2545 | 15.22 | 1400 | 0.2913 | 0.8853 | 0.8850 | | 0.2481 | 17.39 | 1600 | 0.2990 | 0.8820 | 0.8816 | | 0.2409 | 19.57 | 1800 | 0.2841 | 0.8961 | 0.8960 | | 0.24 | 21.74 | 2000 | 0.2988 | 0.8895 | 0.8891 | | 0.2333 | 23.91 | 2200 | 0.2900 | 0.8887 | 0.8884 | | 0.2279 | 26.09 | 2400 | 0.2836 | 0.8901 | 0.8898 | | 0.2268 | 28.26 | 2600 | 0.2898 | 0.8891 | 0.8891 | | 0.221 | 30.43 | 2800 | 0.2876 | 0.8901 | 0.8898 | | 0.2186 | 32.61 | 3000 | 0.2898 | 0.8928 | 0.8925 | | 0.2119 | 34.78 | 3200 | 0.2841 | 0.8921 | 0.8919 | | 0.214 | 36.96 | 3400 | 0.3009 | 0.8929 | 0.8925 | | 0.2104 | 39.13 | 3600 | 0.2906 | 0.8858 | 0.8857 | | 0.2023 | 41.3 | 3800 | 0.2903 | 0.8886 | 0.8884 | | 0.2025 | 43.48 | 4000 | 0.3002 | 0.8894 | 0.8891 | | 0.2039 | 45.65 | 4200 | 0.2933 | 0.8879 | 0.8877 | | 0.1957 | 47.83 | 4400 | 0.3002 | 0.8837 | 0.8836 | | 0.1974 | 50.0 | 4600 | 0.2938 | 0.8851 | 0.8850 | | 0.1917 | 52.17 | 4800 | 0.3058 | 0.8833 | 0.8830 | | 0.1949 | 54.35 | 5000 | 0.3103 | 0.8840 | 0.8836 | | 0.1884 | 56.52 | 5200 | 0.3055 | 0.8860 | 0.8857 | | 0.1859 | 58.7 | 5400 | 0.3017 | 0.8888 | 0.8884 | | 0.1825 | 60.87 | 5600 | 0.3068 | 0.8847 | 0.8843 | | 0.183 | 63.04 | 5800 | 0.3102 | 0.8840 | 0.8836 | | 0.178 | 65.22 | 6000 | 0.3112 | 0.8874 | 0.8871 | | 0.1789 | 67.39 | 6200 | 0.3041 | 0.8901 | 0.8898 | | 0.1766 | 69.57 | 6400 | 0.3150 | 0.8874 | 0.8871 | | 0.1778 | 71.74 | 6600 | 0.3118 | 0.8908 | 0.8905 | | 0.1749 | 73.91 | 6800 | 0.3055 | 0.8893 | 0.8891 | | 0.1711 | 76.09 | 7000 | 0.3166 | 0.8901 | 0.8898 | | 0.1709 | 78.26 | 7200 | 0.3134 | 0.8900 | 0.8898 | | 0.1681 | 80.43 | 7400 | 0.3146 | 0.8886 | 0.8884 | | 0.1696 | 82.61 | 7600 | 0.3152 | 0.8871 | 0.8871 | | 0.1688 | 84.78 | 7800 | 0.3242 | 0.8888 | 0.8884 | | 0.1638 | 86.96 | 8000 | 0.3179 | 0.8880 | 0.8877 | | 0.1663 | 89.13 | 8200 | 0.3152 | 0.8853 | 0.8850 | | 0.1662 | 91.3 | 8400 | 0.3132 | 0.8859 | 0.8857 | | 0.161 | 93.48 | 8600 | 0.3181 | 0.8866 | 0.8864 | | 0.1647 | 95.65 | 8800 | 0.3166 | 0.8885 | 0.8884 | | 0.1635 | 97.83 | 9000 | 
0.3175 | 0.8852 | 0.8850 | | 0.1643 | 100.0 | 9200 | 0.3221 | 0.8874 | 0.8871 | | 0.1608 | 102.17 | 9400 | 0.3188 | 0.8859 | 0.8857 | | 0.1634 | 104.35 | 9600 | 0.3203 | 0.8867 | 0.8864 | | 0.161 | 106.52 | 9800 | 0.3191 | 0.8845 | 0.8843 | | 0.1615 | 108.7 | 10000 | 0.3195 | 0.8873 | 0.8871 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
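The hyperparameters listed in this card map directly onto the 🤗 Trainer API. Below is a minimal sketch of how such a run could be configured; the LoRA adapter settings (`r`, `lora_alpha`, `target_modules`) and the two-label sequence-classification head are assumptions not stated in the card, and the evaluation cadence of 200 steps is inferred from the results table.

```python
# Hedged sketch: only the optimizer/scheduler values come from the card; the PEFT
# adapter config and the classification head below are illustrative assumptions.
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          TrainingArguments, Trainer)
from peft import LoraConfig, get_peft_model

base_id = "mahdibaghbanzadeh/seqsight_32768_512_30M"
tokenizer = AutoTokenizer.from_pretrained(base_id)
model = AutoModelForSequenceClassification.from_pretrained(base_id, num_labels=2)

# Adapter rank and target module names are placeholders; the card does not give them.
peft_config = LoraConfig(r=8, lora_alpha=16, task_type="SEQ_CLS",
                         target_modules=["query", "value"])
model = get_peft_model(model, peft_config)

args = TrainingArguments(
    output_dir="GUE_EMP_H4-seqsight_32768_512_30M-L8_f",
    learning_rate=5e-4,                 # learning_rate: 0.0005
    per_device_train_batch_size=128,    # train_batch_size: 128
    per_device_eval_batch_size=128,     # eval_batch_size: 128
    seed=42,
    lr_scheduler_type="linear",
    max_steps=10_000,                   # training_steps: 10000
    evaluation_strategy="steps",
    eval_steps=200,                     # the results table reports metrics every 200 steps
)
# Datasets are omitted here; plug in the tokenized GUE_EMP_H4 splits to train.
# trainer = Trainer(model=model, args=args, train_dataset=..., eval_dataset=...)
```

The default Trainer optimizer (AdamW with betas (0.9, 0.999) and epsilon 1e-08) already matches the values reported above, so no explicit optimizer arguments are needed in this sketch.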
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_EMP_H4-seqsight_32768_512_30M-L8_f", "results": []}]}
mahdibaghbanzadeh/GUE_EMP_H4-seqsight_32768_512_30M-L8_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T04:32:35+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_EMP\_H4-seqsight\_32768\_512\_30M-L8\_f ============================================ This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_EMP\_H4 dataset. It achieves the following results on the evaluation set: * Loss: 0.2796 * F1 Score: 0.8974 * Accuracy: 0.8973 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
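The "How to Get Started with the Model" section in this card is left as a placeholder. Based only on the repository tags (llama, text-generation), a generic loading pattern would look like the sketch below; the prompt and generation settings are illustrative, not taken from the card.

```python
# Minimal sketch; the card gives no usage code, so this follows the standard
# transformers causal-LM pattern implied by the tags (llama, text-generation).
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "terry69/tiny-llama-20p-full"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

inputs = tokenizer("Hello, my name is", return_tensors="pt")  # illustrative prompt
output_ids = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```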
{"library_name": "transformers", "tags": []}
terry69/tiny-llama-20p-full
null
[ "transformers", "safetensors", "llama", "text-generation", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T04:32:53+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #llama #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 44, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
automatic-speech-recognition
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # WAV2VEC-FINETUNE-TAMIL-1 This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the common_voice_11_0 dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 20 - num_epochs: 2 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.40.0 - Pytorch 2.2.1+cu121 - Datasets 2.19.0 - Tokenizers 0.19.1
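The card above does not include a usage snippet, so the sketch below shows the generic transformers ASR pipeline pattern suggested by the tags (wav2vec2, automatic-speech-recognition); the audio file name is a placeholder.

```python
# Minimal sketch; the file path is a placeholder and this exact usage is not
# confirmed by the card. Wav2Vec2 CTC models expect 16 kHz mono audio.
from transformers import pipeline

asr = pipeline("automatic-speech-recognition",
               model="Vignesh-M/WAV2VEC-FINETUNE-TAMIL-1")
result = asr("tamil_sample.wav")  # placeholder path to a local recording
print(result["text"])
```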
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "datasets": ["common_voice_11_0"], "base_model": "facebook/wav2vec2-xls-r-300m", "model-index": [{"name": "WAV2VEC-FINETUNE-TAMIL-1", "results": []}]}
Vignesh-M/WAV2VEC-FINETUNE-TAMIL-1
null
[ "transformers", "tensorboard", "safetensors", "wav2vec2", "automatic-speech-recognition", "generated_from_trainer", "dataset:common_voice_11_0", "base_model:facebook/wav2vec2-xls-r-300m", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-04-30T04:32:57+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #wav2vec2 #automatic-speech-recognition #generated_from_trainer #dataset-common_voice_11_0 #base_model-facebook/wav2vec2-xls-r-300m #license-apache-2.0 #endpoints_compatible #region-us
# WAV2VEC-FINETUNE-TAMIL-1 This model is a fine-tuned version of facebook/wav2vec2-xls-r-300m on the common_voice_11_0 dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 20 - num_epochs: 2 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.40.0 - Pytorch 2.2.1+cu121 - Datasets 2.19.0 - Tokenizers 0.19.1
[ "# WAV2VEC-FINETUNE-TAMIL-1\n\nThis model is a fine-tuned version of facebook/wav2vec2-xls-r-300m on the common_voice_11_0 dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 0.0003\n- train_batch_size: 16\n- eval_batch_size: 8\n- seed: 42\n- gradient_accumulation_steps: 2\n- total_train_batch_size: 32\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- lr_scheduler_warmup_steps: 20\n- num_epochs: 2\n- mixed_precision_training: Native AMP", "### Training results", "### Framework versions\n\n- Transformers 4.40.0\n- Pytorch 2.2.1+cu121\n- Datasets 2.19.0\n- Tokenizers 0.19.1" ]
[ "TAGS\n#transformers #tensorboard #safetensors #wav2vec2 #automatic-speech-recognition #generated_from_trainer #dataset-common_voice_11_0 #base_model-facebook/wav2vec2-xls-r-300m #license-apache-2.0 #endpoints_compatible #region-us \n", "# WAV2VEC-FINETUNE-TAMIL-1\n\nThis model is a fine-tuned version of facebook/wav2vec2-xls-r-300m on the common_voice_11_0 dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 0.0003\n- train_batch_size: 16\n- eval_batch_size: 8\n- seed: 42\n- gradient_accumulation_steps: 2\n- total_train_batch_size: 32\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- lr_scheduler_warmup_steps: 20\n- num_epochs: 2\n- mixed_precision_training: Native AMP", "### Training results", "### Framework versions\n\n- Transformers 4.40.0\n- Pytorch 2.2.1+cu121\n- Datasets 2.19.0\n- Tokenizers 0.19.1" ]
[ 78, 51, 7, 9, 9, 4, 133, 5, 44 ]
[ "TAGS\n#transformers #tensorboard #safetensors #wav2vec2 #automatic-speech-recognition #generated_from_trainer #dataset-common_voice_11_0 #base_model-facebook/wav2vec2-xls-r-300m #license-apache-2.0 #endpoints_compatible #region-us \n# WAV2VEC-FINETUNE-TAMIL-1\n\nThis model is a fine-tuned version of facebook/wav2vec2-xls-r-300m on the common_voice_11_0 dataset.## Model description\n\nMore information needed## Intended uses & limitations\n\nMore information needed## Training and evaluation data\n\nMore information needed## Training procedure### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 0.0003\n- train_batch_size: 16\n- eval_batch_size: 8\n- seed: 42\n- gradient_accumulation_steps: 2\n- total_train_batch_size: 32\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- lr_scheduler_warmup_steps: 20\n- num_epochs: 2\n- mixed_precision_training: Native AMP### Training results### Framework versions\n\n- Transformers 4.40.0\n- Pytorch 2.2.1+cu121\n- Datasets 2.19.0\n- Tokenizers 0.19.1" ]
null
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
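This card is an unfilled template and the record declares no pipeline tag, so only a generic inspection-and-load sketch is possible. The repository name hints at a Llama-2 question-answering fine-tune, but the card does not confirm that, and the repo may hold adapter-only weights that would need a different loading path (for example via peft).

```python
# Hedged sketch: no pipeline tag or architecture is stated, so inspect the config
# first; everything beyond the repo id is an assumption.
from transformers import AutoConfig, AutoModel, AutoTokenizer

repo = "amks313/llama2_qa"
config = AutoConfig.from_pretrained(repo)          # check the declared architecture
print(config.model_type, config.architectures)
model = AutoModel.from_pretrained(repo)
tokenizer = AutoTokenizer.from_pretrained(repo)
```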
{"library_name": "transformers", "tags": []}
amks313/llama2_qa
null
[ "transformers", "safetensors", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
2024-04-30T04:33:05+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #arxiv-1910.09700 #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #arxiv-1910.09700 #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 26, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #arxiv-1910.09700 #endpoints_compatible #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_EMP_H4-seqsight_32768_512_30M-L32_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_EMP_H4](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_EMP_H4) dataset. It achieves the following results on the evaluation set: - Loss: 0.3352 - F1 Score: 0.8852 - Accuracy: 0.8850 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:------:|:-----:|:---------------:|:--------:|:--------:| | 0.3712 | 2.17 | 200 | 0.3065 | 0.8834 | 0.8830 | | 0.2877 | 4.35 | 400 | 0.2900 | 0.8919 | 0.8919 | | 0.2685 | 6.52 | 600 | 0.2932 | 0.8851 | 0.8850 | | 0.2599 | 8.7 | 800 | 0.3087 | 0.8786 | 0.8782 | | 0.2416 | 10.87 | 1000 | 0.2910 | 0.8803 | 0.8802 | | 0.2365 | 13.04 | 1200 | 0.3027 | 0.8813 | 0.8809 | | 0.2243 | 15.22 | 1400 | 0.2986 | 0.8800 | 0.8795 | | 0.2134 | 17.39 | 1600 | 0.3094 | 0.8868 | 0.8864 | | 0.2025 | 19.57 | 1800 | 0.2979 | 0.8901 | 0.8898 | | 0.2007 | 21.74 | 2000 | 0.3136 | 0.8759 | 0.8754 | | 0.1901 | 23.91 | 2200 | 0.3097 | 0.8874 | 0.8871 | | 0.182 | 26.09 | 2400 | 0.3008 | 0.8940 | 0.8939 | | 0.1777 | 28.26 | 2600 | 0.3158 | 0.8948 | 0.8946 | | 0.1681 | 30.43 | 2800 | 0.3206 | 0.8813 | 0.8809 | | 0.1622 | 32.61 | 3000 | 0.3273 | 0.8933 | 0.8932 | | 0.1515 | 34.78 | 3200 | 0.3242 | 0.8962 | 0.8960 | | 0.1531 | 36.96 | 3400 | 0.3262 | 0.8901 | 0.8898 | | 0.1447 | 39.13 | 3600 | 0.3404 | 0.8858 | 0.8857 | | 0.1353 | 41.3 | 3800 | 0.3585 | 0.8798 | 0.8795 | | 0.131 | 43.48 | 4000 | 0.3822 | 0.8805 | 0.8802 | | 0.1291 | 45.65 | 4200 | 0.3702 | 0.8827 | 0.8830 | | 0.12 | 47.83 | 4400 | 0.3924 | 0.8783 | 0.8782 | | 0.1165 | 50.0 | 4600 | 0.3935 | 0.8794 | 0.8795 | | 0.1134 | 52.17 | 4800 | 0.4138 | 0.8731 | 0.8727 | | 0.1109 | 54.35 | 5000 | 0.4328 | 0.8758 | 0.8754 | | 0.1051 | 56.52 | 5200 | 0.3864 | 0.8798 | 0.8795 | | 0.0994 | 58.7 | 5400 | 0.4100 | 0.8805 | 0.8802 | | 0.095 | 60.87 | 5600 | 0.4347 | 0.8764 | 0.8761 | | 0.0976 | 63.04 | 5800 | 0.4336 | 0.8762 | 0.8761 | | 0.0899 | 65.22 | 6000 | 0.4530 | 0.8710 | 0.8706 | | 0.0908 | 67.39 | 6200 | 0.4437 | 0.8724 | 0.8720 | | 0.084 | 69.57 | 6400 | 0.4855 | 0.8724 | 0.8720 | | 0.0867 | 71.74 | 6600 | 0.4605 | 0.8804 | 0.8802 | | 0.0779 | 73.91 | 6800 | 0.4823 | 0.8716 | 0.8713 | | 0.0805 | 76.09 | 7000 | 0.4758 | 0.8750 | 0.8747 | | 0.0778 | 78.26 | 7200 | 0.4791 | 0.8743 | 0.8741 | | 0.0709 | 80.43 | 7400 | 0.4960 | 0.8743 | 0.8741 | | 0.0718 | 82.61 | 7600 | 0.4910 | 0.8823 | 0.8823 | | 0.0748 | 84.78 | 7800 | 0.5038 | 0.8784 | 0.8782 | | 0.0677 | 86.96 | 8000 | 0.5160 | 0.8764 | 0.8761 | | 0.0685 | 89.13 | 8200 | 0.5056 | 0.8757 | 0.8754 | | 0.0682 | 91.3 | 8400 | 0.5076 | 0.8729 | 0.8727 | | 0.0644 | 93.48 | 8600 | 0.5138 | 0.8743 | 0.8741 | | 0.0647 | 95.65 | 8800 | 0.5183 | 0.8775 | 0.8775 | | 0.0661 | 97.83 | 
9000 | 0.5152 | 0.8750 | 0.8747 | | 0.0658 | 100.0 | 9200 | 0.5207 | 0.8764 | 0.8761 | | 0.0607 | 102.17 | 9400 | 0.5205 | 0.8771 | 0.8768 | | 0.0627 | 104.35 | 9600 | 0.5205 | 0.8777 | 0.8775 | | 0.0604 | 106.52 | 9800 | 0.5226 | 0.8743 | 0.8741 | | 0.0596 | 108.7 | 10000 | 0.5268 | 0.8757 | 0.8754 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
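Because this repository stores a PEFT adapter rather than a standalone checkpoint, inference requires attaching it to the base model. Below is a minimal sketch under the assumption of a two-label sequence-classification head and a DNA-sequence input; both are inferred from the reported F1/accuracy metrics and the GUE dataset name, not stated explicitly in the card.

```python
# Minimal sketch; the task head, label count, and example input are assumptions.
import torch
from peft import PeftModel
from transformers import AutoModelForSequenceClassification, AutoTokenizer

base_id = "mahdibaghbanzadeh/seqsight_32768_512_30M"
adapter_id = "mahdibaghbanzadeh/GUE_EMP_H4-seqsight_32768_512_30M-L32_f"

base = AutoModelForSequenceClassification.from_pretrained(base_id, num_labels=2)
model = PeftModel.from_pretrained(base, adapter_id)   # attach the LoRA adapter
model.eval()

tokenizer = AutoTokenizer.from_pretrained(base_id)
inputs = tokenizer("ACGTACGTACGT", return_tensors="pt")  # placeholder DNA sequence
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.softmax(-1))
```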
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_EMP_H4-seqsight_32768_512_30M-L32_f", "results": []}]}
mahdibaghbanzadeh/GUE_EMP_H4-seqsight_32768_512_30M-L32_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T04:33:12+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_EMP\_H4-seqsight\_32768\_512\_30M-L32\_f ============================================= This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_EMP\_H4 dataset. It achieves the following results on the evaluation set: * Loss: 0.3352 * F1 Score: 0.8852 * Accuracy: 0.8850 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_EMP_H3-seqsight_32768_512_30M-L1_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_EMP_H3](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_EMP_H3) dataset. It achieves the following results on the evaluation set: - Loss: 0.3166 - F1 Score: 0.8704 - Accuracy: 0.8704 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:------:|:-----:|:---------------:|:--------:|:--------:| | 0.4895 | 2.13 | 200 | 0.4117 | 0.8179 | 0.8183 | | 0.38 | 4.26 | 400 | 0.3987 | 0.8195 | 0.8196 | | 0.3604 | 6.38 | 600 | 0.3889 | 0.8288 | 0.8290 | | 0.3455 | 8.51 | 800 | 0.3671 | 0.8444 | 0.8444 | | 0.3305 | 10.64 | 1000 | 0.3545 | 0.8530 | 0.8530 | | 0.3153 | 12.77 | 1200 | 0.3509 | 0.8490 | 0.8490 | | 0.3084 | 14.89 | 1400 | 0.3471 | 0.8524 | 0.8524 | | 0.2992 | 17.02 | 1600 | 0.3392 | 0.8577 | 0.8577 | | 0.2955 | 19.15 | 1800 | 0.3475 | 0.8550 | 0.8550 | | 0.2881 | 21.28 | 2000 | 0.3450 | 0.8536 | 0.8537 | | 0.2886 | 23.4 | 2200 | 0.3340 | 0.8591 | 0.8591 | | 0.2802 | 25.53 | 2400 | 0.3377 | 0.8591 | 0.8591 | | 0.2854 | 27.66 | 2600 | 0.3288 | 0.8677 | 0.8677 | | 0.2832 | 29.79 | 2800 | 0.3375 | 0.8597 | 0.8597 | | 0.2779 | 31.91 | 3000 | 0.3364 | 0.8644 | 0.8644 | | 0.2714 | 34.04 | 3200 | 0.3363 | 0.8637 | 0.8637 | | 0.272 | 36.17 | 3400 | 0.3365 | 0.8650 | 0.8651 | | 0.2693 | 38.3 | 3600 | 0.3322 | 0.8691 | 0.8691 | | 0.2693 | 40.43 | 3800 | 0.3393 | 0.8682 | 0.8684 | | 0.2687 | 42.55 | 4000 | 0.3355 | 0.8704 | 0.8704 | | 0.266 | 44.68 | 4200 | 0.3295 | 0.8711 | 0.8711 | | 0.2622 | 46.81 | 4400 | 0.3366 | 0.8683 | 0.8684 | | 0.2648 | 48.94 | 4600 | 0.3383 | 0.8676 | 0.8677 | | 0.2635 | 51.06 | 4800 | 0.3306 | 0.8677 | 0.8677 | | 0.2598 | 53.19 | 5000 | 0.3522 | 0.8614 | 0.8617 | | 0.2634 | 55.32 | 5200 | 0.3305 | 0.8691 | 0.8691 | | 0.2626 | 57.45 | 5400 | 0.3378 | 0.8664 | 0.8664 | | 0.2566 | 59.57 | 5600 | 0.3363 | 0.8683 | 0.8684 | | 0.2604 | 61.7 | 5800 | 0.3259 | 0.8717 | 0.8717 | | 0.2559 | 63.83 | 6000 | 0.3541 | 0.8628 | 0.8631 | | 0.2574 | 65.96 | 6200 | 0.3417 | 0.8683 | 0.8684 | | 0.2549 | 68.09 | 6400 | 0.3428 | 0.8689 | 0.8691 | | 0.2529 | 70.21 | 6600 | 0.3406 | 0.8670 | 0.8671 | | 0.2563 | 72.34 | 6800 | 0.3388 | 0.8675 | 0.8677 | | 0.2518 | 74.47 | 7000 | 0.3564 | 0.8567 | 0.8570 | | 0.2496 | 76.6 | 7200 | 0.3428 | 0.8696 | 0.8697 | | 0.255 | 78.72 | 7400 | 0.3416 | 0.8676 | 0.8677 | | 0.2503 | 80.85 | 7600 | 0.3381 | 0.8703 | 0.8704 | | 0.2505 | 82.98 | 7800 | 0.3454 | 0.8649 | 0.8651 | | 0.2503 | 85.11 | 8000 | 0.3388 | 0.8676 | 0.8677 | | 0.2493 | 87.23 | 8200 | 0.3319 | 0.8711 | 0.8711 | | 0.249 | 89.36 | 8400 | 0.3409 | 0.8670 | 0.8671 | | 0.2503 | 91.49 | 8600 | 0.3386 | 0.8690 | 0.8691 | | 0.2497 | 93.62 | 8800 | 0.3395 | 0.8710 | 0.8711 | | 0.2502 | 95.74 | 
9000 | 0.3442 | 0.8655 | 0.8657 | | 0.2477 | 97.87 | 9200 | 0.3356 | 0.8703 | 0.8704 | | 0.2492 | 100.0 | 9400 | 0.3363 | 0.8697 | 0.8697 | | 0.2471 | 102.13 | 9600 | 0.3394 | 0.8683 | 0.8684 | | 0.2484 | 104.26 | 9800 | 0.3398 | 0.8670 | 0.8671 | | 0.2488 | 106.38 | 10000 | 0.3397 | 0.8676 | 0.8677 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
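The GUE_EMP_* cards in this dump all describe the same recipe: a PEFT adapter trained on top of mahdibaghbanzadeh/seqsight_32768_512_30M with Adam, a linear schedule, batch size 128 and 10,000 training steps, evaluated every 200 steps. A minimal sketch of that setup is given below; the LoRA settings, target modules, dataset column name, split names and label count are illustrative assumptions, not values taken from the cards.

```python
from datasets import load_dataset
from peft import LoraConfig, TaskType, get_peft_model
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          Trainer, TrainingArguments)

base = "mahdibaghbanzadeh/seqsight_32768_512_30M"
tokenizer = AutoTokenizer.from_pretrained(base, trust_remote_code=True)
model = AutoModelForSequenceClassification.from_pretrained(
    base, num_labels=2, trust_remote_code=True  # binary histone-mark task; label count is an assumption
)
# Wrap the backbone with a LoRA adapter; adapter hyperparameters and module names are assumptions
model = get_peft_model(model, LoraConfig(task_type=TaskType.SEQ_CLS, target_modules=["query", "value"]))

ds = load_dataset("mahdibaghbanzadeh/GUE_EMP_H3")
ds = ds.map(lambda batch: tokenizer(batch["sequence"], truncation=True), batched=True)  # "sequence" column is an assumption

args = TrainingArguments(
    output_dir="GUE_EMP_H3-seqsight_32768_512_30M-L1_f",
    learning_rate=5e-4,               # hyperparameters below are the ones listed in the card
    per_device_train_batch_size=128,
    per_device_eval_batch_size=128,
    seed=42,
    lr_scheduler_type="linear",
    max_steps=10_000,
    evaluation_strategy="steps",
    eval_steps=200,
)

Trainer(
    model=model,
    args=args,
    train_dataset=ds["train"],
    eval_dataset=ds["validation"],    # split name is an assumption
    tokenizer=tokenizer,
).train()
```

The adapter-depth suffixes in the repository names (L1, L8, L32) and the F1/accuracy metric computation are not reproduced here; the cards do not spell out how they were configured.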
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_EMP_H3-seqsight_32768_512_30M-L1_f", "results": []}]}
mahdibaghbanzadeh/GUE_EMP_H3-seqsight_32768_512_30M-L1_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T04:33:58+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_EMP\_H3-seqsight\_32768\_512\_30M-L1\_f ============================================ This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_EMP\_H3 dataset. It achieves the following results on the evaluation set: * Loss: 0.3166 * F1 Score: 0.8704 * Accuracy: 0.8704 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # 1er_mod_eval This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.6113 - Accuracy: 0.175 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - distributed_type: multi-GPU - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.6316 | 0.5 | 5 | 1.6332 | 0.175 | | 1.6988 | 1.0 | 10 | 1.6113 | 0.175 | ### Framework versions - Transformers 4.40.1 - Pytorch 2.3.0+cu121 - Datasets 2.19.0 - Tokenizers 0.19.1
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "bert-base-cased", "model-index": [{"name": "1er_mod_eval", "results": []}]}
edchaud/1er_mod_eval
null
[ "transformers", "safetensors", "bert", "text-classification", "generated_from_trainer", "base_model:bert-base-cased", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-30T04:37:44+00:00
[]
[]
TAGS #transformers #safetensors #bert #text-classification #generated_from_trainer #base_model-bert-base-cased #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us
1er\_mod\_eval ============== This model is a fine-tuned version of bert-base-cased on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 1.6113 * Accuracy: 0.175 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 5e-05 * train\_batch\_size: 8 * eval\_batch\_size: 8 * seed: 42 * distributed\_type: multi-GPU * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * num\_epochs: 1 ### Training results ### Framework versions * Transformers 4.40.1 * Pytorch 2.3.0+cu121 * Datasets 2.19.0 * Tokenizers 0.19.1
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 5e-05\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* distributed\\_type: multi-GPU\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 1", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.1\n* Pytorch 2.3.0+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
[ "TAGS\n#transformers #safetensors #bert #text-classification #generated_from_trainer #base_model-bert-base-cased #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 5e-05\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* distributed\\_type: multi-GPU\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 1", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.1\n* Pytorch 2.3.0+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
[ 52, 111, 5, 44 ]
[ "TAGS\n#transformers #safetensors #bert #text-classification #generated_from_trainer #base_model-bert-base-cased #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 5e-05\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* distributed\\_type: multi-GPU\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 1### Training results### Framework versions\n\n\n* Transformers 4.40.1\n* Pytorch 2.3.0+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
abc88767/model14
null
[ "transformers", "safetensors", "stablelm", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-30T04:38:33+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #stablelm #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #stablelm #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 41, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #stablelm #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
text-generation
transformers
# Phi-3-128K-Instruct-ov-fp16-int4-asym ## Model Description This is a version of the original [Phi-3-128K-Instruct](https://huggingface.co/microsoft/Phi-3-128k-instruct) model, converted to OpenVINO™ IR (Intermediate Representation) format for optimized inference on Intel® hardware. This model is created using the procedures detailed in the [OpenVINO™ Notebooks](https://github.com/openvinotoolkit/openvino_notebooks/tree/latest/notebooks) repository. ## Intended Use This model is designed for advanced natural language understanding and generation tasks, ideal for developers and researchers in both academic and commercial settings who require efficient AI capabilities for devices with limited computational power. It is not intended for use in creating or promoting harmful or illegal content, in accordance with the guidelines outlined in the Phi-3 Acceptable Use Policy. ## Licensing and Redistribution This model is released under the [MIT license](https://huggingface.co/microsoft/Phi-3-128k-instruct/resolve/main/LICENSE). ## Weight Compression Parameters For more information on the parameters, refer to the [OpenVINO™ 2024.1.0 documentation](https://docs.openvino.ai/2024/openvino-workflow/model-optimization-guide/weight-compression.html) * mode: **INT4_ASYM** * group_size: **128** * ratio: **0.8** ## Running Model Inference Install packages required for using [Optimum Intel](https://huggingface.co/docs/optimum/intel/index) integration with the OpenVINO™ backend: ```bash pip install --upgrade --upgrade-strategy eager "optimum[openvino]" ``` Then load the tokenizer and the OpenVINO model and run generation: ```python from optimum.intel.openvino import OVModelForCausalLM from transformers import AutoTokenizer, pipeline model_id = "nsbendre25/Phi-3-mini-128k-instruct-ov-fp16-int4-asym" # Initialize the tokenizer and model tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True) model = OVModelForCausalLM.from_pretrained(model_id, trust_remote_code=True) # Build a text-generation pipeline on top of the OpenVINO model pipe = pipeline("text-generation", model=model, tokenizer=tokenizer) print(pipe("I am in Paris, plan me a 2 week trip", max_new_tokens=256)[0]["generated_text"]) ```
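The weight-compression parameters above (INT4_ASYM, group_size 128, ratio 0.8) correspond to Optimum Intel's 4-bit asymmetric weight compression. The snippet below is a sketch of one way such a conversion can be produced from the original checkpoint; it is an illustration under stated assumptions, not the exact procedure used for this repository, and the output directory name is arbitrary.

```python
from optimum.intel import OVModelForCausalLM, OVWeightQuantizationConfig

# 4-bit asymmetric weight compression matching the parameters listed in the card
quantization_config = OVWeightQuantizationConfig(bits=4, sym=False, group_size=128, ratio=0.8)

model = OVModelForCausalLM.from_pretrained(
    "microsoft/Phi-3-mini-128k-instruct",  # assumed source checkpoint
    export=True,                           # convert the PyTorch weights to OpenVINO IR on the fly
    trust_remote_code=True,
    quantization_config=quantization_config,
)
model.save_pretrained("Phi-3-mini-128k-instruct-ov-fp16-int4-asym")
```

A roughly equivalent conversion is also available through the `optimum-cli export openvino` command with the int4 weight format; flag names vary between Optimum Intel releases.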
{"language": ["en"], "license": "mit", "library_name": "transformers", "tags": ["OpenVINO", "Phi-3", "PyTorch", "weight_compression", "optimum-intel"], "pipeline_tag": "text-generation"}
nsbendre25/Phi-3-mini-128k-instruct-ov-fp16-int4-asym
null
[ "transformers", "openvino", "phi3", "text-generation", "OpenVINO", "Phi-3", "PyTorch", "weight_compression", "optimum-intel", "conversational", "custom_code", "en", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-30T04:38:36+00:00
[]
[ "en" ]
TAGS #transformers #openvino #phi3 #text-generation #OpenVINO #Phi-3 #PyTorch #weight_compression #optimum-intel #conversational #custom_code #en #license-mit #autotrain_compatible #endpoints_compatible #region-us
# Phi-3-128K-Instruct-ov-fp16-int4-asym ## Model Description This is a version of the original Phi-3-128K-Instruct model, converted to OpenVINO™ IR (Intermediate Representation) format for optimized inference on Intel® hardware. This model is created using the procedures detailed in the OpenVINO™ Notebooks repository. ## Intended Use This model is designed for advanced natural language understanding and generation tasks, ideal for developers and researchers in both academic and commercial settings who require efficient AI capabilities for devices with limited computational power. It is not intended for use in creating or promoting harmful or illegal content, in accordance with the guidelines outlined in the Phi-3 Acceptable Use Policy. ## Licensing and Redistribution This model is released under the MIT license. ## Weight Compression Parameters For more information on the parameters, refer to the OpenVINO™ 2024.1.0 documentation * mode: INT4_ASYM * group_size: 128 * ratio: 0.8 ## Running Model Inference Install packages required for using Optimum Intel integration with the OpenVINO™ backend:
[ "# Phi-3-128K-Instruct-ov-fp16-int4-asym", "## Model Description\n\nThis is a version of the original Phi-3-128K-Instruct model, converted to OpenVINO™ IR (Intermediate Representation) format for optimized inference on Intel® hardware. This model is created using the procedures detailed in the OpenVINO™ Notebooks repository.", "## Intended Use\nThis model is designed for advanced natural language understanding and generation tasks, ideal for developers and researchers in both academic and commercial settings who require efficient AI capabilities for devices with limited computational power. It is not intended for use in creating or promoting harmful or illegal content, in accordance with the guidelines outlined in the Phi-3 Acceptable Use Policy.", "## Licensing and Redistribution\nThis model is released under the MIT license.", "## Weight Compression Parameters\nFor more information on the parameters, refer to the OpenVINO™ 2024.1.0 documentation\n\n* mode: INT4_ASYM\n* group_size: 128\n* ratio: 0.8", "## Running Model Inference\n\nInstall packages required for using Optimum Intel integration with the OpenVINO™ backend:" ]
[ "TAGS\n#transformers #openvino #phi3 #text-generation #OpenVINO #Phi-3 #PyTorch #weight_compression #optimum-intel #conversational #custom_code #en #license-mit #autotrain_compatible #endpoints_compatible #region-us \n", "# Phi-3-128K-Instruct-ov-fp16-int4-asym", "## Model Description\n\nThis is a version of the original Phi-3-128K-Instruct model, converted to OpenVINO™ IR (Intermediate Representation) format for optimized inference on Intel® hardware. This model is created using the procedures detailed in the OpenVINO™ Notebooks repository.", "## Intended Use\nThis model is designed for advanced natural language understanding and generation tasks, ideal for developers and researchers in both academic and commercial settings who require efficient AI capabilities for devices with limited computational power. It is not intended for use in creating or promoting harmful or illegal content, in accordance with the guidelines outlined in the Phi-3 Acceptable Use Policy.", "## Licensing and Redistribution\nThis model is released under the MIT license.", "## Weight Compression Parameters\nFor more information on the parameters, refer to the OpenVINO™ 2024.1.0 documentation\n\n* mode: INT4_ASYM\n* group_size: 128\n* ratio: 0.8", "## Running Model Inference\n\nInstall packages required for using Optimum Intel integration with the OpenVINO™ backend:" ]
[ 61, 23, 60, 70, 14, 45, 22 ]
[ "TAGS\n#transformers #openvino #phi3 #text-generation #OpenVINO #Phi-3 #PyTorch #weight_compression #optimum-intel #conversational #custom_code #en #license-mit #autotrain_compatible #endpoints_compatible #region-us \n# Phi-3-128K-Instruct-ov-fp16-int4-asym## Model Description\n\nThis is a version of the original Phi-3-128K-Instruct model, converted to OpenVINO™ IR (Intermediate Representation) format for optimized inference on Intel® hardware. This model is created using the procedures detailed in the OpenVINO™ Notebooks repository.## Intended Use\nThis model is designed for advanced natural language understanding and generation tasks, ideal for developers and researchers in both academic and commercial settings who require efficient AI capabilities for devices with limited computational power. It is not intended for use in creating or promoting harmful or illegal content, in accordance with the guidelines outlined in the Phi-3 Acceptable Use Policy.## Licensing and Redistribution\nThis model is released under the MIT license.## Weight Compression Parameters\nFor more information on the parameters, refer to the OpenVINO™ 2024.1.0 documentation\n\n* mode: INT4_ASYM\n* group_size: 128\n* ratio: 0.8## Running Model Inference\n\nInstall packages required for using Optimum Intel integration with the OpenVINO™ backend:" ]
null
span-marker
[Growth Matrix Reviews](https://jciodev.microsoftcrmportals.com/forums/general-discussion/56e0e0e2-f105-ef11-a81c-6045bd0b2619) It's vital to take note of that not all upgrade techniques are therapeutically or experimentally demonstrated, and numerous items advertised for these reasons might need guideline or logical proof supporting their adequacy and wellbeing. Prior to considering any type of male upgrade, it's significant to talk with a medical care proficient to grasp expected dangers, viability, and legitimate use. Furthermore, solid way of life decisions like standard activity, a decent eating regimen, overseeing pressure, and sufficient rest can emphatically influence sexual wellbeing and execution. VISIT HERE FOR OFFICIAL WEBSITE:-https://jciodev.microsoftcrmportals.com/forums/general-discussion/56e0e0e2-f105-ef11-a81c-6045bd0b2619
{"language": ["en"], "license": "afl-3.0", "library_name": "span-marker", "tags": ["Growth Matrix Reviews"]}
growthmatrixreviews/growthmatrixreviews
null
[ "span-marker", "Growth Matrix Reviews", "en", "license:afl-3.0", "region:us" ]
null
2024-04-30T04:38:43+00:00
[]
[ "en" ]
TAGS #span-marker #Growth Matrix Reviews #en #license-afl-3.0 #region-us
Growth Matrix Reviews It's vital to take note of that not all upgrade techniques are therapeutically or experimentally demonstrated, and numerous items advertised for these reasons might need guideline or logical proof supporting their adequacy and wellbeing. Prior to considering any type of male upgrade, it's significant to talk with a medical care proficient to grasp expected dangers, viability, and legitimate use. Furthermore, solid way of life decisions like standard activity, a decent eating regimen, overseeing pressure, and sufficient rest can emphatically influence sexual wellbeing and execution. VISIT HERE FOR OFFICIAL WEBSITE:-URL
[]
[ "TAGS\n#span-marker #Growth Matrix Reviews #en #license-afl-3.0 #region-us \n" ]
[ 23 ]
[ "TAGS\n#span-marker #Growth Matrix Reviews #en #license-afl-3.0 #region-us \n" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_EMP_H3-seqsight_32768_512_30M-L8_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_EMP_H3](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_EMP_H3) dataset. It achieves the following results on the evaluation set: - Loss: 0.3326 - F1 Score: 0.8657 - Accuracy: 0.8657 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:------:|:-----:|:---------------:|:--------:|:--------:| | 0.4518 | 2.13 | 200 | 0.3904 | 0.8321 | 0.8323 | | 0.3475 | 4.26 | 400 | 0.3838 | 0.8313 | 0.8317 | | 0.3167 | 6.38 | 600 | 0.4000 | 0.8234 | 0.8243 | | 0.2998 | 8.51 | 800 | 0.3542 | 0.8422 | 0.8424 | | 0.2901 | 10.64 | 1000 | 0.3432 | 0.8583 | 0.8584 | | 0.2814 | 12.77 | 1200 | 0.3466 | 0.8603 | 0.8604 | | 0.2758 | 14.89 | 1400 | 0.3572 | 0.8555 | 0.8557 | | 0.2684 | 17.02 | 1600 | 0.3379 | 0.8644 | 0.8644 | | 0.2648 | 19.15 | 1800 | 0.3645 | 0.8554 | 0.8557 | | 0.2584 | 21.28 | 2000 | 0.3568 | 0.8622 | 0.8624 | | 0.2561 | 23.4 | 2200 | 0.3326 | 0.8704 | 0.8704 | | 0.2466 | 25.53 | 2400 | 0.3611 | 0.8607 | 0.8611 | | 0.2535 | 27.66 | 2600 | 0.3273 | 0.8697 | 0.8697 | | 0.2485 | 29.79 | 2800 | 0.3265 | 0.8710 | 0.8711 | | 0.2408 | 31.91 | 3000 | 0.3381 | 0.8697 | 0.8697 | | 0.236 | 34.04 | 3200 | 0.3314 | 0.8717 | 0.8717 | | 0.2353 | 36.17 | 3400 | 0.3567 | 0.8648 | 0.8651 | | 0.2295 | 38.3 | 3600 | 0.3450 | 0.8715 | 0.8717 | | 0.2286 | 40.43 | 3800 | 0.3548 | 0.8708 | 0.8711 | | 0.229 | 42.55 | 4000 | 0.3523 | 0.8702 | 0.8704 | | 0.2232 | 44.68 | 4200 | 0.3357 | 0.8704 | 0.8704 | | 0.2184 | 46.81 | 4400 | 0.3455 | 0.8743 | 0.8744 | | 0.2212 | 48.94 | 4600 | 0.3579 | 0.8654 | 0.8657 | | 0.2195 | 51.06 | 4800 | 0.3319 | 0.8744 | 0.8744 | | 0.2131 | 53.19 | 5000 | 0.3678 | 0.8688 | 0.8691 | | 0.2172 | 55.32 | 5200 | 0.3406 | 0.8717 | 0.8717 | | 0.2145 | 57.45 | 5400 | 0.3620 | 0.8683 | 0.8684 | | 0.2086 | 59.57 | 5600 | 0.3550 | 0.8670 | 0.8671 | | 0.2104 | 61.7 | 5800 | 0.3386 | 0.8711 | 0.8711 | | 0.2066 | 63.83 | 6000 | 0.3741 | 0.8640 | 0.8644 | | 0.2072 | 65.96 | 6200 | 0.3680 | 0.8681 | 0.8684 | | 0.2037 | 68.09 | 6400 | 0.3723 | 0.8654 | 0.8657 | | 0.2017 | 70.21 | 6600 | 0.3713 | 0.8668 | 0.8671 | | 0.2042 | 72.34 | 6800 | 0.3558 | 0.8681 | 0.8684 | | 0.1993 | 74.47 | 7000 | 0.3915 | 0.8612 | 0.8617 | | 0.1957 | 76.6 | 7200 | 0.3658 | 0.8716 | 0.8717 | | 0.1982 | 78.72 | 7400 | 0.3823 | 0.8666 | 0.8671 | | 0.1966 | 80.85 | 7600 | 0.3718 | 0.8628 | 0.8631 | | 0.1935 | 82.98 | 7800 | 0.3755 | 0.8634 | 0.8637 | | 0.1943 | 85.11 | 8000 | 0.3707 | 0.8641 | 0.8644 | | 0.1925 | 87.23 | 8200 | 0.3586 | 0.8683 | 0.8684 | | 0.1939 | 89.36 | 8400 | 0.3771 | 0.8634 | 0.8637 | | 0.1907 | 91.49 | 8600 | 0.3762 | 0.8634 | 0.8637 | | 0.194 | 93.62 | 8800 | 0.3665 | 0.8662 | 0.8664 | | 0.1916 | 95.74 | 
9000 | 0.3781 | 0.8621 | 0.8624 | | 0.1885 | 97.87 | 9200 | 0.3667 | 0.8669 | 0.8671 | | 0.19 | 100.0 | 9400 | 0.3722 | 0.8622 | 0.8624 | | 0.1891 | 102.13 | 9600 | 0.3752 | 0.8641 | 0.8644 | | 0.1892 | 104.26 | 9800 | 0.3738 | 0.8641 | 0.8644 | | 0.1854 | 106.38 | 10000 | 0.3722 | 0.8641 | 0.8644 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_EMP_H3-seqsight_32768_512_30M-L8_f", "results": []}]}
mahdibaghbanzadeh/GUE_EMP_H3-seqsight_32768_512_30M-L8_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T04:39:12+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_EMP\_H3-seqsight\_32768\_512\_30M-L8\_f ============================================ This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_EMP\_H3 dataset. It achieves the following results on the evaluation set: * Loss: 0.3326 * F1 Score: 0.8657 * Accuracy: 0.8657 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_EMP_H3-seqsight_32768_512_30M-L32_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_EMP_H3](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_EMP_H3) dataset. It achieves the following results on the evaluation set: - Loss: 0.3600 - F1 Score: 0.8564 - Accuracy: 0.8564 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:------:|:-----:|:---------------:|:--------:|:--------:| | 0.426 | 2.13 | 200 | 0.3677 | 0.8409 | 0.8410 | | 0.3148 | 4.26 | 400 | 0.3429 | 0.8577 | 0.8577 | | 0.2892 | 6.38 | 600 | 0.3671 | 0.8459 | 0.8464 | | 0.2772 | 8.51 | 800 | 0.3247 | 0.8710 | 0.8711 | | 0.2667 | 10.64 | 1000 | 0.3338 | 0.8683 | 0.8684 | | 0.2551 | 12.77 | 1200 | 0.3291 | 0.8677 | 0.8677 | | 0.2481 | 14.89 | 1400 | 0.3790 | 0.8484 | 0.8490 | | 0.2397 | 17.02 | 1600 | 0.3280 | 0.8663 | 0.8664 | | 0.2329 | 19.15 | 1800 | 0.3701 | 0.8601 | 0.8604 | | 0.2215 | 21.28 | 2000 | 0.3518 | 0.8723 | 0.8724 | | 0.2175 | 23.4 | 2200 | 0.3751 | 0.8594 | 0.8597 | | 0.2053 | 25.53 | 2400 | 0.3631 | 0.8662 | 0.8664 | | 0.2058 | 27.66 | 2600 | 0.3505 | 0.8770 | 0.8771 | | 0.1974 | 29.79 | 2800 | 0.3614 | 0.8661 | 0.8664 | | 0.1889 | 31.91 | 3000 | 0.3470 | 0.8730 | 0.8731 | | 0.1809 | 34.04 | 3200 | 0.3540 | 0.8730 | 0.8731 | | 0.1762 | 36.17 | 3400 | 0.3803 | 0.8663 | 0.8664 | | 0.1633 | 38.3 | 3600 | 0.3846 | 0.8716 | 0.8717 | | 0.1618 | 40.43 | 3800 | 0.4367 | 0.8564 | 0.8570 | | 0.1529 | 42.55 | 4000 | 0.4538 | 0.8628 | 0.8631 | | 0.1468 | 44.68 | 4200 | 0.4200 | 0.8616 | 0.8617 | | 0.1404 | 46.81 | 4400 | 0.4299 | 0.8683 | 0.8684 | | 0.1377 | 48.94 | 4600 | 0.4680 | 0.8511 | 0.8517 | | 0.1316 | 51.06 | 4800 | 0.4302 | 0.8650 | 0.8651 | | 0.1266 | 53.19 | 5000 | 0.4679 | 0.8588 | 0.8591 | | 0.1245 | 55.32 | 5200 | 0.4582 | 0.8650 | 0.8651 | | 0.1218 | 57.45 | 5400 | 0.4607 | 0.8676 | 0.8677 | | 0.1138 | 59.57 | 5600 | 0.4858 | 0.8622 | 0.8624 | | 0.1141 | 61.7 | 5800 | 0.4473 | 0.8664 | 0.8664 | | 0.1122 | 63.83 | 6000 | 0.5141 | 0.8606 | 0.8611 | | 0.1058 | 65.96 | 6200 | 0.5234 | 0.8634 | 0.8637 | | 0.0999 | 68.09 | 6400 | 0.5177 | 0.8594 | 0.8597 | | 0.0962 | 70.21 | 6600 | 0.5301 | 0.8642 | 0.8644 | | 0.0984 | 72.34 | 6800 | 0.4981 | 0.8663 | 0.8664 | | 0.0956 | 74.47 | 7000 | 0.5309 | 0.8643 | 0.8644 | | 0.0919 | 76.6 | 7200 | 0.5333 | 0.8690 | 0.8691 | | 0.0925 | 78.72 | 7400 | 0.5550 | 0.8580 | 0.8584 | | 0.0899 | 80.85 | 7600 | 0.5366 | 0.8615 | 0.8617 | | 0.0874 | 82.98 | 7800 | 0.5441 | 0.8616 | 0.8617 | | 0.0848 | 85.11 | 8000 | 0.5362 | 0.8616 | 0.8617 | | 0.0829 | 87.23 | 8200 | 0.5478 | 0.8649 | 0.8651 | | 0.0831 | 89.36 | 8400 | 0.5718 | 0.8595 | 0.8597 | | 0.0793 | 91.49 | 8600 | 0.5700 | 0.8649 | 0.8651 | | 0.083 | 93.62 | 8800 | 0.5573 | 0.8642 | 0.8644 | | 0.0805 | 95.74 
| 9000 | 0.5617 | 0.8622 | 0.8624 | | 0.076 | 97.87 | 9200 | 0.5696 | 0.8642 | 0.8644 | | 0.079 | 100.0 | 9400 | 0.5691 | 0.8609 | 0.8611 | | 0.0798 | 102.13 | 9600 | 0.5755 | 0.8602 | 0.8604 | | 0.0766 | 104.26 | 9800 | 0.5702 | 0.8609 | 0.8611 | | 0.0728 | 106.38 | 10000 | 0.5732 | 0.8635 | 0.8637 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_EMP_H3-seqsight_32768_512_30M-L32_f", "results": []}]}
mahdibaghbanzadeh/GUE_EMP_H3-seqsight_32768_512_30M-L32_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T04:40:00+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_EMP\_H3-seqsight\_32768\_512\_30M-L32\_f ============================================= This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_EMP\_H3 dataset. It achieves the following results on the evaluation set: * Loss: 0.3600 * F1 Score: 0.8564 * Accuracy: 0.8564 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_EMP_H4ac-seqsight_32768_512_30M-L1_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_EMP_H4ac](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_EMP_H4ac) dataset. It achieves the following results on the evaluation set: - Loss: 0.5836 - F1 Score: 0.7005 - Accuracy: 0.7003 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:| | 0.6436 | 0.93 | 200 | 0.6088 | 0.6696 | 0.6698 | | 0.6105 | 1.87 | 400 | 0.6151 | 0.6631 | 0.6669 | | 0.5996 | 2.8 | 600 | 0.5981 | 0.6807 | 0.6812 | | 0.5969 | 3.74 | 800 | 0.6018 | 0.6711 | 0.6733 | | 0.59 | 4.67 | 1000 | 0.6090 | 0.6696 | 0.6730 | | 0.5862 | 5.61 | 1200 | 0.6141 | 0.6667 | 0.6716 | | 0.5843 | 6.54 | 1400 | 0.5930 | 0.6870 | 0.6880 | | 0.5743 | 7.48 | 1600 | 0.5876 | 0.6932 | 0.6933 | | 0.5819 | 8.41 | 1800 | 0.5908 | 0.6893 | 0.6900 | | 0.5781 | 9.35 | 2000 | 0.5984 | 0.6864 | 0.6883 | | 0.5703 | 10.28 | 2200 | 0.5909 | 0.6911 | 0.6915 | | 0.574 | 11.21 | 2400 | 0.5908 | 0.6928 | 0.6935 | | 0.5695 | 12.15 | 2600 | 0.5897 | 0.6913 | 0.6921 | | 0.5663 | 13.08 | 2800 | 0.5944 | 0.6927 | 0.6938 | | 0.5677 | 14.02 | 3000 | 0.5944 | 0.6928 | 0.6941 | | 0.5652 | 14.95 | 3200 | 0.5875 | 0.6959 | 0.6965 | | 0.5625 | 15.89 | 3400 | 0.6009 | 0.6904 | 0.6930 | | 0.5608 | 16.82 | 3600 | 0.5798 | 0.7017 | 0.7018 | | 0.5643 | 17.76 | 3800 | 0.5829 | 0.6979 | 0.6982 | | 0.5605 | 18.69 | 4000 | 0.5820 | 0.7003 | 0.7006 | | 0.5563 | 19.63 | 4200 | 0.5911 | 0.6991 | 0.7006 | | 0.5581 | 20.56 | 4400 | 0.5751 | 0.7047 | 0.7047 | | 0.5548 | 21.5 | 4600 | 0.6061 | 0.6908 | 0.6941 | | 0.5565 | 22.43 | 4800 | 0.5817 | 0.7041 | 0.7044 | | 0.5538 | 23.36 | 5000 | 0.5969 | 0.6963 | 0.6982 | | 0.5559 | 24.3 | 5200 | 0.5749 | 0.7042 | 0.7041 | | 0.5535 | 25.23 | 5400 | 0.5772 | 0.7069 | 0.7070 | | 0.5542 | 26.17 | 5600 | 0.5775 | 0.7041 | 0.7044 | | 0.5504 | 27.1 | 5800 | 0.5838 | 0.7056 | 0.7065 | | 0.55 | 28.04 | 6000 | 0.5729 | 0.7078 | 0.7076 | | 0.5521 | 28.97 | 6200 | 0.5890 | 0.7016 | 0.7032 | | 0.5493 | 29.91 | 6400 | 0.5800 | 0.7049 | 0.7053 | | 0.5514 | 30.84 | 6600 | 0.5974 | 0.6998 | 0.7026 | | 0.5486 | 31.78 | 6800 | 0.5761 | 0.7106 | 0.7109 | | 0.549 | 32.71 | 7000 | 0.5780 | 0.7092 | 0.7097 | | 0.5467 | 33.64 | 7200 | 0.5782 | 0.7081 | 0.7088 | | 0.5481 | 34.58 | 7400 | 0.5793 | 0.7079 | 0.7085 | | 0.5495 | 35.51 | 7600 | 0.5756 | 0.7081 | 0.7085 | | 0.5441 | 36.45 | 7800 | 0.5772 | 0.7088 | 0.7091 | | 0.5498 | 37.38 | 8000 | 0.5855 | 0.7066 | 0.7082 | | 0.545 | 38.32 | 8200 | 0.5817 | 0.7076 | 0.7085 | | 0.5459 | 39.25 | 8400 | 0.5762 | 0.7082 | 0.7085 | | 0.5488 | 40.19 | 8600 | 0.5737 | 0.7139 | 0.7141 | | 0.5465 | 41.12 | 8800 | 0.5785 | 0.7087 | 0.7094 | | 0.5475 | 42.06 | 9000 
| 0.5754 | 0.7108 | 0.7111 | | 0.5452 | 42.99 | 9200 | 0.5809 | 0.7059 | 0.7067 | | 0.5453 | 43.93 | 9400 | 0.5809 | 0.7073 | 0.7082 | | 0.5474 | 44.86 | 9600 | 0.5764 | 0.7098 | 0.7103 | | 0.5441 | 45.79 | 9800 | 0.5775 | 0.7103 | 0.7109 | | 0.5448 | 46.73 | 10000 | 0.5792 | 0.7084 | 0.7091 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_EMP_H4ac-seqsight_32768_512_30M-L1_f", "results": []}]}
mahdibaghbanzadeh/GUE_EMP_H4ac-seqsight_32768_512_30M-L1_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T04:40:31+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_EMP\_H4ac-seqsight\_32768\_512\_30M-L1\_f ============================================== This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_EMP\_H4ac dataset. It achieves the following results on the evaluation set: * Loss: 0.5836 * F1 Score: 0.7005 * Accuracy: 0.7003 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
text-generation
transformers
# TooManyMix_LLM_01 TooManyMix_LLM_01 is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing): * [jdqwoi/TooManyMixed-LLM_03](https://huggingface.co/jdqwoi/TooManyMixed-LLM_03) * [jdqwoi/TooManyMix_LLM](https://huggingface.co/jdqwoi/TooManyMix_LLM) ## 🧩 Configuration ```yaml slices: - sources: - model: jdqwoi/TooManyMixed-LLM_03 layer_range: [0, 32] - model: jdqwoi/TooManyMix_LLM layer_range: [0, 32] merge_method: slerp base_model: jdqwoi/TooManyMixed-LLM_03 parameters: t: - filter: self_attn value: [0, 0.5, 0.3, 0.7, 1] - filter: mlp value: [1, 0.5, 0.7, 0.3, 0] - value: 0.5 dtype: bfloat16 ``` ## 💻 Usage ```python !pip install -qU transformers accelerate from transformers import AutoTokenizer import transformers import torch model = "jdqwoi/TooManyMix_LLM_01" messages = [{"role": "user", "content": "What is a large language model?"}] tokenizer = AutoTokenizer.from_pretrained(model) prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) pipeline = transformers.pipeline( "text-generation", model=model, torch_dtype=torch.float16, device_map="auto", ) outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95) print(outputs[0]["generated_text"]) ```
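The YAML above is an ordinary mergekit configuration; LazyMergekit simply generates it and runs mergekit in a Colab session. Assuming a local mergekit installation, the same SLERP merge can be reproduced roughly as sketched below (the output path is arbitrary, and the `mergekit-yaml` invocation may need extra flags depending on the mergekit version):

```python
# assumes: pip install mergekit, and the configuration above saved as config.yaml
import subprocess

subprocess.run(
    ["mergekit-yaml", "config.yaml", "./TooManyMix_LLM_01", "--copy-tokenizer"],
    check=True,  # raise if the merge fails
)
```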
{"tags": ["merge", "mergekit", "lazymergekit", "jdqwoi/TooManyMixed-LLM_03", "jdqwoi/TooManyMix_LLM"], "base_model": ["jdqwoi/TooManyMixed-LLM_03", "jdqwoi/TooManyMix_LLM"]}
jdqwoi/TooManyMix_LLM_01
null
[ "transformers", "safetensors", "mistral", "text-generation", "merge", "mergekit", "lazymergekit", "jdqwoi/TooManyMixed-LLM_03", "jdqwoi/TooManyMix_LLM", "base_model:jdqwoi/TooManyMixed-LLM_03", "base_model:jdqwoi/TooManyMix_LLM", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T04:41:10+00:00
[]
[]
TAGS #transformers #safetensors #mistral #text-generation #merge #mergekit #lazymergekit #jdqwoi/TooManyMixed-LLM_03 #jdqwoi/TooManyMix_LLM #base_model-jdqwoi/TooManyMixed-LLM_03 #base_model-jdqwoi/TooManyMix_LLM #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# TooManyMix_LLM_01 TooManyMix_LLM_01 is a merge of the following models using LazyMergekit: * jdqwoi/TooManyMixed-LLM_03 * jdqwoi/TooManyMix_LLM ## Configuration ## Usage
[ "# TooManyMix_LLM_01\n\nTooManyMix_LLM_01 is a merge of the following models using LazyMergekit:\n* jdqwoi/TooManyMixed-LLM_03\n* jdqwoi/TooManyMix_LLM", "## Configuration", "## Usage" ]
[ "TAGS\n#transformers #safetensors #mistral #text-generation #merge #mergekit #lazymergekit #jdqwoi/TooManyMixed-LLM_03 #jdqwoi/TooManyMix_LLM #base_model-jdqwoi/TooManyMixed-LLM_03 #base_model-jdqwoi/TooManyMix_LLM #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# TooManyMix_LLM_01\n\nTooManyMix_LLM_01 is a merge of the following models using LazyMergekit:\n* jdqwoi/TooManyMixed-LLM_03\n* jdqwoi/TooManyMix_LLM", "## Configuration", "## Usage" ]
[ 110, 61, 3, 3 ]
[ "TAGS\n#transformers #safetensors #mistral #text-generation #merge #mergekit #lazymergekit #jdqwoi/TooManyMixed-LLM_03 #jdqwoi/TooManyMix_LLM #base_model-jdqwoi/TooManyMixed-LLM_03 #base_model-jdqwoi/TooManyMix_LLM #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# TooManyMix_LLM_01\n\nTooManyMix_LLM_01 is a merge of the following models using LazyMergekit:\n* jdqwoi/TooManyMixed-LLM_03\n* jdqwoi/TooManyMix_LLM## Configuration## Usage" ]
null
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
dahye1/generator_2
null
[ "transformers", "safetensors", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
2024-04-30T04:41:52+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #arxiv-1910.09700 #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #arxiv-1910.09700 #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 26, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #arxiv-1910.09700 #endpoints_compatible #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_EMP_H4ac-seqsight_32768_512_30M-L8_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_EMP_H4ac](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_EMP_H4ac) dataset. It achieves the following results on the evaluation set: - Loss: 0.5741 - F1 Score: 0.7114 - Accuracy: 0.7111 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:| | 0.6317 | 0.93 | 200 | 0.6072 | 0.6730 | 0.6742 | | 0.5992 | 1.87 | 400 | 0.6159 | 0.6676 | 0.6727 | | 0.5842 | 2.8 | 600 | 0.5923 | 0.6918 | 0.6921 | | 0.5787 | 3.74 | 800 | 0.5891 | 0.6928 | 0.6938 | | 0.5703 | 4.67 | 1000 | 0.5910 | 0.6946 | 0.6959 | | 0.564 | 5.61 | 1200 | 0.6009 | 0.6853 | 0.6880 | | 0.5623 | 6.54 | 1400 | 0.5970 | 0.6860 | 0.6889 | | 0.5491 | 7.48 | 1600 | 0.5792 | 0.7026 | 0.7026 | | 0.5555 | 8.41 | 1800 | 0.5778 | 0.7047 | 0.7050 | | 0.551 | 9.35 | 2000 | 0.5788 | 0.7050 | 0.7056 | | 0.5405 | 10.28 | 2200 | 0.5726 | 0.7108 | 0.7106 | | 0.5426 | 11.21 | 2400 | 0.5767 | 0.7086 | 0.7091 | | 0.5388 | 12.15 | 2600 | 0.5831 | 0.7062 | 0.7070 | | 0.5343 | 13.08 | 2800 | 0.5828 | 0.7091 | 0.7097 | | 0.5344 | 14.02 | 3000 | 0.5646 | 0.7151 | 0.7150 | | 0.5299 | 14.95 | 3200 | 0.5724 | 0.7176 | 0.7176 | | 0.5268 | 15.89 | 3400 | 0.5760 | 0.7141 | 0.7150 | | 0.5257 | 16.82 | 3600 | 0.5654 | 0.7171 | 0.7173 | | 0.5286 | 17.76 | 3800 | 0.5759 | 0.7137 | 0.7150 | | 0.5247 | 18.69 | 4000 | 0.5598 | 0.7201 | 0.7199 | | 0.5196 | 19.63 | 4200 | 0.5636 | 0.7180 | 0.7182 | | 0.5192 | 20.56 | 4400 | 0.5582 | 0.7233 | 0.7232 | | 0.5188 | 21.5 | 4600 | 0.5939 | 0.7065 | 0.7097 | | 0.5183 | 22.43 | 4800 | 0.5602 | 0.7235 | 0.7232 | | 0.5143 | 23.36 | 5000 | 0.5759 | 0.7175 | 0.7185 | | 0.5179 | 24.3 | 5200 | 0.5599 | 0.7257 | 0.7255 | | 0.5131 | 25.23 | 5400 | 0.5583 | 0.7229 | 0.7226 | | 0.5141 | 26.17 | 5600 | 0.5610 | 0.7247 | 0.7243 | | 0.5096 | 27.1 | 5800 | 0.5611 | 0.7246 | 0.7243 | | 0.5089 | 28.04 | 6000 | 0.5564 | 0.7255 | 0.7252 | | 0.5104 | 28.97 | 6200 | 0.5749 | 0.7219 | 0.7226 | | 0.507 | 29.91 | 6400 | 0.5643 | 0.7247 | 0.7246 | | 0.5104 | 30.84 | 6600 | 0.5732 | 0.7212 | 0.7220 | | 0.5071 | 31.78 | 6800 | 0.5577 | 0.7264 | 0.7261 | | 0.5051 | 32.71 | 7000 | 0.5633 | 0.7284 | 0.7282 | | 0.5034 | 33.64 | 7200 | 0.5653 | 0.7236 | 0.7240 | | 0.5036 | 34.58 | 7400 | 0.5598 | 0.7263 | 0.7261 | | 0.506 | 35.51 | 7600 | 0.5645 | 0.7270 | 0.7270 | | 0.5009 | 36.45 | 7800 | 0.5634 | 0.7263 | 0.7261 | | 0.5045 | 37.38 | 8000 | 0.5727 | 0.7228 | 0.7235 | | 0.5012 | 38.32 | 8200 | 0.5647 | 0.7285 | 0.7284 | | 0.5002 | 39.25 | 8400 | 0.5619 | 0.7266 | 0.7264 | | 0.5018 | 40.19 | 8600 | 0.5651 | 0.7261 | 0.7258 | | 0.5016 | 41.12 | 8800 | 0.5630 | 0.7295 | 0.7293 | | 0.4991 | 42.06 | 
9000 | 0.5662 | 0.7278 | 0.7276 | | 0.4986 | 42.99 | 9200 | 0.5685 | 0.7269 | 0.7270 | | 0.4984 | 43.93 | 9400 | 0.5664 | 0.7279 | 0.7279 | | 0.4996 | 44.86 | 9600 | 0.5637 | 0.7274 | 0.7273 | | 0.4991 | 45.79 | 9800 | 0.5643 | 0.7286 | 0.7284 | | 0.4985 | 46.73 | 10000 | 0.5655 | 0.7258 | 0.7258 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
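The training-results table in the card above stores per-step validation metrics as markdown. A hedged sketch of pulling the validation-loss curve out of such a card is shown below; the six-column layout (training loss, epoch, step, validation loss, F1, accuracy) is assumed from the table header, and rows that do not match it are simply skipped.

```python
# Hedged sketch: extract (step, validation loss) pairs from the markdown
# results table in a card like the one above. Header, separator, and
# malformed rows are skipped.
import re

def parse_results_table(card_text: str) -> list[tuple[int, float]]:
    points = []
    for line in card_text.splitlines():
        cells = [c.strip() for c in line.strip().strip("|").split("|")]
        if len(cells) == 6 and re.fullmatch(r"\d+", cells[2]):
            points.append((int(cells[2]), float(cells[3])))
    return points

# e.g. parse_results_table(open("README.md").read())[:2] -> [(200, 0.6072), (400, 0.6159)]
# ("README.md" is a placeholder path for the rendered card file.)
```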
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_EMP_H4ac-seqsight_32768_512_30M-L8_f", "results": []}]}
mahdibaghbanzadeh/GUE_EMP_H4ac-seqsight_32768_512_30M-L8_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T04:42:22+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_EMP\_H4ac-seqsight\_32768\_512\_30M-L8\_f ============================================== This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_EMP\_H4ac dataset. It achieves the following results on the evaluation set: * Loss: 0.5741 * F1 Score: 0.7114 * Accuracy: 0.7111 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_EMP_H4ac-seqsight_32768_512_30M-L32_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_EMP_H4ac](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_EMP_H4ac) dataset. It achieves the following results on the evaluation set: - Loss: 0.5936 - F1 Score: 0.7152 - Accuracy: 0.7152 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:| | 0.6213 | 0.93 | 200 | 0.6249 | 0.6653 | 0.6698 | | 0.5891 | 1.87 | 400 | 0.5998 | 0.6870 | 0.6891 | | 0.5713 | 2.8 | 600 | 0.5835 | 0.6988 | 0.6988 | | 0.5616 | 3.74 | 800 | 0.5721 | 0.7055 | 0.7053 | | 0.5515 | 4.67 | 1000 | 0.5745 | 0.7055 | 0.7056 | | 0.5432 | 5.61 | 1200 | 0.5905 | 0.7016 | 0.7038 | | 0.5421 | 6.54 | 1400 | 0.5781 | 0.7094 | 0.7109 | | 0.5277 | 7.48 | 1600 | 0.5684 | 0.7096 | 0.7097 | | 0.5329 | 8.41 | 1800 | 0.5651 | 0.7150 | 0.7150 | | 0.5273 | 9.35 | 2000 | 0.5605 | 0.7217 | 0.7214 | | 0.5175 | 10.28 | 2200 | 0.5610 | 0.7234 | 0.7232 | | 0.5185 | 11.21 | 2400 | 0.5701 | 0.7167 | 0.7170 | | 0.5126 | 12.15 | 2600 | 0.5806 | 0.7168 | 0.7176 | | 0.5074 | 13.08 | 2800 | 0.5609 | 0.7203 | 0.7199 | | 0.5061 | 14.02 | 3000 | 0.5711 | 0.7234 | 0.7235 | | 0.5012 | 14.95 | 3200 | 0.5722 | 0.7222 | 0.7220 | | 0.4972 | 15.89 | 3400 | 0.5637 | 0.7255 | 0.7252 | | 0.4918 | 16.82 | 3600 | 0.5738 | 0.7182 | 0.7185 | | 0.4956 | 17.76 | 3800 | 0.6154 | 0.7044 | 0.7085 | | 0.4889 | 18.69 | 4000 | 0.5638 | 0.7244 | 0.7240 | | 0.485 | 19.63 | 4200 | 0.5662 | 0.7257 | 0.7258 | | 0.4795 | 20.56 | 4400 | 0.5650 | 0.7260 | 0.7258 | | 0.4792 | 21.5 | 4600 | 0.6026 | 0.7131 | 0.7150 | | 0.4754 | 22.43 | 4800 | 0.5727 | 0.7231 | 0.7229 | | 0.4695 | 23.36 | 5000 | 0.5847 | 0.7255 | 0.7255 | | 0.4752 | 24.3 | 5200 | 0.5807 | 0.7292 | 0.7293 | | 0.4688 | 25.23 | 5400 | 0.5726 | 0.7211 | 0.7208 | | 0.4657 | 26.17 | 5600 | 0.5799 | 0.7229 | 0.7226 | | 0.4601 | 27.1 | 5800 | 0.5873 | 0.7202 | 0.7202 | | 0.4591 | 28.04 | 6000 | 0.5771 | 0.7235 | 0.7232 | | 0.4593 | 28.97 | 6200 | 0.5979 | 0.7212 | 0.7214 | | 0.4538 | 29.91 | 6400 | 0.5846 | 0.7192 | 0.7191 | | 0.459 | 30.84 | 6600 | 0.5857 | 0.7239 | 0.7240 | | 0.4502 | 31.78 | 6800 | 0.5823 | 0.7269 | 0.7267 | | 0.4505 | 32.71 | 7000 | 0.5879 | 0.7273 | 0.7270 | | 0.446 | 33.64 | 7200 | 0.5924 | 0.7256 | 0.7258 | | 0.4463 | 34.58 | 7400 | 0.5940 | 0.7288 | 0.7287 | | 0.4453 | 35.51 | 7600 | 0.6015 | 0.7184 | 0.7191 | | 0.4406 | 36.45 | 7800 | 0.6008 | 0.7262 | 0.7261 | | 0.4444 | 37.38 | 8000 | 0.6006 | 0.7247 | 0.7249 | | 0.4367 | 38.32 | 8200 | 0.5996 | 0.7273 | 0.7270 | | 0.4409 | 39.25 | 8400 | 0.5946 | 0.7255 | 0.7252 | | 0.4369 | 40.19 | 8600 | 0.6044 | 0.7248 | 0.7246 | | 0.4382 | 41.12 | 8800 | 0.5966 | 0.7254 | 0.7252 | | 0.437 | 42.06 | 
9000 | 0.5995 | 0.7255 | 0.7252 | | 0.4313 | 42.99 | 9200 | 0.6036 | 0.7240 | 0.7240 | | 0.432 | 43.93 | 9400 | 0.5994 | 0.7241 | 0.7240 | | 0.4341 | 44.86 | 9600 | 0.6011 | 0.7242 | 0.7240 | | 0.4323 | 45.79 | 9800 | 0.6003 | 0.7234 | 0.7232 | | 0.4315 | 46.73 | 10000 | 0.6008 | 0.7236 | 0.7235 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
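All of these adapter cards report an F1 score and accuracy on the evaluation set. A small sketch of a `compute_metrics` callback that would yield those two numbers is shown below; macro-averaged F1 and a (logits, labels) tuple input are assumptions, since the cards do not state how the metrics were computed.

```python
# Hedged sketch of a compute_metrics callback producing the F1 and accuracy
# values reported in these cards. Macro averaging is an assumption.
import numpy as np
from sklearn.metrics import accuracy_score, f1_score

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=-1)
    return {
        "f1": f1_score(labels, preds, average="macro"),
        "accuracy": accuracy_score(labels, preds),
    }
```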
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_EMP_H4ac-seqsight_32768_512_30M-L32_f", "results": []}]}
mahdibaghbanzadeh/GUE_EMP_H4ac-seqsight_32768_512_30M-L32_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T04:42:47+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_EMP\_H4ac-seqsight\_32768\_512\_30M-L32\_f =============================================== This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_EMP\_H4ac dataset. It achieves the following results on the evaluation set: * Loss: 0.5936 * F1 Score: 0.7152 * Accuracy: 0.7152 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
shallow6414/2vrd6jk
null
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T04:48:01+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 47, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
null
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
Vignesh-M/WAV2VEC-FINETUNE-TAMIL-2
null
[ "transformers", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
2024-04-30T04:49:05+00:00
[ "1910.09700" ]
[]
TAGS #transformers #arxiv-1910.09700 #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #arxiv-1910.09700 #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 22, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #arxiv-1910.09700 #endpoints_compatible #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_EMP_H3K79me3-seqsight_32768_512_30M-L1_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_EMP_H3K79me3](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_EMP_H3K79me3) dataset. It achieves the following results on the evaluation set: - Loss: 0.4466 - F1 Score: 0.8058 - Accuracy: 0.8065 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:| | 0.5547 | 1.1 | 200 | 0.4825 | 0.7918 | 0.7916 | | 0.4917 | 2.21 | 400 | 0.4899 | 0.7754 | 0.7798 | | 0.4836 | 3.31 | 600 | 0.4668 | 0.7886 | 0.7909 | | 0.4719 | 4.42 | 800 | 0.4647 | 0.7895 | 0.7920 | | 0.4714 | 5.52 | 1000 | 0.4744 | 0.7840 | 0.7878 | | 0.4633 | 6.63 | 1200 | 0.4693 | 0.7860 | 0.7895 | | 0.4661 | 7.73 | 1400 | 0.4584 | 0.7939 | 0.7961 | | 0.4582 | 8.84 | 1600 | 0.4690 | 0.7859 | 0.7892 | | 0.4589 | 9.94 | 1800 | 0.4467 | 0.8093 | 0.8100 | | 0.4563 | 11.05 | 2000 | 0.4576 | 0.7913 | 0.7940 | | 0.4549 | 12.15 | 2200 | 0.4516 | 0.7962 | 0.7982 | | 0.45 | 13.26 | 2400 | 0.4518 | 0.7955 | 0.7979 | | 0.4493 | 14.36 | 2600 | 0.4465 | 0.8053 | 0.8065 | | 0.4487 | 15.47 | 2800 | 0.4629 | 0.7857 | 0.7895 | | 0.4443 | 16.57 | 3000 | 0.4441 | 0.8086 | 0.8096 | | 0.4471 | 17.68 | 3200 | 0.4448 | 0.8041 | 0.8055 | | 0.4413 | 18.78 | 3400 | 0.4418 | 0.8082 | 0.8093 | | 0.4419 | 19.89 | 3600 | 0.4554 | 0.7908 | 0.7937 | | 0.4418 | 20.99 | 3800 | 0.4519 | 0.7977 | 0.7999 | | 0.4383 | 22.1 | 4000 | 0.4429 | 0.8049 | 0.8062 | | 0.4406 | 23.2 | 4200 | 0.4459 | 0.7999 | 0.8017 | | 0.4405 | 24.31 | 4400 | 0.4483 | 0.7978 | 0.7996 | | 0.4309 | 25.41 | 4600 | 0.4468 | 0.8040 | 0.8055 | | 0.4346 | 26.52 | 4800 | 0.4417 | 0.8070 | 0.8079 | | 0.436 | 27.62 | 5000 | 0.4414 | 0.8049 | 0.8062 | | 0.4321 | 28.73 | 5200 | 0.4410 | 0.7999 | 0.8013 | | 0.4302 | 29.83 | 5400 | 0.4402 | 0.8049 | 0.8058 | | 0.4306 | 30.94 | 5600 | 0.4401 | 0.8094 | 0.8100 | | 0.4315 | 32.04 | 5800 | 0.4400 | 0.8112 | 0.8117 | | 0.4298 | 33.15 | 6000 | 0.4405 | 0.8075 | 0.8083 | | 0.4282 | 34.25 | 6200 | 0.4412 | 0.8056 | 0.8065 | | 0.4279 | 35.36 | 6400 | 0.4453 | 0.8001 | 0.8017 | | 0.4281 | 36.46 | 6600 | 0.4380 | 0.8112 | 0.8117 | | 0.4287 | 37.57 | 6800 | 0.4388 | 0.8050 | 0.8058 | | 0.4261 | 38.67 | 7000 | 0.4404 | 0.8031 | 0.8041 | | 0.4252 | 39.78 | 7200 | 0.4400 | 0.8061 | 0.8069 | | 0.4295 | 40.88 | 7400 | 0.4403 | 0.8059 | 0.8069 | | 0.4267 | 41.99 | 7600 | 0.4398 | 0.8078 | 0.8086 | | 0.4247 | 43.09 | 7800 | 0.4424 | 0.8023 | 0.8034 | | 0.4272 | 44.2 | 8000 | 0.4402 | 0.8049 | 0.8058 | | 0.4262 | 45.3 | 8200 | 0.4404 | 0.8068 | 0.8076 | | 0.4258 | 46.41 | 8400 | 0.4402 | 0.8056 | 0.8065 | | 0.427 | 47.51 | 8600 | 0.4411 | 0.8034 | 0.8044 | | 0.424 | 48.62 | 8800 | 0.4419 | 0.8048 | 0.8058 | | 0.4228 
| 49.72 | 9000 | 0.4408 | 0.8056 | 0.8065 | | 0.4277 | 50.83 | 9200 | 0.4422 | 0.8023 | 0.8034 | | 0.4227 | 51.93 | 9400 | 0.4412 | 0.8056 | 0.8065 | | 0.4236 | 53.04 | 9600 | 0.4406 | 0.8056 | 0.8065 | | 0.423 | 54.14 | 9800 | 0.4401 | 0.8067 | 0.8076 | | 0.4235 | 55.25 | 10000 | 0.4407 | 0.8056 | 0.8065 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_EMP_H3K79me3-seqsight_32768_512_30M-L1_f", "results": []}]}
mahdibaghbanzadeh/GUE_EMP_H3K79me3-seqsight_32768_512_30M-L1_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T04:49:25+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_EMP\_H3K79me3-seqsight\_32768\_512\_30M-L1\_f ================================================== This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_EMP\_H3K79me3 dataset. It achieves the following results on the evaluation set: * Loss: 0.4466 * F1 Score: 0.8058 * Accuracy: 0.8065 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
cilantro9246/iaztpsp
null
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T04:49:57+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 47, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_EMP_H3K79me3-seqsight_32768_512_30M-L8_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_EMP_H3K79me3](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_EMP_H3K79me3) dataset. It achieves the following results on the evaluation set: - Loss: 0.4411 - F1 Score: 0.8133 - Accuracy: 0.8141 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:| | 0.5327 | 1.1 | 200 | 0.4630 | 0.7998 | 0.7999 | | 0.4772 | 2.21 | 400 | 0.4671 | 0.7860 | 0.7892 | | 0.4691 | 3.31 | 600 | 0.4594 | 0.7946 | 0.7972 | | 0.4565 | 4.42 | 800 | 0.4593 | 0.7883 | 0.7913 | | 0.4538 | 5.52 | 1000 | 0.4621 | 0.7888 | 0.7923 | | 0.4425 | 6.63 | 1200 | 0.4565 | 0.7961 | 0.7985 | | 0.4437 | 7.73 | 1400 | 0.4494 | 0.8024 | 0.8037 | | 0.4349 | 8.84 | 1600 | 0.4628 | 0.7919 | 0.7947 | | 0.4339 | 9.94 | 1800 | 0.4360 | 0.8091 | 0.8093 | | 0.4311 | 11.05 | 2000 | 0.4547 | 0.7985 | 0.7999 | | 0.4304 | 12.15 | 2200 | 0.4329 | 0.8139 | 0.8141 | | 0.4252 | 13.26 | 2400 | 0.4445 | 0.8036 | 0.8051 | | 0.4245 | 14.36 | 2600 | 0.4365 | 0.8106 | 0.8110 | | 0.4233 | 15.47 | 2800 | 0.4471 | 0.8053 | 0.8069 | | 0.4189 | 16.57 | 3000 | 0.4404 | 0.8129 | 0.8128 | | 0.4213 | 17.68 | 3200 | 0.4339 | 0.8127 | 0.8131 | | 0.416 | 18.78 | 3400 | 0.4448 | 0.8031 | 0.8044 | | 0.4149 | 19.89 | 3600 | 0.4380 | 0.8077 | 0.8089 | | 0.4151 | 20.99 | 3800 | 0.4530 | 0.7974 | 0.7992 | | 0.4111 | 22.1 | 4000 | 0.4380 | 0.8083 | 0.8089 | | 0.414 | 23.2 | 4200 | 0.4378 | 0.8077 | 0.8086 | | 0.4123 | 24.31 | 4400 | 0.4519 | 0.8034 | 0.8051 | | 0.4032 | 25.41 | 4600 | 0.4410 | 0.8138 | 0.8145 | | 0.407 | 26.52 | 4800 | 0.4456 | 0.8074 | 0.8083 | | 0.4098 | 27.62 | 5000 | 0.4381 | 0.8089 | 0.8100 | | 0.4034 | 28.73 | 5200 | 0.4416 | 0.8064 | 0.8076 | | 0.4008 | 29.83 | 5400 | 0.4387 | 0.8096 | 0.8103 | | 0.4018 | 30.94 | 5600 | 0.4414 | 0.8102 | 0.8107 | | 0.4035 | 32.04 | 5800 | 0.4388 | 0.8140 | 0.8145 | | 0.4009 | 33.15 | 6000 | 0.4430 | 0.8059 | 0.8069 | | 0.3978 | 34.25 | 6200 | 0.4480 | 0.8061 | 0.8069 | | 0.3978 | 35.36 | 6400 | 0.4439 | 0.8096 | 0.8107 | | 0.3984 | 36.46 | 6600 | 0.4370 | 0.8154 | 0.8159 | | 0.3977 | 37.57 | 6800 | 0.4420 | 0.8096 | 0.8107 | | 0.3951 | 38.67 | 7000 | 0.4418 | 0.8112 | 0.8121 | | 0.3937 | 39.78 | 7200 | 0.4430 | 0.8099 | 0.8107 | | 0.3953 | 40.88 | 7400 | 0.4407 | 0.8107 | 0.8114 | | 0.3939 | 41.99 | 7600 | 0.4414 | 0.8118 | 0.8124 | | 0.3927 | 43.09 | 7800 | 0.4443 | 0.8122 | 0.8131 | | 0.3962 | 44.2 | 8000 | 0.4435 | 0.8130 | 0.8138 | | 0.3926 | 45.3 | 8200 | 0.4420 | 0.8142 | 0.8148 | | 0.3907 | 46.41 | 8400 | 0.4434 | 0.8128 | 0.8135 | | 0.3941 | 47.51 | 8600 | 0.4474 | 0.8085 | 0.8096 | | 0.3897 | 48.62 | 8800 | 0.4443 | 0.8116 | 0.8124 | | 
0.3908 | 49.72 | 9000 | 0.4452 | 0.8105 | 0.8114 | | 0.3948 | 50.83 | 9200 | 0.4475 | 0.8081 | 0.8093 | | 0.3896 | 51.93 | 9400 | 0.4439 | 0.8112 | 0.8121 | | 0.3891 | 53.04 | 9600 | 0.4432 | 0.8105 | 0.8114 | | 0.3887 | 54.14 | 9800 | 0.4431 | 0.8116 | 0.8124 | | 0.3879 | 55.25 | 10000 | 0.4447 | 0.8108 | 0.8117 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_EMP_H3K79me3-seqsight_32768_512_30M-L8_f", "results": []}]}
mahdibaghbanzadeh/GUE_EMP_H3K79me3-seqsight_32768_512_30M-L8_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T04:50:07+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_EMP\_H3K79me3-seqsight\_32768\_512\_30M-L8\_f ================================================== This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_EMP\_H3K79me3 dataset. It achieves the following results on the evaluation set: * Loss: 0.4411 * F1 Score: 0.8133 * Accuracy: 0.8141 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Codellama-finetuned-code This model was trained from scratch on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 4 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - training_steps: 20 ### Training results ### Framework versions - PEFT 0.6.0.dev0 - Transformers 4.40.1 - Pytorch 2.2.2+cpu - Datasets 2.19.0 - Tokenizers 0.19.1
{"library_name": "peft", "tags": ["generated_from_trainer"], "model-index": [{"name": "Codellama-finetuned-code", "results": []}]}
elinaparajuli/Codellama-finetuned-code
null
[ "peft", "tensorboard", "safetensors", "generated_from_trainer", "region:us" ]
null
2024-04-30T04:50:29+00:00
[]
[]
TAGS #peft #tensorboard #safetensors #generated_from_trainer #region-us
# Codellama-finetuned-code This model was trained from scratch on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 4 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - training_steps: 20 ### Training results ### Framework versions - PEFT 0.6.0.dev0 - Transformers 4.40.1 - Pytorch 2.2.2+cpu - Datasets 2.19.0 - Tokenizers 0.19.1
[ "# Codellama-finetuned-code\r\n\r\nThis model was trained from scratch on the None dataset.", "## Model description\r\n\r\nMore information needed", "## Intended uses & limitations\r\n\r\nMore information needed", "## Training and evaluation data\r\n\r\nMore information needed", "## Training procedure", "### Training hyperparameters\r\n\r\nThe following hyperparameters were used during training:\r\n- learning_rate: 0.0002\r\n- train_batch_size: 4\r\n- eval_batch_size: 8\r\n- seed: 42\r\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\r\n- lr_scheduler_type: cosine\r\n- training_steps: 20", "### Training results", "### Framework versions\r\n\r\n- PEFT 0.6.0.dev0\r\n- Transformers 4.40.1\r\n- Pytorch 2.2.2+cpu\r\n- Datasets 2.19.0\r\n- Tokenizers 0.19.1" ]
[ "TAGS\n#peft #tensorboard #safetensors #generated_from_trainer #region-us \n", "# Codellama-finetuned-code\r\n\r\nThis model was trained from scratch on the None dataset.", "## Model description\r\n\r\nMore information needed", "## Intended uses & limitations\r\n\r\nMore information needed", "## Training and evaluation data\r\n\r\nMore information needed", "## Training procedure", "### Training hyperparameters\r\n\r\nThe following hyperparameters were used during training:\r\n- learning_rate: 0.0002\r\n- train_batch_size: 4\r\n- eval_batch_size: 8\r\n- seed: 42\r\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\r\n- lr_scheduler_type: cosine\r\n- training_steps: 20", "### Training results", "### Framework versions\r\n\r\n- PEFT 0.6.0.dev0\r\n- Transformers 4.40.1\r\n- Pytorch 2.2.2+cpu\r\n- Datasets 2.19.0\r\n- Tokenizers 0.19.1" ]
[ 21, 22, 7, 9, 9, 4, 93, 5, 53 ]
[ "TAGS\n#peft #tensorboard #safetensors #generated_from_trainer #region-us \n# Codellama-finetuned-code\r\n\r\nThis model was trained from scratch on the None dataset.## Model description\r\n\r\nMore information needed## Intended uses & limitations\r\n\r\nMore information needed## Training and evaluation data\r\n\r\nMore information needed## Training procedure### Training hyperparameters\r\n\r\nThe following hyperparameters were used during training:\r\n- learning_rate: 0.0002\r\n- train_batch_size: 4\r\n- eval_batch_size: 8\r\n- seed: 42\r\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\r\n- lr_scheduler_type: cosine\r\n- training_steps: 20### Training results### Framework versions\r\n\r\n- PEFT 0.6.0.dev0\r\n- Transformers 4.40.1\r\n- Pytorch 2.2.2+cpu\r\n- Datasets 2.19.0\r\n- Tokenizers 0.19.1" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_EMP_H3K79me3-seqsight_32768_512_30M-L32_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_EMP_H3K79me3](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_EMP_H3K79me3) dataset. It achieves the following results on the evaluation set: - Loss: 0.4398 - F1 Score: 0.8086 - Accuracy: 0.8093 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:| | 0.5218 | 1.1 | 200 | 0.4583 | 0.8048 | 0.8058 | | 0.4698 | 2.21 | 400 | 0.4506 | 0.8055 | 0.8069 | | 0.458 | 3.31 | 600 | 0.4579 | 0.7928 | 0.7954 | | 0.442 | 4.42 | 800 | 0.4569 | 0.7951 | 0.7975 | | 0.4381 | 5.52 | 1000 | 0.4499 | 0.8073 | 0.8093 | | 0.4275 | 6.63 | 1200 | 0.4617 | 0.7983 | 0.8010 | | 0.4286 | 7.73 | 1400 | 0.4585 | 0.7964 | 0.7989 | | 0.4188 | 8.84 | 1600 | 0.4737 | 0.7904 | 0.7940 | | 0.4157 | 9.94 | 1800 | 0.4370 | 0.8080 | 0.8086 | | 0.4119 | 11.05 | 2000 | 0.4548 | 0.7998 | 0.8013 | | 0.409 | 12.15 | 2200 | 0.4337 | 0.8122 | 0.8128 | | 0.4028 | 13.26 | 2400 | 0.4539 | 0.7992 | 0.8013 | | 0.4002 | 14.36 | 2600 | 0.4392 | 0.8158 | 0.8162 | | 0.3978 | 15.47 | 2800 | 0.4501 | 0.8042 | 0.8058 | | 0.3899 | 16.57 | 3000 | 0.4450 | 0.8023 | 0.8024 | | 0.3895 | 17.68 | 3200 | 0.4433 | 0.8122 | 0.8124 | | 0.3836 | 18.78 | 3400 | 0.4752 | 0.7933 | 0.7961 | | 0.3794 | 19.89 | 3600 | 0.4526 | 0.8081 | 0.8096 | | 0.378 | 20.99 | 3800 | 0.4687 | 0.7927 | 0.7951 | | 0.3705 | 22.1 | 4000 | 0.4535 | 0.8100 | 0.8107 | | 0.3705 | 23.2 | 4200 | 0.4610 | 0.8054 | 0.8065 | | 0.3683 | 24.31 | 4400 | 0.4735 | 0.7959 | 0.7982 | | 0.3578 | 25.41 | 4600 | 0.4592 | 0.8075 | 0.8079 | | 0.3582 | 26.52 | 4800 | 0.4762 | 0.7985 | 0.7999 | | 0.3595 | 27.62 | 5000 | 0.4640 | 0.8012 | 0.8024 | | 0.3503 | 28.73 | 5200 | 0.4704 | 0.8008 | 0.8024 | | 0.3448 | 29.83 | 5400 | 0.4623 | 0.8059 | 0.8065 | | 0.3459 | 30.94 | 5600 | 0.4716 | 0.8021 | 0.8031 | | 0.342 | 32.04 | 5800 | 0.4681 | 0.8033 | 0.8041 | | 0.339 | 33.15 | 6000 | 0.4785 | 0.7992 | 0.8006 | | 0.335 | 34.25 | 6200 | 0.4910 | 0.7936 | 0.7947 | | 0.3323 | 35.36 | 6400 | 0.4938 | 0.8028 | 0.8044 | | 0.3324 | 36.46 | 6600 | 0.4806 | 0.8061 | 0.8072 | | 0.3283 | 37.57 | 6800 | 0.4998 | 0.7955 | 0.7975 | | 0.327 | 38.67 | 7000 | 0.4950 | 0.7969 | 0.7989 | | 0.3219 | 39.78 | 7200 | 0.5078 | 0.7965 | 0.7989 | | 0.3193 | 40.88 | 7400 | 0.4910 | 0.7944 | 0.7954 | | 0.3203 | 41.99 | 7600 | 0.4877 | 0.8009 | 0.8020 | | 0.3157 | 43.09 | 7800 | 0.5048 | 0.7975 | 0.7992 | | 0.3195 | 44.2 | 8000 | 0.4973 | 0.7969 | 0.7982 | | 0.3137 | 45.3 | 8200 | 0.4954 | 0.7987 | 0.7999 | | 0.3148 | 46.41 | 8400 | 0.4947 | 0.7992 | 0.8003 | | 0.3134 | 47.51 | 8600 | 0.5113 | 0.7963 | 0.7982 | | 0.3075 | 48.62 | 8800 | 0.5066 | 0.7974 | 0.7989 | | 0.3102 | 
49.72 | 9000 | 0.5083 | 0.7986 | 0.8003 | | 0.3115 | 50.83 | 9200 | 0.5146 | 0.7956 | 0.7979 | | 0.3053 | 51.93 | 9400 | 0.5082 | 0.7981 | 0.7996 | | 0.3046 | 53.04 | 9600 | 0.5085 | 0.7984 | 0.7999 | | 0.305 | 54.14 | 9800 | 0.5078 | 0.7989 | 0.8003 | | 0.3031 | 55.25 | 10000 | 0.5112 | 0.7991 | 0.8006 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_EMP_H3K79me3-seqsight_32768_512_30M-L32_f", "results": []}]}
mahdibaghbanzadeh/GUE_EMP_H3K79me3-seqsight_32768_512_30M-L32_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T04:50:42+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_EMP\_H3K79me3-seqsight\_32768\_512\_30M-L32\_f =================================================== This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_EMP\_H3K79me3 dataset. It achieves the following results on the evaluation set: * Loss: 0.4398 * F1 Score: 0.8086 * Accuracy: 0.8093 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
null
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
hi000000/insta_merged_llama2_koen
null
[ "transformers", "safetensors", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
2024-04-30T04:51:34+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #arxiv-1910.09700 #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #arxiv-1910.09700 #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 26, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #arxiv-1910.09700 #endpoints_compatible #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
multiple-choice
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased-finetuned-swag This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the swag dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 8 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Framework versions - Transformers 4.40.1 - Pytorch 2.2.0+cu121 - Datasets 2.19.0 - Tokenizers 0.19.1
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "datasets": ["swag"], "base_model": "bert-base-uncased", "model-index": [{"name": "bert-base-uncased-finetuned-swag", "results": []}]}
souraviithmds/bert-base-uncased-finetuned-swag
null
[ "transformers", "safetensors", "bert", "multiple-choice", "generated_from_trainer", "dataset:swag", "base_model:bert-base-uncased", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-04-30T04:54:59+00:00
[]
[]
TAGS #transformers #safetensors #bert #multiple-choice #generated_from_trainer #dataset-swag #base_model-bert-base-uncased #license-apache-2.0 #endpoints_compatible #region-us
# bert-base-uncased-finetuned-swag This model is a fine-tuned version of bert-base-uncased on the swag dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 8 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Framework versions - Transformers 4.40.1 - Pytorch 2.2.0+cu121 - Datasets 2.19.0 - Tokenizers 0.19.1
[ "# bert-base-uncased-finetuned-swag\n\nThis model is a fine-tuned version of bert-base-uncased on the swag dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 5e-05\n- train_batch_size: 16\n- eval_batch_size: 16\n- seed: 42\n- gradient_accumulation_steps: 8\n- total_train_batch_size: 128\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 10", "### Framework versions\n\n- Transformers 4.40.1\n- Pytorch 2.2.0+cu121\n- Datasets 2.19.0\n- Tokenizers 0.19.1" ]
[ "TAGS\n#transformers #safetensors #bert #multiple-choice #generated_from_trainer #dataset-swag #base_model-bert-base-uncased #license-apache-2.0 #endpoints_compatible #region-us \n", "# bert-base-uncased-finetuned-swag\n\nThis model is a fine-tuned version of bert-base-uncased on the swag dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 5e-05\n- train_batch_size: 16\n- eval_batch_size: 16\n- seed: 42\n- gradient_accumulation_steps: 8\n- total_train_batch_size: 128\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 10", "### Framework versions\n\n- Transformers 4.40.1\n- Pytorch 2.2.0+cu121\n- Datasets 2.19.0\n- Tokenizers 0.19.1" ]
[ 53, 36, 7, 9, 9, 4, 111, 44 ]
[ "TAGS\n#transformers #safetensors #bert #multiple-choice #generated_from_trainer #dataset-swag #base_model-bert-base-uncased #license-apache-2.0 #endpoints_compatible #region-us \n# bert-base-uncased-finetuned-swag\n\nThis model is a fine-tuned version of bert-base-uncased on the swag dataset.## Model description\n\nMore information needed## Intended uses & limitations\n\nMore information needed## Training and evaluation data\n\nMore information needed## Training procedure### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 5e-05\n- train_batch_size: 16\n- eval_batch_size: 16\n- seed: 42\n- gradient_accumulation_steps: 8\n- total_train_batch_size: 128\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 10### Framework versions\n\n- Transformers 4.40.1\n- Pytorch 2.2.0+cu121\n- Datasets 2.19.0\n- Tokenizers 0.19.1" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_EMP_H3K4me1-seqsight_32768_512_30M-L1_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_EMP_H3K4me1](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_EMP_H3K4me1) dataset. It achieves the following results on the evaluation set: - Loss: 0.5440 - F1 Score: 0.7349 - Accuracy: 0.7383 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:| | 0.6424 | 1.01 | 200 | 0.6210 | 0.6590 | 0.6664 | | 0.6155 | 2.02 | 400 | 0.6108 | 0.6790 | 0.6828 | | 0.6067 | 3.03 | 600 | 0.6060 | 0.6894 | 0.6929 | | 0.6019 | 4.04 | 800 | 0.6108 | 0.6895 | 0.6960 | | 0.595 | 5.05 | 1000 | 0.5984 | 0.7027 | 0.7061 | | 0.5902 | 6.06 | 1200 | 0.6042 | 0.6975 | 0.7052 | | 0.5843 | 7.07 | 1400 | 0.5939 | 0.7048 | 0.7109 | | 0.5808 | 8.08 | 1600 | 0.5890 | 0.7063 | 0.7124 | | 0.5763 | 9.09 | 1800 | 0.5817 | 0.7119 | 0.7172 | | 0.5732 | 10.1 | 2000 | 0.5760 | 0.7151 | 0.7188 | | 0.5686 | 11.11 | 2200 | 0.5833 | 0.7101 | 0.7162 | | 0.5668 | 12.12 | 2400 | 0.5794 | 0.7143 | 0.7188 | | 0.5665 | 13.13 | 2600 | 0.5766 | 0.7160 | 0.7213 | | 0.5648 | 14.14 | 2800 | 0.5742 | 0.7160 | 0.7216 | | 0.5622 | 15.15 | 3000 | 0.5733 | 0.7134 | 0.7191 | | 0.562 | 16.16 | 3200 | 0.5752 | 0.7125 | 0.7194 | | 0.5568 | 17.17 | 3400 | 0.5767 | 0.7167 | 0.7222 | | 0.558 | 18.18 | 3600 | 0.5650 | 0.7216 | 0.7257 | | 0.5545 | 19.19 | 3800 | 0.5738 | 0.7115 | 0.7178 | | 0.557 | 20.2 | 4000 | 0.5695 | 0.7176 | 0.7232 | | 0.5529 | 21.21 | 4200 | 0.5781 | 0.7164 | 0.7235 | | 0.5537 | 22.22 | 4400 | 0.5648 | 0.7220 | 0.7266 | | 0.5492 | 23.23 | 4600 | 0.5744 | 0.7160 | 0.7232 | | 0.5556 | 24.24 | 4800 | 0.5680 | 0.7210 | 0.7276 | | 0.5482 | 25.25 | 5000 | 0.5586 | 0.7319 | 0.7355 | | 0.5513 | 26.26 | 5200 | 0.5580 | 0.7296 | 0.7333 | | 0.5481 | 27.27 | 5400 | 0.5586 | 0.7264 | 0.7311 | | 0.5485 | 28.28 | 5600 | 0.5556 | 0.7332 | 0.7364 | | 0.5508 | 29.29 | 5800 | 0.5675 | 0.7203 | 0.7270 | | 0.5437 | 30.3 | 6000 | 0.5591 | 0.7288 | 0.7333 | | 0.5467 | 31.31 | 6200 | 0.5600 | 0.7276 | 0.7330 | | 0.5478 | 32.32 | 6400 | 0.5695 | 0.7179 | 0.7251 | | 0.5459 | 33.33 | 6600 | 0.5659 | 0.7203 | 0.7273 | | 0.5442 | 34.34 | 6800 | 0.5652 | 0.7222 | 0.7289 | | 0.5435 | 35.35 | 7000 | 0.5568 | 0.7291 | 0.7330 | | 0.5473 | 36.36 | 7200 | 0.5567 | 0.7278 | 0.7326 | | 0.5456 | 37.37 | 7400 | 0.5559 | 0.7296 | 0.7345 | | 0.5413 | 38.38 | 7600 | 0.5552 | 0.7327 | 0.7364 | | 0.5418 | 39.39 | 7800 | 0.5554 | 0.7306 | 0.7348 | | 0.5437 | 40.4 | 8000 | 0.5586 | 0.7302 | 0.7348 | | 0.5427 | 41.41 | 8200 | 0.5597 | 0.7251 | 0.7311 | | 0.544 | 42.42 | 8400 | 0.5618 | 0.7230 | 0.7292 | | 0.5416 | 43.43 | 8600 | 0.5600 | 0.7245 | 0.7301 | | 0.5392 | 44.44 | 8800 | 0.5574 | 0.7291 | 0.7339 | | 0.5426 | 
45.45 | 9000 | 0.5568 | 0.7291 | 0.7336 | | 0.5408 | 46.46 | 9200 | 0.5592 | 0.7247 | 0.7301 | | 0.5447 | 47.47 | 9400 | 0.5584 | 0.7262 | 0.7317 | | 0.5387 | 48.48 | 9600 | 0.5595 | 0.7242 | 0.7298 | | 0.5456 | 49.49 | 9800 | 0.5580 | 0.7264 | 0.7317 | | 0.5423 | 50.51 | 10000 | 0.5575 | 0.7263 | 0.7314 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_EMP_H3K4me1-seqsight_32768_512_30M-L1_f", "results": []}]}
mahdibaghbanzadeh/GUE_EMP_H3K4me1-seqsight_32768_512_30M-L1_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T04:56:06+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_EMP\_H3K4me1-seqsight\_32768\_512\_30M-L1\_f ================================================= This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_EMP\_H3K4me1 dataset. It achieves the following results on the evaluation set: * Loss: 0.5440 * F1 Score: 0.7349 * Accuracy: 0.7383 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_EMP_H3K4me1-seqsight_32768_512_30M-L8_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_EMP_H3K4me1](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_EMP_H3K4me1) dataset. It achieves the following results on the evaluation set: - Loss: 0.5415 - F1 Score: 0.7414 - Accuracy: 0.7440 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:| | 0.6328 | 1.01 | 200 | 0.6200 | 0.6632 | 0.6723 | | 0.6045 | 2.02 | 400 | 0.6039 | 0.6923 | 0.6979 | | 0.5895 | 3.03 | 600 | 0.5887 | 0.7041 | 0.7090 | | 0.5776 | 4.04 | 800 | 0.5877 | 0.7050 | 0.7118 | | 0.5679 | 5.05 | 1000 | 0.5737 | 0.7163 | 0.7203 | | 0.5625 | 6.06 | 1200 | 0.5807 | 0.7063 | 0.7143 | | 0.5572 | 7.07 | 1400 | 0.5668 | 0.7217 | 0.7263 | | 0.5537 | 8.08 | 1600 | 0.5677 | 0.7156 | 0.7219 | | 0.5499 | 9.09 | 1800 | 0.5558 | 0.7256 | 0.7301 | | 0.5459 | 10.1 | 2000 | 0.5559 | 0.7248 | 0.7298 | | 0.5406 | 11.11 | 2200 | 0.5610 | 0.7191 | 0.7257 | | 0.5389 | 12.12 | 2400 | 0.5545 | 0.7272 | 0.7317 | | 0.5364 | 13.13 | 2600 | 0.5530 | 0.7344 | 0.7390 | | 0.5367 | 14.14 | 2800 | 0.5472 | 0.7384 | 0.7424 | | 0.531 | 15.15 | 3000 | 0.5516 | 0.7329 | 0.7377 | | 0.5313 | 16.16 | 3200 | 0.5513 | 0.7280 | 0.7336 | | 0.5251 | 17.17 | 3400 | 0.5542 | 0.7397 | 0.7431 | | 0.5274 | 18.18 | 3600 | 0.5477 | 0.7361 | 0.7396 | | 0.5238 | 19.19 | 3800 | 0.5453 | 0.7375 | 0.7418 | | 0.5233 | 20.2 | 4000 | 0.5501 | 0.7348 | 0.7390 | | 0.5211 | 21.21 | 4200 | 0.5565 | 0.7316 | 0.7371 | | 0.5185 | 22.22 | 4400 | 0.5518 | 0.7428 | 0.7459 | | 0.5173 | 23.23 | 4600 | 0.5546 | 0.7332 | 0.7393 | | 0.5206 | 24.24 | 4800 | 0.5519 | 0.7273 | 0.7333 | | 0.513 | 25.25 | 5000 | 0.5423 | 0.7419 | 0.7443 | | 0.517 | 26.26 | 5200 | 0.5424 | 0.7448 | 0.7459 | | 0.5136 | 27.27 | 5400 | 0.5468 | 0.7393 | 0.7437 | | 0.5117 | 28.28 | 5600 | 0.5417 | 0.7437 | 0.7456 | | 0.517 | 29.29 | 5800 | 0.5555 | 0.7292 | 0.7361 | | 0.5067 | 30.3 | 6000 | 0.5485 | 0.7369 | 0.7405 | | 0.5108 | 31.31 | 6200 | 0.5455 | 0.7386 | 0.7421 | | 0.5093 | 32.32 | 6400 | 0.5500 | 0.7384 | 0.7431 | | 0.5081 | 33.33 | 6600 | 0.5540 | 0.7344 | 0.7405 | | 0.5065 | 34.34 | 6800 | 0.5463 | 0.7391 | 0.7434 | | 0.5053 | 35.35 | 7000 | 0.5452 | 0.7414 | 0.7437 | | 0.5087 | 36.36 | 7200 | 0.5438 | 0.7406 | 0.7443 | | 0.5064 | 37.37 | 7400 | 0.5428 | 0.7414 | 0.7453 | | 0.503 | 38.38 | 7600 | 0.5419 | 0.7449 | 0.7472 | | 0.5019 | 39.39 | 7800 | 0.5426 | 0.7453 | 0.7475 | | 0.5036 | 40.4 | 8000 | 0.5470 | 0.7425 | 0.7459 | | 0.504 | 41.41 | 8200 | 0.5476 | 0.7403 | 0.7449 | | 0.5043 | 42.42 | 8400 | 0.5495 | 0.7403 | 0.7446 | | 0.5015 | 43.43 | 8600 | 0.5458 | 0.7423 | 0.7459 | | 0.5001 | 44.44 | 8800 | 0.5451 | 0.7407 | 0.7443 | | 0.4993 | 
45.45 | 9000 | 0.5444 | 0.7440 | 0.7472 | | 0.4984 | 46.46 | 9200 | 0.5479 | 0.7404 | 0.7443 | | 0.5039 | 47.47 | 9400 | 0.5467 | 0.7407 | 0.7446 | | 0.4951 | 48.48 | 9600 | 0.5482 | 0.7432 | 0.7472 | | 0.5047 | 49.49 | 9800 | 0.5462 | 0.7425 | 0.7462 | | 0.5003 | 50.51 | 10000 | 0.5462 | 0.7419 | 0.7456 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_EMP_H3K4me1-seqsight_32768_512_30M-L8_f", "results": []}]}
mahdibaghbanzadeh/GUE_EMP_H3K4me1-seqsight_32768_512_30M-L8_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T04:57:03+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_EMP\_H3K4me1-seqsight\_32768\_512\_30M-L8\_f ================================================= This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_EMP\_H3K4me1 dataset. It achieves the following results on the evaluation set: * Loss: 0.5415 * F1 Score: 0.7414 * Accuracy: 0.7440 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_EMP_H3K4me1-seqsight_32768_512_30M-L32_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_EMP_H3K4me1](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_EMP_H3K4me1) dataset. It achieves the following results on the evaluation set: - Loss: 0.5543 - F1 Score: 0.7439 - Accuracy: 0.7475 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:| | 0.6259 | 1.01 | 200 | 0.6208 | 0.6637 | 0.6771 | | 0.5922 | 2.02 | 400 | 0.5916 | 0.7069 | 0.7121 | | 0.5733 | 3.03 | 600 | 0.5755 | 0.7112 | 0.7172 | | 0.5603 | 4.04 | 800 | 0.5784 | 0.7122 | 0.7184 | | 0.5535 | 5.05 | 1000 | 0.5569 | 0.7314 | 0.7339 | | 0.5474 | 6.06 | 1200 | 0.5674 | 0.7250 | 0.7314 | | 0.54 | 7.07 | 1400 | 0.5611 | 0.7265 | 0.7307 | | 0.5362 | 8.08 | 1600 | 0.5579 | 0.7254 | 0.7304 | | 0.533 | 9.09 | 1800 | 0.5576 | 0.7329 | 0.7367 | | 0.5272 | 10.1 | 2000 | 0.5598 | 0.7325 | 0.7371 | | 0.5203 | 11.11 | 2200 | 0.5623 | 0.7305 | 0.7361 | | 0.519 | 12.12 | 2400 | 0.5546 | 0.7339 | 0.7383 | | 0.5147 | 13.13 | 2600 | 0.5691 | 0.7282 | 0.7348 | | 0.5137 | 14.14 | 2800 | 0.5500 | 0.7414 | 0.7449 | | 0.5054 | 15.15 | 3000 | 0.5548 | 0.7336 | 0.7367 | | 0.5047 | 16.16 | 3200 | 0.5563 | 0.7367 | 0.7405 | | 0.4981 | 17.17 | 3400 | 0.5729 | 0.7420 | 0.7446 | | 0.4977 | 18.18 | 3600 | 0.5607 | 0.7344 | 0.7377 | | 0.4931 | 19.19 | 3800 | 0.5522 | 0.7401 | 0.7427 | | 0.4916 | 20.2 | 4000 | 0.5615 | 0.7304 | 0.7352 | | 0.4868 | 21.21 | 4200 | 0.5671 | 0.7371 | 0.7399 | | 0.4823 | 22.22 | 4400 | 0.5698 | 0.7400 | 0.7421 | | 0.4801 | 23.23 | 4600 | 0.5699 | 0.7365 | 0.7396 | | 0.4808 | 24.24 | 4800 | 0.5764 | 0.7242 | 0.7298 | | 0.4722 | 25.25 | 5000 | 0.5625 | 0.7366 | 0.7390 | | 0.4713 | 26.26 | 5200 | 0.5592 | 0.7387 | 0.7405 | | 0.4719 | 27.27 | 5400 | 0.5751 | 0.7263 | 0.7320 | | 0.4665 | 28.28 | 5600 | 0.5683 | 0.7362 | 0.7390 | | 0.4684 | 29.29 | 5800 | 0.5813 | 0.7159 | 0.7235 | | 0.4598 | 30.3 | 6000 | 0.5790 | 0.7356 | 0.7383 | | 0.4585 | 31.31 | 6200 | 0.5718 | 0.7383 | 0.7405 | | 0.4574 | 32.32 | 6400 | 0.5792 | 0.7262 | 0.7307 | | 0.4569 | 33.33 | 6600 | 0.5909 | 0.7291 | 0.7355 | | 0.4521 | 34.34 | 6800 | 0.5689 | 0.7324 | 0.7358 | | 0.4514 | 35.35 | 7000 | 0.5756 | 0.7393 | 0.7408 | | 0.4542 | 36.36 | 7200 | 0.5706 | 0.7291 | 0.7320 | | 0.4493 | 37.37 | 7400 | 0.5764 | 0.7293 | 0.7339 | | 0.4437 | 38.38 | 7600 | 0.5780 | 0.7354 | 0.7371 | | 0.4425 | 39.39 | 7800 | 0.5812 | 0.7362 | 0.7380 | | 0.4409 | 40.4 | 8000 | 0.5924 | 0.7354 | 0.7383 | | 0.4422 | 41.41 | 8200 | 0.5874 | 0.7302 | 0.7348 | | 0.4413 | 42.42 | 8400 | 0.5901 | 0.7336 | 0.7371 | | 0.4389 | 43.43 | 8600 | 0.5838 | 0.7328 | 0.7355 | | 0.4356 | 44.44 | 8800 | 0.5858 | 0.7302 | 0.7330 | | 0.4337 
| 45.45 | 9000 | 0.5856 | 0.7316 | 0.7339 | | 0.434 | 46.46 | 9200 | 0.5912 | 0.7297 | 0.7330 | | 0.4394 | 47.47 | 9400 | 0.5868 | 0.7265 | 0.7298 | | 0.4297 | 48.48 | 9600 | 0.5887 | 0.7288 | 0.7323 | | 0.4369 | 49.49 | 9800 | 0.5892 | 0.7280 | 0.7314 | | 0.4349 | 50.51 | 10000 | 0.5883 | 0.7291 | 0.7320 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_EMP_H3K4me1-seqsight_32768_512_30M-L32_f", "results": []}]}
mahdibaghbanzadeh/GUE_EMP_H3K4me1-seqsight_32768_512_30M-L32_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T04:57:49+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_EMP\_H3K4me1-seqsight\_32768\_512\_30M-L32\_f ================================================== This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_EMP\_H3K4me1 dataset. It achieves the following results on the evaluation set: * Loss: 0.5543 * F1 Score: 0.7439 * Accuracy: 0.7475 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
text-classification
transformers
# Model Trained Using AutoTrain - Problem type: Text Classification ## Validation Metrics loss: 1.0650867223739624 f1_macro: 0.2095479509928179 f1_micro: 0.4584103512014787 f1_weighted: 0.2881768494245037 precision_macro: 0.1528034504004929 precision_micro: 0.4584103512014787 precision_weighted: 0.21014005008866307 recall_macro: 0.3333333333333333 recall_micro: 0.4584103512014787 recall_weighted: 0.4584103512014787 accuracy: 0.4584103512014787
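One way to read these numbers: a `recall_macro` of exactly 1/3 together with `precision_macro` ≈ accuracy / 3 is the pattern produced by a 3-class classifier that predicts a single class for every example. A toy scikit-learn reconstruction (synthetic labels, not the actual AutoTrain data) reproduces the same metric shape:

```python
import numpy as np
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score

# Toy reconstruction: 3 classes, classifier always predicts class 0,
# class 0 makes up ~45.8% of the labels (matching the card's accuracy).
rng = np.random.default_rng(0)
y_true = rng.choice([0, 1, 2], size=10_000, p=[0.4584, 0.2708, 0.2708])
y_pred = np.zeros_like(y_true)

print(accuracy_score(y_true, y_pred))                                      # ~0.458
print(recall_score(y_true, y_pred, average="macro"))                       # ~0.333
print(precision_score(y_true, y_pred, average="macro", zero_division=0))   # ~0.153
print(f1_score(y_true, y_pred, average="macro", zero_division=0))          # ~0.21
```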
{"tags": ["autotrain", "text-classification"], "datasets": ["autotrain-byt8e-zygc3/autotrain-data"], "widget": [{"text": "I love AutoTrain"}]}
Akhil-9640/autotrain-byt8e-zygc3
null
[ "transformers", "tensorboard", "safetensors", "bert", "text-classification", "autotrain", "dataset:autotrain-byt8e-zygc3/autotrain-data", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-30T04:58:14+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #bert #text-classification #autotrain #dataset-autotrain-byt8e-zygc3/autotrain-data #autotrain_compatible #endpoints_compatible #region-us
# Model Trained Using AutoTrain - Problem type: Text Classification ## Validation Metrics loss: 1.0650867223739624 f1_macro: 0.2095479509928179 f1_micro: 0.4584103512014787 f1_weighted: 0.2881768494245037 precision_macro: 0.1528034504004929 precision_micro: 0.4584103512014787 precision_weighted: 0.21014005008866307 recall_macro: 0.3333333333333333 recall_micro: 0.4584103512014787 recall_weighted: 0.4584103512014787 accuracy: 0.4584103512014787
[ "# Model Trained Using AutoTrain\n\n- Problem type: Text Classification", "## Validation Metrics\nloss: 1.0650867223739624\n\nf1_macro: 0.2095479509928179\n\nf1_micro: 0.4584103512014787\n\nf1_weighted: 0.2881768494245037\n\nprecision_macro: 0.1528034504004929\n\nprecision_micro: 0.4584103512014787\n\nprecision_weighted: 0.21014005008866307\n\nrecall_macro: 0.3333333333333333\n\nrecall_micro: 0.4584103512014787\n\nrecall_weighted: 0.4584103512014787\n\naccuracy: 0.4584103512014787" ]
[ "TAGS\n#transformers #tensorboard #safetensors #bert #text-classification #autotrain #dataset-autotrain-byt8e-zygc3/autotrain-data #autotrain_compatible #endpoints_compatible #region-us \n", "# Model Trained Using AutoTrain\n\n- Problem type: Text Classification", "## Validation Metrics\nloss: 1.0650867223739624\n\nf1_macro: 0.2095479509928179\n\nf1_micro: 0.4584103512014787\n\nf1_weighted: 0.2881768494245037\n\nprecision_macro: 0.1528034504004929\n\nprecision_micro: 0.4584103512014787\n\nprecision_weighted: 0.21014005008866307\n\nrecall_macro: 0.3333333333333333\n\nrecall_micro: 0.4584103512014787\n\nrecall_weighted: 0.4584103512014787\n\naccuracy: 0.4584103512014787" ]
[ 54, 12, 169 ]
[ "TAGS\n#transformers #tensorboard #safetensors #bert #text-classification #autotrain #dataset-autotrain-byt8e-zygc3/autotrain-data #autotrain_compatible #endpoints_compatible #region-us \n# Model Trained Using AutoTrain\n\n- Problem type: Text Classification## Validation Metrics\nloss: 1.0650867223739624\n\nf1_macro: 0.2095479509928179\n\nf1_micro: 0.4584103512014787\n\nf1_weighted: 0.2881768494245037\n\nprecision_macro: 0.1528034504004929\n\nprecision_micro: 0.4584103512014787\n\nprecision_weighted: 0.21014005008866307\n\nrecall_macro: 0.3333333333333333\n\nrecall_micro: 0.4584103512014787\n\nrecall_weighted: 0.4584103512014787\n\naccuracy: 0.4584103512014787" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mistral_envs_claim_finetune1 This model is a fine-tuned version of [mistralai/Mistral-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 5 - total_train_batch_size: 40 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - training_steps: 100 - mixed_precision_training: Native AMP ### Training results ### Framework versions - PEFT 0.10.0 - Transformers 4.40.1 - Pytorch 2.1.0a0+29c30b1 - Datasets 2.19.0 - Tokenizers 0.19.1
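A hedged reconstruction of the training configuration above using `transformers.TrainingArguments`; the output directory and the choice of `fp16` are assumptions (the card only states "Native AMP" mixed precision). Note the effective batch size: 8 per device × 5 accumulation steps = 40, matching `total_train_batch_size`:

```python
from transformers import TrainingArguments

# Mirrors the hyperparameters listed in the card; effective batch = 8 * 5 = 40.
training_args = TrainingArguments(
    output_dir="mistral_envs_claim_finetune1",  # assumed name
    learning_rate=2e-4,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=5,
    seed=42,
    lr_scheduler_type="cosine",
    max_steps=100,
    fp16=True,  # "Native AMP" mixed precision; fp16 rather than bf16 is an assumption
)
```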
{"license": "apache-2.0", "library_name": "peft", "tags": ["trl", "sft", "generated_from_trainer"], "base_model": "mistralai/Mistral-7B-Instruct-v0.2", "model-index": [{"name": "mistral_envs_claim_finetune1", "results": []}]}
Haimee/mistral_envs_claim_finetune1
null
[ "peft", "tensorboard", "safetensors", "trl", "sft", "generated_from_trainer", "base_model:mistralai/Mistral-7B-Instruct-v0.2", "license:apache-2.0", "region:us" ]
null
2024-04-30T04:58:22+00:00
[]
[]
TAGS #peft #tensorboard #safetensors #trl #sft #generated_from_trainer #base_model-mistralai/Mistral-7B-Instruct-v0.2 #license-apache-2.0 #region-us
# mistral_envs_claim_finetune1 This model is a fine-tuned version of mistralai/Mistral-7B-Instruct-v0.2 on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 5 - total_train_batch_size: 40 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - training_steps: 100 - mixed_precision_training: Native AMP ### Training results ### Framework versions - PEFT 0.10.0 - Transformers 4.40.1 - Pytorch 2.1.0a0+29c30b1 - Datasets 2.19.0 - Tokenizers 0.19.1
[ "# mistral_envs_claim_finetune1\n\nThis model is a fine-tuned version of mistralai/Mistral-7B-Instruct-v0.2 on the None dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 0.0002\n- train_batch_size: 8\n- eval_batch_size: 8\n- seed: 42\n- gradient_accumulation_steps: 5\n- total_train_batch_size: 40\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: cosine\n- training_steps: 100\n- mixed_precision_training: Native AMP", "### Training results", "### Framework versions\n\n- PEFT 0.10.0\n- Transformers 4.40.1\n- Pytorch 2.1.0a0+29c30b1\n- Datasets 2.19.0\n- Tokenizers 0.19.1" ]
[ "TAGS\n#peft #tensorboard #safetensors #trl #sft #generated_from_trainer #base_model-mistralai/Mistral-7B-Instruct-v0.2 #license-apache-2.0 #region-us \n", "# mistral_envs_claim_finetune1\n\nThis model is a fine-tuned version of mistralai/Mistral-7B-Instruct-v0.2 on the None dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 0.0002\n- train_batch_size: 8\n- eval_batch_size: 8\n- seed: 42\n- gradient_accumulation_steps: 5\n- total_train_batch_size: 40\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: cosine\n- training_steps: 100\n- mixed_precision_training: Native AMP", "### Training results", "### Framework versions\n\n- PEFT 0.10.0\n- Transformers 4.40.1\n- Pytorch 2.1.0a0+29c30b1\n- Datasets 2.19.0\n- Tokenizers 0.19.1" ]
[ 57, 45, 7, 9, 9, 4, 120, 5, 56 ]
[ "TAGS\n#peft #tensorboard #safetensors #trl #sft #generated_from_trainer #base_model-mistralai/Mistral-7B-Instruct-v0.2 #license-apache-2.0 #region-us \n# mistral_envs_claim_finetune1\n\nThis model is a fine-tuned version of mistralai/Mistral-7B-Instruct-v0.2 on the None dataset.## Model description\n\nMore information needed## Intended uses & limitations\n\nMore information needed## Training and evaluation data\n\nMore information needed## Training procedure### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 0.0002\n- train_batch_size: 8\n- eval_batch_size: 8\n- seed: 42\n- gradient_accumulation_steps: 5\n- total_train_batch_size: 40\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: cosine\n- training_steps: 100\n- mixed_precision_training: Native AMP### Training results### Framework versions\n\n- PEFT 0.10.0\n- Transformers 4.40.1\n- Pytorch 2.1.0a0+29c30b1\n- Datasets 2.19.0\n- Tokenizers 0.19.1" ]
text-generation
null
# Smart-Lemon-Cookie-7B These are GGUFs for the following model: https://huggingface.co/FallenMerick/Smart-Lemon-Cookie-7B
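A minimal sketch of loading one of these GGUF files with `llama-cpp-python`; the exact quantization filename below is an assumption, so substitute whichever file you download from the repository:

```python
from llama_cpp import Llama

llm = Llama(
    model_path="Smart-Lemon-Cookie-7B.Q4_K_M.gguf",  # assumed filename
    n_ctx=4096,
    n_gpu_layers=-1,  # offload all layers to the GPU if one is available
)
out = llm("Write a short poem about lemon cookies.", max_tokens=64, temperature=0.8)
print(out["choices"][0]["text"])
```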
{"tags": ["quantized", "4-bit", "6-bit", "8-bit", "GGUF", "merge", "mistral", "text-generation"], "model_name": "Smart-Lemon-Cookie-7B", "base_model": ["FallenMerick/Smart-Lemon-Cookie-7B"], "model_type": "mistral", "pipeline_tag": "text-generation"}
FallenMerick/Smart-Lemon-Cookie-7B-GGUF
null
[ "gguf", "quantized", "4-bit", "6-bit", "8-bit", "GGUF", "merge", "mistral", "text-generation", "base_model:FallenMerick/Smart-Lemon-Cookie-7B", "region:us" ]
null
2024-04-30T04:59:51+00:00
[]
[]
TAGS #gguf #quantized #4-bit #6-bit #8-bit #GGUF #merge #mistral #text-generation #base_model-FallenMerick/Smart-Lemon-Cookie-7B #region-us
# Smart-Lemon-Cookie-7B These are GGUFs for the following model: URL
[ "# Smart-Lemon-Cookie-7B\n\nThese are GGUFs for the following model:\n\nURL" ]
[ "TAGS\n#gguf #quantized #4-bit #6-bit #8-bit #GGUF #merge #mistral #text-generation #base_model-FallenMerick/Smart-Lemon-Cookie-7B #region-us \n", "# Smart-Lemon-Cookie-7B\n\nThese are GGUFs for the following model:\n\nURL" ]
[ 54, 21 ]
[ "TAGS\n#gguf #quantized #4-bit #6-bit #8-bit #GGUF #merge #mistral #text-generation #base_model-FallenMerick/Smart-Lemon-Cookie-7B #region-us \n# Smart-Lemon-Cookie-7B\n\nThese are GGUFs for the following model:\n\nURL" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_EMP_H3K36me3-seqsight_32768_512_30M-L1_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_EMP_H3K36me3](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_EMP_H3K36me3) dataset. It achieves the following results on the evaluation set: - Loss: 0.5059 - F1 Score: 0.7648 - Accuracy: 0.7689 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:| | 0.5967 | 0.92 | 200 | 0.5618 | 0.7187 | 0.7205 | | 0.5486 | 1.83 | 400 | 0.5608 | 0.7245 | 0.7276 | | 0.5437 | 2.75 | 600 | 0.5551 | 0.7270 | 0.7308 | | 0.5434 | 3.67 | 800 | 0.5425 | 0.7363 | 0.7388 | | 0.5285 | 4.59 | 1000 | 0.5445 | 0.7306 | 0.7351 | | 0.5253 | 5.5 | 1200 | 0.5368 | 0.7363 | 0.7408 | | 0.5228 | 6.42 | 1400 | 0.5290 | 0.7489 | 0.7517 | | 0.5189 | 7.34 | 1600 | 0.5323 | 0.7486 | 0.7526 | | 0.5096 | 8.26 | 1800 | 0.5406 | 0.7426 | 0.7480 | | 0.514 | 9.17 | 2000 | 0.5273 | 0.7471 | 0.7517 | | 0.5067 | 10.09 | 2200 | 0.5402 | 0.7334 | 0.7411 | | 0.5037 | 11.01 | 2400 | 0.5333 | 0.7423 | 0.7491 | | 0.5029 | 11.93 | 2600 | 0.5189 | 0.7584 | 0.7626 | | 0.5012 | 12.84 | 2800 | 0.5240 | 0.7515 | 0.7569 | | 0.4992 | 13.76 | 3000 | 0.5303 | 0.7487 | 0.7549 | | 0.4945 | 14.68 | 3200 | 0.5157 | 0.7587 | 0.7623 | | 0.4989 | 15.6 | 3400 | 0.5272 | 0.7480 | 0.7546 | | 0.494 | 16.51 | 3600 | 0.5181 | 0.7580 | 0.7623 | | 0.4956 | 17.43 | 3800 | 0.5110 | 0.7616 | 0.7655 | | 0.4948 | 18.35 | 4000 | 0.5128 | 0.7577 | 0.7620 | | 0.4895 | 19.27 | 4200 | 0.5187 | 0.7564 | 0.7612 | | 0.4906 | 20.18 | 4400 | 0.5268 | 0.7515 | 0.7577 | | 0.4897 | 21.1 | 4600 | 0.5191 | 0.7559 | 0.7609 | | 0.4906 | 22.02 | 4800 | 0.5228 | 0.7518 | 0.7577 | | 0.4886 | 22.94 | 5000 | 0.5138 | 0.7570 | 0.7618 | | 0.491 | 23.85 | 5200 | 0.5219 | 0.7544 | 0.7603 | | 0.4864 | 24.77 | 5400 | 0.5209 | 0.7532 | 0.7589 | | 0.4877 | 25.69 | 5600 | 0.5162 | 0.7584 | 0.7632 | | 0.4857 | 26.61 | 5800 | 0.5114 | 0.7611 | 0.7652 | | 0.4835 | 27.52 | 6000 | 0.5288 | 0.7529 | 0.7592 | | 0.4856 | 28.44 | 6200 | 0.5212 | 0.7543 | 0.7600 | | 0.4822 | 29.36 | 6400 | 0.5268 | 0.7526 | 0.7589 | | 0.4843 | 30.28 | 6600 | 0.5184 | 0.7567 | 0.7623 | | 0.4828 | 31.19 | 6800 | 0.5090 | 0.7641 | 0.7678 | | 0.4841 | 32.11 | 7000 | 0.5186 | 0.7572 | 0.7626 | | 0.4829 | 33.03 | 7200 | 0.5139 | 0.7603 | 0.7649 | | 0.4811 | 33.94 | 7400 | 0.5169 | 0.7599 | 0.7646 | | 0.4815 | 34.86 | 7600 | 0.5167 | 0.7572 | 0.7623 | | 0.4821 | 35.78 | 7800 | 0.5166 | 0.7570 | 0.7623 | | 0.4837 | 36.7 | 8000 | 0.5127 | 0.7610 | 0.7655 | | 0.48 | 37.61 | 8200 | 0.5224 | 0.7556 | 0.7618 | | 0.4814 | 38.53 | 8400 | 0.5110 | 0.7612 | 0.7655 | | 0.4804 | 39.45 | 8600 | 0.5167 | 0.7603 | 0.7655 | | 0.4793 | 40.37 | 8800 | 0.5207 | 0.7599 | 0.7652 | | 0.4834 
| 41.28 | 9000 | 0.5159 | 0.7587 | 0.7640 | | 0.4779 | 42.2 | 9200 | 0.5116 | 0.7625 | 0.7666 | | 0.4795 | 43.12 | 9400 | 0.5130 | 0.7631 | 0.7675 | | 0.4823 | 44.04 | 9600 | 0.5153 | 0.7594 | 0.7646 | | 0.4815 | 44.95 | 9800 | 0.5167 | 0.7593 | 0.7646 | | 0.4804 | 45.87 | 10000 | 0.5155 | 0.7592 | 0.7643 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
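For illustration, the linear learning-rate schedule listed above (lr 5e-4 decayed over 10,000 steps) can be sketched with `get_linear_schedule_with_warmup`; the use of AdamW and zero warmup steps are assumptions, since the card only names "Adam" and a linear scheduler:

```python
import torch
from transformers import get_linear_schedule_with_warmup

# Dummy parameter just to drive the optimizer/scheduler and inspect the LR curve.
params = [torch.nn.Parameter(torch.zeros(1))]
optimizer = torch.optim.AdamW(params, lr=5e-4, betas=(0.9, 0.999), eps=1e-8)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=10_000)

for step in range(10_000):
    optimizer.step()
    scheduler.step()
    if step in (0, 4999, 9999):
        print(step, scheduler.get_last_lr()[0])  # ~5e-4, 2.5e-4, ~0
```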
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_EMP_H3K36me3-seqsight_32768_512_30M-L1_f", "results": []}]}
mahdibaghbanzadeh/GUE_EMP_H3K36me3-seqsight_32768_512_30M-L1_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T04:59:56+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_EMP\_H3K36me3-seqsight\_32768\_512\_30M-L1\_f ================================================== This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_EMP\_H3K36me3 dataset. It achieves the following results on the evaluation set: * Loss: 0.5059 * F1 Score: 0.7648 * Accuracy: 0.7689 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_EMP_H3K36me3-seqsight_32768_512_30M-L8_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_EMP_H3K36me3](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_EMP_H3K36me3) dataset. It achieves the following results on the evaluation set: - Loss: 0.4964 - F1 Score: 0.7769 - Accuracy: 0.7801 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:| | 0.5823 | 0.92 | 200 | 0.5639 | 0.7133 | 0.7179 | | 0.5372 | 1.83 | 400 | 0.5540 | 0.7230 | 0.7285 | | 0.5235 | 2.75 | 600 | 0.5354 | 0.7390 | 0.7443 | | 0.5191 | 3.67 | 800 | 0.5246 | 0.7483 | 0.7534 | | 0.5011 | 4.59 | 1000 | 0.5301 | 0.7489 | 0.7549 | | 0.494 | 5.5 | 1200 | 0.5277 | 0.7459 | 0.7529 | | 0.4944 | 6.42 | 1400 | 0.5062 | 0.7589 | 0.7626 | | 0.4915 | 7.34 | 1600 | 0.5254 | 0.7531 | 0.7589 | | 0.4831 | 8.26 | 1800 | 0.5174 | 0.7620 | 0.7669 | | 0.487 | 9.17 | 2000 | 0.5059 | 0.7629 | 0.7675 | | 0.4817 | 10.09 | 2200 | 0.5418 | 0.7252 | 0.7365 | | 0.478 | 11.01 | 2400 | 0.5097 | 0.7571 | 0.7629 | | 0.4762 | 11.93 | 2600 | 0.4893 | 0.7771 | 0.7790 | | 0.475 | 12.84 | 2800 | 0.5069 | 0.7606 | 0.7658 | | 0.473 | 13.76 | 3000 | 0.5181 | 0.7560 | 0.7620 | | 0.4686 | 14.68 | 3200 | 0.4987 | 0.7713 | 0.7744 | | 0.4693 | 15.6 | 3400 | 0.5017 | 0.7633 | 0.7678 | | 0.4672 | 16.51 | 3600 | 0.5163 | 0.7602 | 0.7655 | | 0.4657 | 17.43 | 3800 | 0.4967 | 0.7720 | 0.7749 | | 0.4646 | 18.35 | 4000 | 0.4947 | 0.7722 | 0.7755 | | 0.46 | 19.27 | 4200 | 0.5023 | 0.7722 | 0.7755 | | 0.4609 | 20.18 | 4400 | 0.5151 | 0.7575 | 0.7629 | | 0.4586 | 21.1 | 4600 | 0.5014 | 0.7714 | 0.7752 | | 0.4594 | 22.02 | 4800 | 0.5005 | 0.7715 | 0.7749 | | 0.4568 | 22.94 | 5000 | 0.4909 | 0.7720 | 0.7752 | | 0.4577 | 23.85 | 5200 | 0.5041 | 0.7606 | 0.7658 | | 0.4524 | 24.77 | 5400 | 0.5124 | 0.7607 | 0.7663 | | 0.4551 | 25.69 | 5600 | 0.5014 | 0.7685 | 0.7726 | | 0.4524 | 26.61 | 5800 | 0.4908 | 0.7769 | 0.7792 | | 0.4503 | 27.52 | 6000 | 0.5047 | 0.7684 | 0.7721 | | 0.4508 | 28.44 | 6200 | 0.5004 | 0.7686 | 0.7729 | | 0.4465 | 29.36 | 6400 | 0.5062 | 0.7672 | 0.7718 | | 0.4482 | 30.28 | 6600 | 0.5013 | 0.7644 | 0.7695 | | 0.4461 | 31.19 | 6800 | 0.4921 | 0.7735 | 0.7764 | | 0.4477 | 32.11 | 7000 | 0.4983 | 0.7706 | 0.7744 | | 0.4471 | 33.03 | 7200 | 0.4909 | 0.7739 | 0.7772 | | 0.4451 | 33.94 | 7400 | 0.4961 | 0.7743 | 0.7775 | | 0.4447 | 34.86 | 7600 | 0.4964 | 0.7718 | 0.7755 | | 0.4448 | 35.78 | 7800 | 0.4944 | 0.7742 | 0.7772 | | 0.4471 | 36.7 | 8000 | 0.4942 | 0.7723 | 0.7758 | | 0.4426 | 37.61 | 8200 | 0.5033 | 0.7642 | 0.7689 | | 0.4447 | 38.53 | 8400 | 0.4944 | 0.7739 | 0.7772 | | 0.4396 | 39.45 | 8600 | 0.5002 | 0.7694 | 0.7735 | | 0.4405 | 40.37 | 8800 | 0.5046 | 0.7694 | 0.7735 | | 0.4449 | 
41.28 | 9000 | 0.4965 | 0.7716 | 0.7755 | | 0.44 | 42.2 | 9200 | 0.4938 | 0.7752 | 0.7784 | | 0.4396 | 43.12 | 9400 | 0.4936 | 0.7776 | 0.7804 | | 0.4438 | 44.04 | 9600 | 0.4961 | 0.7715 | 0.7752 | | 0.4417 | 44.95 | 9800 | 0.4998 | 0.7675 | 0.7718 | | 0.44 | 45.87 | 10000 | 0.4976 | 0.7711 | 0.7749 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_EMP_H3K36me3-seqsight_32768_512_30M-L8_f", "results": []}]}
mahdibaghbanzadeh/GUE_EMP_H3K36me3-seqsight_32768_512_30M-L8_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T05:00:14+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_EMP\_H3K36me3-seqsight\_32768\_512\_30M-L8\_f ================================================== This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_EMP\_H3K36me3 dataset. It achieves the following results on the evaluation set: * Loss: 0.4964 * F1 Score: 0.7769 * Accuracy: 0.7801 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # gemma-1.1-2b-it-genai-kb This model is a fine-tuned version of [google/gemma-1.1-2b-it](https://huggingface.co/google/gemma-1.1-2b-it) on the None dataset. It achieves the following results on the evaluation set: - Loss: 5.2164 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 4 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.03 - num_epochs: 3.0 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 1 | 5.2201 | | No log | 2.0 | 3 | 5.2164 | ### Framework versions - PEFT 0.10.0 - Transformers 4.40.0 - Pytorch 2.2.1+cu121 - Datasets 2.19.0 - Tokenizers 0.19.1
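If the adapter is to be served without PEFT at inference time, one option (a sketch, assuming the repository contains a standard LoRA adapter for the gated Gemma base model, which requires accepting its license) is to merge it into the base weights:

```python
import torch
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

adapter_id = "cohesionet/gemma-1.1-2b-it-genai-kb"

# Load base + adapter in one call, then bake the LoRA weights into the base model.
model = AutoPeftModelForCausalLM.from_pretrained(adapter_id, torch_dtype=torch.bfloat16)
merged = model.merge_and_unload()

tokenizer = AutoTokenizer.from_pretrained("google/gemma-1.1-2b-it")  # gated; requires HF access
merged.save_pretrained("gemma-1.1-2b-it-genai-kb-merged")
tokenizer.save_pretrained("gemma-1.1-2b-it-genai-kb-merged")
```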
{"license": "gemma", "library_name": "peft", "tags": ["trl", "sft", "generated_from_trainer"], "base_model": "google/gemma-1.1-2b-it", "model-index": [{"name": "gemma-1.1-2b-it-genai-kb", "results": []}]}
cohesionet/gemma-1.1-2b-it-genai-kb
null
[ "peft", "tensorboard", "safetensors", "trl", "sft", "generated_from_trainer", "base_model:google/gemma-1.1-2b-it", "license:gemma", "region:us" ]
null
2024-04-30T05:00:17+00:00
[]
[]
TAGS #peft #tensorboard #safetensors #trl #sft #generated_from_trainer #base_model-google/gemma-1.1-2b-it #license-gemma #region-us
gemma-1.1-2b-it-genai-kb ======================== This model is a fine-tuned version of google/gemma-1.1-2b-it on the None dataset. It achieves the following results on the evaluation set: * Loss: 5.2164 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 1e-05 * train\_batch\_size: 4 * eval\_batch\_size: 8 * seed: 42 * gradient\_accumulation\_steps: 4 * total\_train\_batch\_size: 16 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * lr\_scheduler\_warmup\_ratio: 0.03 * num\_epochs: 3.0 * mixed\_precision\_training: Native AMP ### Training results ### Framework versions * PEFT 0.10.0 * Transformers 4.40.0 * Pytorch 2.2.1+cu121 * Datasets 2.19.0 * Tokenizers 0.19.1
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 1e-05\n* train\\_batch\\_size: 4\n* eval\\_batch\\_size: 8\n* seed: 42\n* gradient\\_accumulation\\_steps: 4\n* total\\_train\\_batch\\_size: 16\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* lr\\_scheduler\\_warmup\\_ratio: 0.03\n* num\\_epochs: 3.0\n* mixed\\_precision\\_training: Native AMP", "### Training results", "### Framework versions\n\n\n* PEFT 0.10.0\n* Transformers 4.40.0\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
[ "TAGS\n#peft #tensorboard #safetensors #trl #sft #generated_from_trainer #base_model-google/gemma-1.1-2b-it #license-gemma #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 1e-05\n* train\\_batch\\_size: 4\n* eval\\_batch\\_size: 8\n* seed: 42\n* gradient\\_accumulation\\_steps: 4\n* total\\_train\\_batch\\_size: 16\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* lr\\_scheduler\\_warmup\\_ratio: 0.03\n* num\\_epochs: 3.0\n* mixed\\_precision\\_training: Native AMP", "### Training results", "### Framework versions\n\n\n* PEFT 0.10.0\n* Transformers 4.40.0\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
[ 48, 155, 5, 52 ]
[ "TAGS\n#peft #tensorboard #safetensors #trl #sft #generated_from_trainer #base_model-google/gemma-1.1-2b-it #license-gemma #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 1e-05\n* train\\_batch\\_size: 4\n* eval\\_batch\\_size: 8\n* seed: 42\n* gradient\\_accumulation\\_steps: 4\n* total\\_train\\_batch\\_size: 16\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* lr\\_scheduler\\_warmup\\_ratio: 0.03\n* num\\_epochs: 3.0\n* mixed\\_precision\\_training: Native AMP### Training results### Framework versions\n\n\n* PEFT 0.10.0\n* Transformers 4.40.0\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # base-nsp-100000 This model is a fine-tuned version of [mhr2004/plm-nsp-100000](https://huggingface.co/mhr2004/plm-nsp-100000) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.8933 - Accuracy: 0.4877 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.9735 | 1.0 | 183 | 0.8668 | 0.4811 | | 0.8972 | 2.0 | 366 | 0.8636 | 0.4784 | | 0.8381 | 3.0 | 549 | 0.8927 | 0.4613 | | 0.8088 | 4.0 | 732 | 0.9399 | 0.4586 | | 0.793 | 5.0 | 915 | 0.9159 | 0.4856 | | 0.767 | 6.0 | 1098 | 0.9487 | 0.4793 | | 0.7457 | 7.0 | 1281 | 0.9372 | 0.4946 | ### Framework versions - Transformers 4.40.1 - Pytorch 2.3.0+cu121 - Datasets 2.19.0 - Tokenizers 0.19.1
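Since this is a standard RoBERTa sequence-classification checkpoint, it can be tried directly through the `pipeline` API; note that the card does not document what the labels mean, so the `LABEL_*` names returned are whatever the checkpoint's config provides:

```python
from transformers import pipeline

clf = pipeline("text-classification", model="mhr2004/base-nsp-100000")
print(clf("The two sentences follow each other naturally."))
# -> e.g. [{'label': 'LABEL_0', 'score': ...}]; label semantics are not documented in the card
```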
{"license": "mit", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mhr2004/plm-nsp-100000", "model-index": [{"name": "base-nsp-100000", "results": []}]}
mhr2004/base-nsp-100000
null
[ "transformers", "safetensors", "roberta", "text-classification", "generated_from_trainer", "base_model:mhr2004/plm-nsp-100000", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-30T05:01:25+00:00
[]
[]
TAGS #transformers #safetensors #roberta #text-classification #generated_from_trainer #base_model-mhr2004/plm-nsp-100000 #license-mit #autotrain_compatible #endpoints_compatible #region-us
base-nsp-100000 =============== This model is a fine-tuned version of mhr2004/plm-nsp-100000 on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 0.8933 * Accuracy: 0.4877 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 1e-05 * train\_batch\_size: 32 * eval\_batch\_size: 32 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * num\_epochs: 20 ### Training results ### Framework versions * Transformers 4.40.1 * Pytorch 2.3.0+cu121 * Datasets 2.19.0 * Tokenizers 0.19.1
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 1e-05\n* train\\_batch\\_size: 32\n* eval\\_batch\\_size: 32\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 20", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.1\n* Pytorch 2.3.0+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
[ "TAGS\n#transformers #safetensors #roberta #text-classification #generated_from_trainer #base_model-mhr2004/plm-nsp-100000 #license-mit #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 1e-05\n* train\\_batch\\_size: 32\n* eval\\_batch\\_size: 32\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 20", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.1\n* Pytorch 2.3.0+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
[ 55, 101, 5, 44 ]
[ "TAGS\n#transformers #safetensors #roberta #text-classification #generated_from_trainer #base_model-mhr2004/plm-nsp-100000 #license-mit #autotrain_compatible #endpoints_compatible #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 1e-05\n* train\\_batch\\_size: 32\n* eval\\_batch\\_size: 32\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 20### Training results### Framework versions\n\n\n* Transformers 4.40.1\n* Pytorch 2.3.0+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
null
peft
## Mongolian-Llama3 ![Alt Text](Llama.jpg) ### Model Description Mongolian-Llama3 implementation in Chat UI [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1LC0xx4i9xqFmwn9l8T6vw25RIr-BP0Tq?usp=sharing) Mongolian-Llama3 is the first open-source instruction-tuned language model for Mongolian & English users, with various abilities such as roleplaying and tool use, built upon the quantized Meta-Llama-3-8B model. Developed by: Dorjzodovsuren License: Llama-3 License Base Model: llama-3-8b-bnb-4bit Model Size: 4.65B Context length: 8K ## Bias, Risks, and Limitations To combat fake news, current strategies rely heavily on synthetic and translated data. However, these approaches have inherent biases, risks, and limitations: 1. **Synthetic Data Bias**: Algorithms may inadvertently perpetuate biases present in training data. 2. **Translation Inaccuracy**: Translations can distort meaning or lose context, leading to misinformation. 3. **Cultural Nuances**: Synthetic and translated data may miss cultural intricacies, risking amplification of stereotypes. 4. **Algorithmic Limits**: Effectiveness is constrained by algorithm capabilities and training data quality. 5. **Dependency on Data**: Accuracy hinges on the quality and representativeness of training data. 6. **Adversarial Attacks**: Malicious actors can exploit vulnerabilities to manipulate content. 7. **Language-Dependent Answers**: Answers may differ depending on the language of the prompt. ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases, and limitations of the model. Due to hallucinations and the characteristics of the pretraining datasets, some information may be misleading, and answers may differ depending on the language of the prompt. Please ask in <b>Mongolian</b> if possible. ## How to Get Started with the Model Use the code below to get started with the model. ```python import torch import gradio as gr from threading import Thread from peft import PeftModel, PeftConfig from unsloth import FastLanguageModel from transformers import TextStreamer from transformers import AutoModelForCausalLM, AutoTokenizer, StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer config = PeftConfig.from_pretrained("Dorjzodovsuren/Mongolian_llama3") model = AutoModelForCausalLM.from_pretrained("unsloth/llama-3-8b-bnb-4bit", torch_dtype=torch.float16) model = PeftModel.from_pretrained(model, "Dorjzodovsuren/Mongolian_llama3") # Load the tokenizer tokenizer = AutoTokenizer.from_pretrained("Dorjzodovsuren/Mn_llama3") alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request. 
### Instruction: {} ### Input: {} ### Response: {}""" # Enable native 2x faster inference FastLanguageModel.for_inference(model) # Create a text streamer text_streamer = TextStreamer(tokenizer, skip_prompt=False, skip_special_tokens=True) # Get the device based on GPU availability device = 'cuda' if torch.cuda.is_available() else 'cpu' # Move the model to the device model = model.to(device) class StopOnTokens(StoppingCriteria): def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool: stop_ids = [29, 0] for stop_id in stop_ids: if input_ids[0][-1] == stop_id: return True return False # The current implementation is single-turn: it does not condition on previous conversation history. # It is highly recommended to experiment with different hyperparameters and compare output quality. def predict(message, history): stop = StopOnTokens() messages = alpaca_prompt.format( message, "", "", ) model_inputs = tokenizer([messages], return_tensors="pt").to(device) streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True) generate_kwargs = dict( model_inputs, streamer=streamer, max_new_tokens=1024, top_p=0.95, temperature=0.001, repetition_penalty=1.1, stopping_criteria=StoppingCriteriaList([stop]) ) t = Thread(target=model.generate, kwargs=generate_kwargs) t.start() partial_message = "" for new_token in streamer: if new_token != '<': partial_message += new_token yield partial_message gr.ChatInterface(predict).launch(debug=True, share=True, show_api=True) ```
{"language": ["mn", "en"], "license": "apache-2.0", "library_name": "peft", "tags": ["Mongolian", "QLora", "Llama3", "Instructed-model"]}
Dorjzodovsuren/Mongolian_Llama3
null
[ "peft", "tensorboard", "safetensors", "Mongolian", "QLora", "Llama3", "Instructed-model", "mn", "en", "license:apache-2.0", "region:us" ]
null
2024-04-30T05:01:28+00:00
[]
[ "mn", "en" ]
TAGS #peft #tensorboard #safetensors #Mongolian #QLora #Llama3 #Instructed-model #mn #en #license-apache-2.0 #region-us
## Mongolian-Llama3 ! Alt Text ### Model Description Mongolian-Llama3 implementation in Chat UI ![Open In Colab](URL Mongolian-Llama3 is the first open source instruction-tuned language model for Mongolian & English users with various abilities such as roleplaying & tool-using built upon the quantized Meta-Llama-3-8B model. Developed by: Dorjzodovsuren License: Llama-3 License Base Model: llama-3-8b-bnb-4bit Model Size: 4.65B Context length: 8K ## Bias, Risks, and Limitations To combat fake news, current strategies rely heavily on synthetic and translated data. However, these approaches have inherent biases, risks, and limitations: 1. Synthetic Data Bias: Algorithms may inadvertently perpetuate biases present in training data. 2. Translation Inaccuracy: Translations can distort meaning or lose context, leading to misinformation. 3. Cultural Nuances: Synthetic and translated data may miss cultural intricacies, risking amplification of stereotypes. 4. Algorithmic Limits: Effectiveness is constrained by algorithm capabilities and training data quality. 5. Dependency on Data: Accuracy hinges on quality and representativeness of training data. 6. Adversarial Attacks: Malicious actors can exploit vulnerabilities to manipulate content. 7. Different answer based on language: Answer might be a bit different based on language. ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. Due to hallucinations and pretraining datasets characteristics, some information might be misleading, and answer might be a bit different based on language. Please ask in <b>Mongolian</b> if possible. ## How to Get Started with the Model Use the code below to get started with the model.
[ "## Mongolian-Llama3\n\n! Alt Text", "### Model Description\n\nMongolian-Llama3 implementation in Chat UI\n![Open In Colab](URL\n\nMongolian-Llama3 is the first open source instruction-tuned language model for Mongolian & English users with various abilities such as roleplaying & tool-using built upon the quantized Meta-Llama-3-8B model.\n\nDeveloped by: Dorjzodovsuren\n\nLicense: Llama-3 License\n\nBase Model: llama-3-8b-bnb-4bit\n\nModel Size: 4.65B\n\nContext length: 8K", "## Bias, Risks, and Limitations\n\nTo combat fake news, current strategies rely heavily on synthetic and translated data. However, these approaches have inherent biases, risks, and limitations:\n\n1. Synthetic Data Bias: Algorithms may inadvertently perpetuate biases present in training data.\n\n2. Translation Inaccuracy: Translations can distort meaning or lose context, leading to misinformation.\n\n3. Cultural Nuances: Synthetic and translated data may miss cultural intricacies, risking amplification of stereotypes.\n\n4. Algorithmic Limits: Effectiveness is constrained by algorithm capabilities and training data quality.\n\n5. Dependency on Data: Accuracy hinges on quality and representativeness of training data.\n\n6. Adversarial Attacks: Malicious actors can exploit vulnerabilities to manipulate content.\n\n7. Different answer based on language: Answer might be a bit different based on language.", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model.\nDue to hallucinations and pretraining datasets characteristics, some information might be misleading, and answer might be a bit different based on language. \n\nPlease ask in <b>Mongolian</b> if possible.", "## How to Get Started with the Model\n\nUse the code below to get started with the model." ]
[ "TAGS\n#peft #tensorboard #safetensors #Mongolian #QLora #Llama3 #Instructed-model #mn #en #license-apache-2.0 #region-us \n", "## Mongolian-Llama3\n\n! Alt Text", "### Model Description\n\nMongolian-Llama3 implementation in Chat UI\n![Open In Colab](URL\n\nMongolian-Llama3 is the first open source instruction-tuned language model for Mongolian & English users with various abilities such as roleplaying & tool-using built upon the quantized Meta-Llama-3-8B model.\n\nDeveloped by: Dorjzodovsuren\n\nLicense: Llama-3 License\n\nBase Model: llama-3-8b-bnb-4bit\n\nModel Size: 4.65B\n\nContext length: 8K", "## Bias, Risks, and Limitations\n\nTo combat fake news, current strategies rely heavily on synthetic and translated data. However, these approaches have inherent biases, risks, and limitations:\n\n1. Synthetic Data Bias: Algorithms may inadvertently perpetuate biases present in training data.\n\n2. Translation Inaccuracy: Translations can distort meaning or lose context, leading to misinformation.\n\n3. Cultural Nuances: Synthetic and translated data may miss cultural intricacies, risking amplification of stereotypes.\n\n4. Algorithmic Limits: Effectiveness is constrained by algorithm capabilities and training data quality.\n\n5. Dependency on Data: Accuracy hinges on quality and representativeness of training data.\n\n6. Adversarial Attacks: Malicious actors can exploit vulnerabilities to manipulate content.\n\n7. Different answer based on language: Answer might be a bit different based on language.", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model.\nDue to hallucinations and pretraining datasets characteristics, some information might be misleading, and answer might be a bit different based on language. \n\nPlease ask in <b>Mongolian</b> if possible.", "## How to Get Started with the Model\n\nUse the code below to get started with the model." ]
[ 41, 10, 118, 177, 72, 20 ]
[ "TAGS\n#peft #tensorboard #safetensors #Mongolian #QLora #Llama3 #Instructed-model #mn #en #license-apache-2.0 #region-us \n## Mongolian-Llama3\n\n! Alt Text### Model Description\n\nMongolian-Llama3 implementation in Chat UI\n![Open In Colab](URL\n\nMongolian-Llama3 is the first open source instruction-tuned language model for Mongolian & English users with various abilities such as roleplaying & tool-using built upon the quantized Meta-Llama-3-8B model.\n\nDeveloped by: Dorjzodovsuren\n\nLicense: Llama-3 License\n\nBase Model: llama-3-8b-bnb-4bit\n\nModel Size: 4.65B\n\nContext length: 8K## Bias, Risks, and Limitations\n\nTo combat fake news, current strategies rely heavily on synthetic and translated data. However, these approaches have inherent biases, risks, and limitations:\n\n1. Synthetic Data Bias: Algorithms may inadvertently perpetuate biases present in training data.\n\n2. Translation Inaccuracy: Translations can distort meaning or lose context, leading to misinformation.\n\n3. Cultural Nuances: Synthetic and translated data may miss cultural intricacies, risking amplification of stereotypes.\n\n4. Algorithmic Limits: Effectiveness is constrained by algorithm capabilities and training data quality.\n\n5. Dependency on Data: Accuracy hinges on quality and representativeness of training data.\n\n6. Adversarial Attacks: Malicious actors can exploit vulnerabilities to manipulate content.\n\n7. Different answer based on language: Answer might be a bit different based on language.### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model.\nDue to hallucinations and pretraining datasets characteristics, some information might be misleading, and answer might be a bit different based on language. \n\nPlease ask in <b>Mongolian</b> if possible.## How to Get Started with the Model\n\nUse the code below to get started with the model." ]
question-answering
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # shipping_qa_model_30_04_24 This model is a fine-tuned version of [deepset/roberta-base-squad2](https://huggingface.co/deepset/roberta-base-squad2) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 3.8070 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 30 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 28 | 5.7792 | | No log | 2.0 | 56 | 5.4899 | | No log | 3.0 | 84 | 5.3744 | | No log | 4.0 | 112 | 5.2672 | | No log | 5.0 | 140 | 5.0586 | | No log | 6.0 | 168 | 4.8332 | | No log | 7.0 | 196 | 4.7809 | | No log | 8.0 | 224 | 4.7767 | | No log | 9.0 | 252 | 4.6233 | | No log | 10.0 | 280 | 4.5430 | | No log | 11.0 | 308 | 4.4714 | | No log | 12.0 | 336 | 4.3689 | | No log | 13.0 | 364 | 4.3410 | | No log | 14.0 | 392 | 4.2705 | | No log | 15.0 | 420 | 4.2760 | | No log | 16.0 | 448 | 4.1572 | | No log | 17.0 | 476 | 4.1465 | | 4.5743 | 18.0 | 504 | 4.0708 | | 4.5743 | 19.0 | 532 | 4.0196 | | 4.5743 | 20.0 | 560 | 4.0183 | | 4.5743 | 21.0 | 588 | 3.9759 | | 4.5743 | 22.0 | 616 | 3.9140 | | 4.5743 | 23.0 | 644 | 3.9308 | | 4.5743 | 24.0 | 672 | 3.8611 | | 4.5743 | 25.0 | 700 | 3.8159 | | 4.5743 | 26.0 | 728 | 3.8126 | | 4.5743 | 27.0 | 756 | 3.8272 | | 4.5743 | 28.0 | 784 | 3.8185 | | 4.5743 | 29.0 | 812 | 3.8074 | | 4.5743 | 30.0 | 840 | 3.8070 | ### Framework versions - Transformers 4.41.0.dev0 - Pytorch 2.2.2+cu118 - Datasets 2.18.0 - Tokenizers 0.19.1
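The card itself stops short of a usage example. A minimal sketch for querying this extractive QA model with the transformers question-answering pipeline might look like the following; the model id is taken from this record, and the question/context pair is a made-up placeholder.

```python
from transformers import pipeline

# Load the fine-tuned extractive QA model from the Hub.
# The model id is taken from this record; adjust if you host the weights elsewhere.
qa = pipeline(
    "question-answering",
    model="SurajSphinx/shipping_qa_model_30_04_24",
)

# Made-up shipping-related example; replace with your own question and context.
result = qa(
    question="When will the package arrive?",
    context="Your order shipped on Monday and should arrive within 3 to 5 business days.",
)
print(result["answer"], round(result["score"], 3))
```

Given the evaluation loss reported above, treat the extracted spans as rough suggestions and validate them against your own shipping data.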
{"license": "cc-by-4.0", "tags": ["generated_from_trainer"], "base_model": "deepset/roberta-base-squad2", "model-index": [{"name": "shipping_qa_model_30_04_24", "results": []}]}
SurajSphinx/shipping_qa_model_30_04_24
null
[ "transformers", "tensorboard", "safetensors", "roberta", "question-answering", "generated_from_trainer", "base_model:deepset/roberta-base-squad2", "license:cc-by-4.0", "endpoints_compatible", "region:us" ]
null
2024-04-30T05:02:36+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #roberta #question-answering #generated_from_trainer #base_model-deepset/roberta-base-squad2 #license-cc-by-4.0 #endpoints_compatible #region-us
shipping\_qa\_model\_30\_04\_24 =============================== This model is a fine-tuned version of deepset/roberta-base-squad2 on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 3.8070 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 1e-05 * train\_batch\_size: 16 * eval\_batch\_size: 16 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * num\_epochs: 30 ### Training results ### Framework versions * Transformers 4.41.0.dev0 * Pytorch 2.2.2+cu118 * Datasets 2.18.0 * Tokenizers 0.19.1
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 1e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 30", "### Training results", "### Framework versions\n\n\n* Transformers 4.41.0.dev0\n* Pytorch 2.2.2+cu118\n* Datasets 2.18.0\n* Tokenizers 0.19.1" ]
[ "TAGS\n#transformers #tensorboard #safetensors #roberta #question-answering #generated_from_trainer #base_model-deepset/roberta-base-squad2 #license-cc-by-4.0 #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 1e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 30", "### Training results", "### Framework versions\n\n\n* Transformers 4.41.0.dev0\n* Pytorch 2.2.2+cu118\n* Datasets 2.18.0\n* Tokenizers 0.19.1" ]
[ 55, 101, 5, 47 ]
[ "TAGS\n#transformers #tensorboard #safetensors #roberta #question-answering #generated_from_trainer #base_model-deepset/roberta-base-squad2 #license-cc-by-4.0 #endpoints_compatible #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 1e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 30### Training results### Framework versions\n\n\n* Transformers 4.41.0.dev0\n* Pytorch 2.2.2+cu118\n* Datasets 2.18.0\n* Tokenizers 0.19.1" ]
text-generation
transformers
# TooManyMix_LLM_02 TooManyMix_LLM_02 is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing): * [jdqwoi/TooManyMixed-LLM_04](https://huggingface.co/jdqwoi/TooManyMixed-LLM_04) * [jdqwoi/TooManyMix_LLM_01](https://huggingface.co/jdqwoi/TooManyMix_LLM_01) ## 🧩 Configuration ```yaml slices: - sources: - model: jdqwoi/TooManyMixed-LLM_04 layer_range: [0, 32] - model: jdqwoi/TooManyMix_LLM_01 layer_range: [0, 32] merge_method: slerp base_model: jdqwoi/TooManyMixed-LLM_04 parameters: t: - filter: self_attn value: [0, 0.5, 0.3, 0.7, 1] - filter: mlp value: [1, 0.5, 0.7, 0.3, 0] - value: 0.5 dtype: bfloat16 ``` ## 💻 Usage ```python !pip install -qU transformers accelerate from transformers import AutoTokenizer import transformers import torch model = "jdqwoi/TooManyMix_LLM_02" messages = [{"role": "user", "content": "What is a large language model?"}] tokenizer = AutoTokenizer.from_pretrained(model) prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) pipeline = transformers.pipeline( "text-generation", model=model, torch_dtype=torch.float16, device_map="auto", ) outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95) print(outputs[0]["generated_text"]) ```
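The `t` values in the configuration control how strongly each layer interpolates between the two parent models (0 keeps the base model, 1 takes the other). As a rough illustration of what spherical linear interpolation does to a pair of weight tensors, here is a toy sketch; mergekit's actual slerp implementation differs in its details (filters, per-layer schedules, dtype handling), so this is only meant to build intuition.

```python
import torch

def toy_slerp(t: float, a: torch.Tensor, b: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
    """Toy spherical linear interpolation between two weight tensors."""
    a_flat = a.flatten().float()
    b_flat = b.flatten().float()
    # Angle between the two (normalized) weight vectors.
    cos_omega = torch.dot(a_flat / (a_flat.norm() + eps), b_flat / (b_flat.norm() + eps))
    omega = torch.acos(torch.clamp(cos_omega, -1.0, 1.0))
    if omega.abs() < 1e-6:
        # Nearly parallel vectors: fall back to plain linear interpolation.
        blended = (1.0 - t) * a_flat + t * b_flat
    else:
        so = torch.sin(omega)
        blended = (torch.sin((1.0 - t) * omega) / so) * a_flat + (torch.sin(t * omega) / so) * b_flat
    return blended.reshape(a.shape).to(a.dtype)

# t=0 reproduces the base model's weights, t=1 the other model's weights.
w_base, w_other = torch.randn(4, 4), torch.randn(4, 4)
print(toy_slerp(0.5, w_base, w_other))
```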
{"tags": ["merge", "mergekit", "lazymergekit", "jdqwoi/TooManyMixed-LLM_04", "jdqwoi/TooManyMix_LLM_01", "unsloth"], "base_model": ["jdqwoi/TooManyMixed-LLM_04", "jdqwoi/TooManyMix_LLM_01"]}
jdqwoi/TooManyMix_LLM_02
null
[ "transformers", "safetensors", "mistral", "text-generation", "merge", "mergekit", "lazymergekit", "jdqwoi/TooManyMixed-LLM_04", "jdqwoi/TooManyMix_LLM_01", "unsloth", "conversational", "base_model:jdqwoi/TooManyMixed-LLM_04", "base_model:jdqwoi/TooManyMix_LLM_01", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T05:03:19+00:00
[]
[]
TAGS #transformers #safetensors #mistral #text-generation #merge #mergekit #lazymergekit #jdqwoi/TooManyMixed-LLM_04 #jdqwoi/TooManyMix_LLM_01 #unsloth #conversational #base_model-jdqwoi/TooManyMixed-LLM_04 #base_model-jdqwoi/TooManyMix_LLM_01 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# TooManyMix_LLM_02 TooManyMix_LLM_02 is a merge of the following models using LazyMergekit: * jdqwoi/TooManyMixed-LLM_04 * jdqwoi/TooManyMix_LLM_01 ## Configuration ## Usage
[ "# TooManyMix_LLM_02\n\nTooManyMix_LLM_02 is a merge of the following models using LazyMergekit:\n* jdqwoi/TooManyMixed-LLM_04\n* jdqwoi/TooManyMix_LLM_01", "## Configuration", "## Usage" ]
[ "TAGS\n#transformers #safetensors #mistral #text-generation #merge #mergekit #lazymergekit #jdqwoi/TooManyMixed-LLM_04 #jdqwoi/TooManyMix_LLM_01 #unsloth #conversational #base_model-jdqwoi/TooManyMixed-LLM_04 #base_model-jdqwoi/TooManyMix_LLM_01 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# TooManyMix_LLM_02\n\nTooManyMix_LLM_02 is a merge of the following models using LazyMergekit:\n* jdqwoi/TooManyMixed-LLM_04\n* jdqwoi/TooManyMix_LLM_01", "## Configuration", "## Usage" ]
[ 121, 63, 3, 3 ]
[ "TAGS\n#transformers #safetensors #mistral #text-generation #merge #mergekit #lazymergekit #jdqwoi/TooManyMixed-LLM_04 #jdqwoi/TooManyMix_LLM_01 #unsloth #conversational #base_model-jdqwoi/TooManyMixed-LLM_04 #base_model-jdqwoi/TooManyMix_LLM_01 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# TooManyMix_LLM_02\n\nTooManyMix_LLM_02 is a merge of the following models using LazyMergekit:\n* jdqwoi/TooManyMixed-LLM_04\n* jdqwoi/TooManyMix_LLM_01## Configuration## Usage" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": ["unsloth"]}
trex5790/model_l3
null
[ "transformers", "safetensors", "llama", "text-generation", "unsloth", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "4-bit", "region:us" ]
null
2024-04-30T05:04:31+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #llama #text-generation #unsloth #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #unsloth #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 52, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #unsloth #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
null
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": ["unsloth"]}
choudhry2272/lora-adapter-legal-llm
null
[ "transformers", "safetensors", "unsloth", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
2024-04-30T05:05:34+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #unsloth #arxiv-1910.09700 #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #unsloth #arxiv-1910.09700 #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 30, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #unsloth #arxiv-1910.09700 #endpoints_compatible #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
null
null
# kat33/Mixtral-8x7B-Instruct-v0.1-Q5_K_M-GGUF This model was converted to GGUF format from [`mistralai/Mixtral-8x7B-Instruct-v0.1`](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1) using llama.cpp via ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1) for more details on the model. ## Use with llama.cpp Install llama.cpp through brew. ```bash brew install ggerganov/ggerganov/llama.cpp ``` Invoke the llama.cpp server or the CLI. CLI: ```bash llama-cli --hf-repo kat33/Mixtral-8x7B-Instruct-v0.1-Q5_K_M-GGUF --model mixtral-8x7b-instruct-v0.1.Q5_K_M.gguf -p "The meaning to life and the universe is" ``` Server: ```bash llama-server --hf-repo kat33/Mixtral-8x7B-Instruct-v0.1-Q5_K_M-GGUF --model mixtral-8x7b-instruct-v0.1.Q5_K_M.gguf -c 2048 ``` Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the llama.cpp repo. ```bash git clone https://github.com/ggerganov/llama.cpp && cd llama.cpp && make && ./main -m mixtral-8x7b-instruct-v0.1.Q5_K_M.gguf -n 128 ```
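If you would rather drive the GGUF file from Python than through the llama.cpp CLI, a minimal sketch with the llama-cpp-python bindings could look like this. The repo id and filename are taken from this card; `n_ctx` mirrors the `-c 2048` server example, and the prompt and temperature are illustrative (the temperature matches the widget setting in this record's metadata).

```python
from huggingface_hub import hf_hub_download
from llama_cpp import Llama  # pip install llama-cpp-python

# Fetch the quantized file from this repo (names taken from the card above).
gguf_path = hf_hub_download(
    repo_id="kat33/Mixtral-8x7B-Instruct-v0.1-Q5_K_M-GGUF",
    filename="mixtral-8x7b-instruct-v0.1.Q5_K_M.gguf",
)

# n_ctx mirrors the -c 2048 used in the server example above; adjust as needed.
llm = Llama(model_path=gguf_path, n_ctx=2048)

output = llm(
    "The meaning to life and the universe is",  # same prompt as the CLI example
    max_tokens=128,
    temperature=0.5,
)
print(output["choices"][0]["text"])
```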
{"language": ["fr", "it", "de", "es", "en"], "license": "apache-2.0", "tags": ["llama-cpp", "gguf-my-repo"], "inference": {"parameters": {"temperature": 0.5}}, "widget": [{"messages": [{"role": "user", "content": "What is your favorite condiment?"}]}]}
kat33/Mixtral-8x7B-Instruct-v0.1-Q5_K_M-GGUF
null
[ "gguf", "llama-cpp", "gguf-my-repo", "fr", "it", "de", "es", "en", "license:apache-2.0", "region:us" ]
null
2024-04-30T05:06:39+00:00
[]
[ "fr", "it", "de", "es", "en" ]
TAGS #gguf #llama-cpp #gguf-my-repo #fr #it #de #es #en #license-apache-2.0 #region-us
# kat33/Mixtral-8x7B-Instruct-v0.1-Q5_K_M-GGUF This model was converted to GGUF format from 'mistralai/Mixtral-8x7B-Instruct-v0.1' using URL via the URL's GGUF-my-repo space. Refer to the original model card for more details on the model. ## Use with URL Install URL through brew. Invoke the URL server or the CLI. CLI: Server: Note: You can also use this checkpoint directly through the usage steps listed in the URL repo as well.
[ "# kat33/Mixtral-8x7B-Instruct-v0.1-Q5_K_M-GGUF\nThis model was converted to GGUF format from 'mistralai/Mixtral-8x7B-Instruct-v0.1' using URL via the URL's GGUF-my-repo space.\nRefer to the original model card for more details on the model.", "## Use with URL\n\nInstall URL through brew.\n\n\nInvoke the URL server or the CLI.\n\nCLI:\n\n\n\nServer:\n\n\n\nNote: You can also use this checkpoint directly through the usage steps listed in the URL repo as well." ]
[ "TAGS\n#gguf #llama-cpp #gguf-my-repo #fr #it #de #es #en #license-apache-2.0 #region-us \n", "# kat33/Mixtral-8x7B-Instruct-v0.1-Q5_K_M-GGUF\nThis model was converted to GGUF format from 'mistralai/Mixtral-8x7B-Instruct-v0.1' using URL via the URL's GGUF-my-repo space.\nRefer to the original model card for more details on the model.", "## Use with URL\n\nInstall URL through brew.\n\n\nInvoke the URL server or the CLI.\n\nCLI:\n\n\n\nServer:\n\n\n\nNote: You can also use this checkpoint directly through the usage steps listed in the URL repo as well." ]
[ 42, 93, 52 ]
[ "TAGS\n#gguf #llama-cpp #gguf-my-repo #fr #it #de #es #en #license-apache-2.0 #region-us \n# kat33/Mixtral-8x7B-Instruct-v0.1-Q5_K_M-GGUF\nThis model was converted to GGUF format from 'mistralai/Mixtral-8x7B-Instruct-v0.1' using URL via the URL's GGUF-my-repo space.\nRefer to the original model card for more details on the model.## Use with URL\n\nInstall URL through brew.\n\n\nInvoke the URL server or the CLI.\n\nCLI:\n\n\n\nServer:\n\n\n\nNote: You can also use this checkpoint directly through the usage steps listed in the URL repo as well." ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
nem012/gemma2b-5e-4
null
[ "transformers", "safetensors", "gemma", "text-generation", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T05:06:39+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #gemma #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #gemma #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 43, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #gemma #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_EMP_H3K36me3-seqsight_32768_512_30M-L32_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_EMP_H3K36me3](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_EMP_H3K36me3) dataset. It achieves the following results on the evaluation set: - Loss: 0.4838 - F1 Score: 0.7803 - Accuracy: 0.7818 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:| | 0.571 | 0.92 | 200 | 0.5760 | 0.7054 | 0.7147 | | 0.5202 | 1.83 | 400 | 0.5334 | 0.7451 | 0.75 | | 0.5045 | 2.75 | 600 | 0.5172 | 0.7485 | 0.7537 | | 0.5033 | 3.67 | 800 | 0.5092 | 0.7580 | 0.7632 | | 0.4865 | 4.59 | 1000 | 0.5145 | 0.7618 | 0.7666 | | 0.4787 | 5.5 | 1200 | 0.5214 | 0.7513 | 0.7583 | | 0.4804 | 6.42 | 1400 | 0.4940 | 0.7710 | 0.7735 | | 0.4761 | 7.34 | 1600 | 0.5137 | 0.7511 | 0.7572 | | 0.4651 | 8.26 | 1800 | 0.5023 | 0.7699 | 0.7738 | | 0.4688 | 9.17 | 2000 | 0.4943 | 0.7714 | 0.7744 | | 0.4621 | 10.09 | 2200 | 0.5437 | 0.7308 | 0.7414 | | 0.456 | 11.01 | 2400 | 0.5028 | 0.7679 | 0.7726 | | 0.4532 | 11.93 | 2600 | 0.4787 | 0.7829 | 0.7841 | | 0.4509 | 12.84 | 2800 | 0.5018 | 0.7623 | 0.7675 | | 0.4451 | 13.76 | 3000 | 0.5289 | 0.7509 | 0.7577 | | 0.4402 | 14.68 | 3200 | 0.5048 | 0.7705 | 0.7741 | | 0.4378 | 15.6 | 3400 | 0.5000 | 0.7655 | 0.7698 | | 0.4362 | 16.51 | 3600 | 0.5287 | 0.7605 | 0.7666 | | 0.4311 | 17.43 | 3800 | 0.5043 | 0.7695 | 0.7738 | | 0.4271 | 18.35 | 4000 | 0.4998 | 0.7768 | 0.7795 | | 0.4215 | 19.27 | 4200 | 0.5211 | 0.7695 | 0.7732 | | 0.4223 | 20.18 | 4400 | 0.5250 | 0.7652 | 0.7701 | | 0.4188 | 21.1 | 4600 | 0.5111 | 0.7721 | 0.7755 | | 0.4153 | 22.02 | 4800 | 0.5158 | 0.7679 | 0.7721 | | 0.4104 | 22.94 | 5000 | 0.4992 | 0.7760 | 0.7795 | | 0.4093 | 23.85 | 5200 | 0.5228 | 0.7636 | 0.7689 | | 0.4045 | 24.77 | 5400 | 0.5328 | 0.7631 | 0.7686 | | 0.4035 | 25.69 | 5600 | 0.5158 | 0.7661 | 0.7706 | | 0.4023 | 26.61 | 5800 | 0.5064 | 0.7756 | 0.7790 | | 0.3969 | 27.52 | 6000 | 0.5336 | 0.7713 | 0.7749 | | 0.3996 | 28.44 | 6200 | 0.5127 | 0.7704 | 0.7744 | | 0.3915 | 29.36 | 6400 | 0.5227 | 0.7748 | 0.7781 | | 0.3928 | 30.28 | 6600 | 0.5253 | 0.7643 | 0.7695 | | 0.3893 | 31.19 | 6800 | 0.5147 | 0.7760 | 0.7787 | | 0.3909 | 32.11 | 7000 | 0.5174 | 0.7704 | 0.7741 | | 0.3867 | 33.03 | 7200 | 0.5111 | 0.7736 | 0.7767 | | 0.3854 | 33.94 | 7400 | 0.5197 | 0.7722 | 0.7755 | | 0.3835 | 34.86 | 7600 | 0.5173 | 0.7700 | 0.7735 | | 0.3819 | 35.78 | 7800 | 0.5197 | 0.7776 | 0.7804 | | 0.3835 | 36.7 | 8000 | 0.5246 | 0.7671 | 0.7712 | | 0.3813 | 37.61 | 8200 | 0.5301 | 0.7645 | 0.7689 | | 0.3779 | 38.53 | 8400 | 0.5271 | 0.7664 | 0.7704 | | 0.3723 | 39.45 | 8600 | 0.5305 | 0.7681 | 0.7718 | | 0.3735 | 40.37 | 8800 | 0.5402 | 0.7706 | 0.7747 | | 0.378 
| 41.28 | 9000 | 0.5258 | 0.7689 | 0.7726 | | 0.3748 | 42.2 | 9200 | 0.5230 | 0.7712 | 0.7744 | | 0.3733 | 43.12 | 9400 | 0.5247 | 0.7751 | 0.7781 | | 0.3757 | 44.04 | 9600 | 0.5240 | 0.7691 | 0.7729 | | 0.3722 | 44.95 | 9800 | 0.5293 | 0.7686 | 0.7726 | | 0.3723 | 45.87 | 10000 | 0.5280 | 0.7694 | 0.7732 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_EMP_H3K36me3-seqsight_32768_512_30M-L32_f", "results": []}]}
mahdibaghbanzadeh/GUE_EMP_H3K36me3-seqsight_32768_512_30M-L32_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T05:06:40+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_EMP\_H3K36me3-seqsight\_32768\_512\_30M-L32\_f =================================================== This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_EMP\_H3K36me3 dataset. It achieves the following results on the evaluation set: * Loss: 0.4838 * F1 Score: 0.7803 * Accuracy: 0.7818 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_mouse_0-seqsight_32768_512_30M-L1_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_mouse_0](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_mouse_0) dataset. It achieves the following results on the evaluation set: - Loss: 0.5765 - F1 Score: 0.6868 - Accuracy: 0.6877 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:------:|:-----:|:---------------:|:--------:|:--------:| | 0.6479 | 3.92 | 200 | 0.6066 | 0.6414 | 0.6432 | | 0.6173 | 7.84 | 400 | 0.5941 | 0.6698 | 0.6704 | | 0.6016 | 11.76 | 600 | 0.5771 | 0.6926 | 0.6926 | | 0.5876 | 15.69 | 800 | 0.5666 | 0.6956 | 0.6963 | | 0.5776 | 19.61 | 1000 | 0.5552 | 0.7010 | 0.7012 | | 0.5672 | 23.53 | 1200 | 0.5506 | 0.7167 | 0.7185 | | 0.56 | 27.45 | 1400 | 0.5429 | 0.7197 | 0.7198 | | 0.5522 | 31.37 | 1600 | 0.5375 | 0.7228 | 0.7235 | | 0.5444 | 35.29 | 1800 | 0.5356 | 0.7241 | 0.7259 | | 0.5406 | 39.22 | 2000 | 0.5339 | 0.7290 | 0.7296 | | 0.5339 | 43.14 | 2200 | 0.5323 | 0.7206 | 0.7222 | | 0.5338 | 47.06 | 2400 | 0.5325 | 0.7228 | 0.7247 | | 0.528 | 50.98 | 2600 | 0.5318 | 0.7293 | 0.7296 | | 0.5236 | 54.9 | 2800 | 0.5356 | 0.7331 | 0.7358 | | 0.5199 | 58.82 | 3000 | 0.5315 | 0.7312 | 0.7333 | | 0.5193 | 62.75 | 3200 | 0.5267 | 0.7349 | 0.7358 | | 0.5141 | 66.67 | 3400 | 0.5300 | 0.7371 | 0.7383 | | 0.5126 | 70.59 | 3600 | 0.5261 | 0.7343 | 0.7346 | | 0.5119 | 74.51 | 3800 | 0.5264 | 0.7319 | 0.7321 | | 0.5091 | 78.43 | 4000 | 0.5280 | 0.7403 | 0.7407 | | 0.5108 | 82.35 | 4200 | 0.5294 | 0.7356 | 0.7383 | | 0.506 | 86.27 | 4400 | 0.5299 | 0.7292 | 0.7296 | | 0.5049 | 90.2 | 4600 | 0.5256 | 0.7337 | 0.7346 | | 0.5042 | 94.12 | 4800 | 0.5276 | 0.7307 | 0.7309 | | 0.4996 | 98.04 | 5000 | 0.5254 | 0.7346 | 0.7358 | | 0.4986 | 101.96 | 5200 | 0.5294 | 0.7278 | 0.7284 | | 0.4976 | 105.88 | 5400 | 0.5283 | 0.7286 | 0.7309 | | 0.4947 | 109.8 | 5600 | 0.5293 | 0.7332 | 0.7346 | | 0.4926 | 113.73 | 5800 | 0.5260 | 0.7306 | 0.7321 | | 0.4923 | 117.65 | 6000 | 0.5305 | 0.7283 | 0.7296 | | 0.494 | 121.57 | 6200 | 0.5263 | 0.7325 | 0.7333 | | 0.4913 | 125.49 | 6400 | 0.5282 | 0.7264 | 0.7272 | | 0.4866 | 129.41 | 6600 | 0.5294 | 0.7313 | 0.7321 | | 0.4904 | 133.33 | 6800 | 0.5273 | 0.7279 | 0.7296 | | 0.488 | 137.25 | 7000 | 0.5254 | 0.7350 | 0.7358 | | 0.4892 | 141.18 | 7200 | 0.5275 | 0.7313 | 0.7321 | | 0.485 | 145.1 | 7400 | 0.5294 | 0.7287 | 0.7296 | | 0.4882 | 149.02 | 7600 | 0.5275 | 0.7245 | 0.7259 | | 0.4864 | 152.94 | 7800 | 0.5265 | 0.7375 | 0.7383 | | 0.4821 | 156.86 | 8000 | 0.5283 | 0.7241 | 0.7259 | | 0.4798 | 160.78 | 8200 | 0.5284 | 0.7302 | 0.7309 | | 0.4845 | 164.71 | 8400 | 0.5267 | 0.7324 | 0.7333 | | 0.4827 | 168.63 | 8600 | 0.5283 | 0.7294 | 0.7309 | | 0.4828 | 172.55 | 8800 | 0.5275 | 0.7321 | 0.7333 
| | 0.4818 | 176.47 | 9000 | 0.5282 | 0.7295 | 0.7309 | | 0.4785 | 180.39 | 9200 | 0.5288 | 0.7297 | 0.7309 | | 0.4764 | 184.31 | 9400 | 0.5292 | 0.7327 | 0.7333 | | 0.4793 | 188.24 | 9600 | 0.5294 | 0.7313 | 0.7321 | | 0.4806 | 192.16 | 9800 | 0.5290 | 0.7312 | 0.7321 | | 0.4817 | 196.08 | 10000 | 0.5288 | 0.7273 | 0.7284 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_mouse_0-seqsight_32768_512_30M-L1_f", "results": []}]}
mahdibaghbanzadeh/GUE_mouse_0-seqsight_32768_512_30M-L1_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T05:07:35+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_mouse\_0-seqsight\_32768\_512\_30M-L1\_f ============================================= This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_mouse\_0 dataset. It achieves the following results on the evaluation set: * Loss: 0.5765 * F1 Score: 0.6868 * Accuracy: 0.6877 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_mouse_0-seqsight_32768_512_30M-L8_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_mouse_0](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_mouse_0) dataset. It achieves the following results on the evaluation set: - Loss: 0.5609 - F1 Score: 0.7098 - Accuracy: 0.7099 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:------:|:-----:|:---------------:|:--------:|:--------:| | 0.6353 | 3.92 | 200 | 0.5892 | 0.6523 | 0.6543 | | 0.583 | 7.84 | 400 | 0.5656 | 0.6987 | 0.6988 | | 0.558 | 11.76 | 600 | 0.5393 | 0.7218 | 0.7222 | | 0.5407 | 15.69 | 800 | 0.5403 | 0.7185 | 0.7210 | | 0.5307 | 19.61 | 1000 | 0.5336 | 0.7219 | 0.7222 | | 0.5206 | 23.53 | 1200 | 0.5447 | 0.7012 | 0.7074 | | 0.5081 | 27.45 | 1400 | 0.5394 | 0.7142 | 0.7173 | | 0.5019 | 31.37 | 1600 | 0.5330 | 0.7291 | 0.7296 | | 0.4951 | 35.29 | 1800 | 0.5298 | 0.7243 | 0.7259 | | 0.4895 | 39.22 | 2000 | 0.5369 | 0.7170 | 0.7198 | | 0.4804 | 43.14 | 2200 | 0.5413 | 0.7152 | 0.7185 | | 0.4776 | 47.06 | 2400 | 0.5462 | 0.7139 | 0.7173 | | 0.4706 | 50.98 | 2600 | 0.5445 | 0.7333 | 0.7333 | | 0.462 | 54.9 | 2800 | 0.5533 | 0.7123 | 0.7173 | | 0.4559 | 58.82 | 3000 | 0.5399 | 0.7168 | 0.7185 | | 0.4542 | 62.75 | 3200 | 0.5446 | 0.7137 | 0.7160 | | 0.4443 | 66.67 | 3400 | 0.5614 | 0.7130 | 0.7173 | | 0.4379 | 70.59 | 3600 | 0.5497 | 0.7307 | 0.7321 | | 0.4367 | 74.51 | 3800 | 0.5571 | 0.7227 | 0.7247 | | 0.4248 | 78.43 | 4000 | 0.5682 | 0.7210 | 0.7235 | | 0.4257 | 82.35 | 4200 | 0.5716 | 0.7194 | 0.7235 | | 0.4187 | 86.27 | 4400 | 0.5754 | 0.7237 | 0.7259 | | 0.4149 | 90.2 | 4600 | 0.5762 | 0.7227 | 0.7247 | | 0.412 | 94.12 | 4800 | 0.5715 | 0.7217 | 0.7222 | | 0.4051 | 98.04 | 5000 | 0.5833 | 0.7243 | 0.7272 | | 0.3991 | 101.96 | 5200 | 0.5844 | 0.7153 | 0.7160 | | 0.3969 | 105.88 | 5400 | 0.5944 | 0.7205 | 0.7210 | | 0.3875 | 109.8 | 5600 | 0.6011 | 0.7119 | 0.7123 | | 0.3844 | 113.73 | 5800 | 0.5952 | 0.7215 | 0.7222 | | 0.3786 | 117.65 | 6000 | 0.6058 | 0.7235 | 0.7247 | | 0.3808 | 121.57 | 6200 | 0.6104 | 0.7333 | 0.7333 | | 0.3728 | 125.49 | 6400 | 0.6175 | 0.7220 | 0.7222 | | 0.3723 | 129.41 | 6600 | 0.6208 | 0.7267 | 0.7272 | | 0.3709 | 133.33 | 6800 | 0.6202 | 0.7165 | 0.7173 | | 0.3687 | 137.25 | 7000 | 0.6164 | 0.7244 | 0.7247 | | 0.368 | 141.18 | 7200 | 0.6249 | 0.7148 | 0.7148 | | 0.3624 | 145.1 | 7400 | 0.6309 | 0.7154 | 0.7160 | | 0.3635 | 149.02 | 7600 | 0.6218 | 0.7180 | 0.7185 | | 0.3623 | 152.94 | 7800 | 0.6246 | 0.7256 | 0.7259 | | 0.3544 | 156.86 | 8000 | 0.6370 | 0.7248 | 0.7259 | | 0.3487 | 160.78 | 8200 | 0.6394 | 0.7228 | 0.7235 | | 0.3552 | 164.71 | 8400 | 0.6353 | 0.7154 | 0.7160 | | 0.3547 | 168.63 | 8600 | 0.6390 | 0.7227 | 0.7235 | | 0.3545 | 172.55 | 8800 | 0.6415 | 0.7168 | 
0.7173 | | 0.3522 | 176.47 | 9000 | 0.6398 | 0.7240 | 0.7247 | | 0.35 | 180.39 | 9200 | 0.6430 | 0.7203 | 0.7210 | | 0.3441 | 184.31 | 9400 | 0.6457 | 0.7168 | 0.7173 | | 0.3494 | 188.24 | 9600 | 0.6432 | 0.7206 | 0.7210 | | 0.3433 | 192.16 | 9800 | 0.6458 | 0.7231 | 0.7235 | | 0.3464 | 196.08 | 10000 | 0.6456 | 0.7206 | 0.7210 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_mouse_0-seqsight_32768_512_30M-L8_f", "results": []}]}
mahdibaghbanzadeh/GUE_mouse_0-seqsight_32768_512_30M-L8_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T05:08:03+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_mouse\_0-seqsight\_32768\_512\_30M-L8\_f ============================================= This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_mouse\_0 dataset. It achieves the following results on the evaluation set: * Loss: 0.5609 * F1 Score: 0.7098 * Accuracy: 0.7099 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # base-nsp-10000 This model is a fine-tuned version of [mhr2004/plm-nsp-10000](https://huggingface.co/mhr2004/plm-nsp-10000) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.8886 - Accuracy: 0.4717 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.9837 | 1.0 | 183 | 0.8747 | 0.4703 | | 0.9294 | 2.0 | 366 | 0.8611 | 0.4577 | | 0.8769 | 3.0 | 549 | 0.8751 | 0.4730 | | 0.8351 | 4.0 | 732 | 0.8768 | 0.5054 | | 0.8143 | 5.0 | 915 | 0.8789 | 0.4973 | | 0.7892 | 6.0 | 1098 | 0.8924 | 0.4802 | | 0.7748 | 7.0 | 1281 | 0.8990 | 0.5045 | ### Framework versions - Transformers 4.40.1 - Pytorch 2.3.0+cu121 - Datasets 2.19.0 - Tokenizers 0.19.1
{"license": "mit", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mhr2004/plm-nsp-10000", "model-index": [{"name": "base-nsp-10000", "results": []}]}
mhr2004/base-nsp-10000
null
[ "transformers", "safetensors", "roberta", "text-classification", "generated_from_trainer", "base_model:mhr2004/plm-nsp-10000", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-30T05:12:57+00:00
[]
[]
TAGS #transformers #safetensors #roberta #text-classification #generated_from_trainer #base_model-mhr2004/plm-nsp-10000 #license-mit #autotrain_compatible #endpoints_compatible #region-us
base-nsp-10000 ============== This model is a fine-tuned version of mhr2004/plm-nsp-10000 on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 0.8886 * Accuracy: 0.4717 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 1e-05 * train\_batch\_size: 32 * eval\_batch\_size: 32 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * num\_epochs: 20 ### Training results ### Framework versions * Transformers 4.40.1 * Pytorch 2.3.0+cu121 * Datasets 2.19.0 * Tokenizers 0.19.1
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 1e-05\n* train\\_batch\\_size: 32\n* eval\\_batch\\_size: 32\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 20", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.1\n* Pytorch 2.3.0+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
[ "TAGS\n#transformers #safetensors #roberta #text-classification #generated_from_trainer #base_model-mhr2004/plm-nsp-10000 #license-mit #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 1e-05\n* train\\_batch\\_size: 32\n* eval\\_batch\\_size: 32\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 20", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.1\n* Pytorch 2.3.0+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
[ 55, 101, 5, 44 ]
[ "TAGS\n#transformers #safetensors #roberta #text-classification #generated_from_trainer #base_model-mhr2004/plm-nsp-10000 #license-mit #autotrain_compatible #endpoints_compatible #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 1e-05\n* train\\_batch\\_size: 32\n* eval\\_batch\\_size: 32\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 20### Training results### Framework versions\n\n\n* Transformers 4.40.1\n* Pytorch 2.3.0+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_mouse_0-seqsight_32768_512_30M-L32_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_mouse_0](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_mouse_0) dataset. It achieves the following results on the evaluation set: - Loss: 0.6749 - F1 Score: 0.7122 - Accuracy: 0.7123 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:------:|:-----:|:---------------:|:--------:|:--------:| | 0.6244 | 3.92 | 200 | 0.5700 | 0.6788 | 0.6790 | | 0.563 | 7.84 | 400 | 0.5460 | 0.7222 | 0.7222 | | 0.5365 | 11.76 | 600 | 0.5328 | 0.7170 | 0.7173 | | 0.5146 | 15.69 | 800 | 0.5698 | 0.6989 | 0.7086 | | 0.5016 | 19.61 | 1000 | 0.5394 | 0.7233 | 0.7235 | | 0.4801 | 23.53 | 1200 | 0.5566 | 0.7210 | 0.7259 | | 0.4552 | 27.45 | 1400 | 0.5603 | 0.7203 | 0.7210 | | 0.4412 | 31.37 | 1600 | 0.5854 | 0.7040 | 0.7049 | | 0.4162 | 35.29 | 1800 | 0.5665 | 0.7247 | 0.7247 | | 0.399 | 39.22 | 2000 | 0.6213 | 0.7269 | 0.7272 | | 0.381 | 43.14 | 2200 | 0.6344 | 0.7151 | 0.7173 | | 0.3663 | 47.06 | 2400 | 0.6525 | 0.7122 | 0.7136 | | 0.3502 | 50.98 | 2600 | 0.7011 | 0.7160 | 0.7160 | | 0.3313 | 54.9 | 2800 | 0.6827 | 0.7233 | 0.7247 | | 0.3137 | 58.82 | 3000 | 0.7170 | 0.7272 | 0.7272 | | 0.2977 | 62.75 | 3200 | 0.7398 | 0.7164 | 0.7173 | | 0.2858 | 66.67 | 3400 | 0.7814 | 0.7197 | 0.7198 | | 0.2755 | 70.59 | 3600 | 0.7821 | 0.7182 | 0.7185 | | 0.2664 | 74.51 | 3800 | 0.7907 | 0.7262 | 0.7272 | | 0.2531 | 78.43 | 4000 | 0.8137 | 0.7269 | 0.7272 | | 0.2425 | 82.35 | 4200 | 0.8567 | 0.7215 | 0.7222 | | 0.2351 | 86.27 | 4400 | 0.8622 | 0.7077 | 0.7086 | | 0.2275 | 90.2 | 4600 | 0.8658 | 0.7171 | 0.7173 | | 0.224 | 94.12 | 4800 | 0.8683 | 0.7222 | 0.7222 | | 0.2129 | 98.04 | 5000 | 0.8735 | 0.7171 | 0.7173 | | 0.2064 | 101.96 | 5200 | 0.9311 | 0.7124 | 0.7123 | | 0.2013 | 105.88 | 5400 | 0.9293 | 0.7111 | 0.7111 | | 0.1898 | 109.8 | 5600 | 0.9651 | 0.7143 | 0.7148 | | 0.1863 | 113.73 | 5800 | 0.9792 | 0.7112 | 0.7111 | | 0.1783 | 117.65 | 6000 | 1.0218 | 0.7109 | 0.7111 | | 0.181 | 121.57 | 6200 | 0.9718 | 0.7222 | 0.7222 | | 0.1697 | 125.49 | 6400 | 1.0287 | 0.7134 | 0.7136 | | 0.1684 | 129.41 | 6600 | 1.0325 | 0.7098 | 0.7099 | | 0.1627 | 133.33 | 6800 | 1.0745 | 0.7087 | 0.7086 | | 0.1595 | 137.25 | 7000 | 1.0632 | 0.7136 | 0.7136 | | 0.1612 | 141.18 | 7200 | 1.0438 | 0.7111 | 0.7111 | | 0.1522 | 145.1 | 7400 | 1.0972 | 0.7111 | 0.7111 | | 0.1527 | 149.02 | 7600 | 1.0931 | 0.7111 | 0.7111 | | 0.1503 | 152.94 | 7800 | 1.0939 | 0.7183 | 0.7185 | | 0.1469 | 156.86 | 8000 | 1.0958 | 0.7098 | 0.7099 | | 0.1403 | 160.78 | 8200 | 1.1147 | 0.7136 | 0.7136 | | 0.1424 | 164.71 | 8400 | 1.0993 | 0.7173 | 0.7173 | | 0.1423 | 168.63 | 8600 | 1.0955 | 0.7184 | 0.7185 | | 0.1431 | 172.55 | 8800 | 1.1052 | 0.7111 | 
0.7111 | | 0.139 | 176.47 | 9000 | 1.1101 | 0.7158 | 0.7160 | | 0.1372 | 180.39 | 9200 | 1.1276 | 0.7185 | 0.7185 | | 0.1297 | 184.31 | 9400 | 1.1570 | 0.7111 | 0.7111 | | 0.1336 | 188.24 | 9600 | 1.1470 | 0.7074 | 0.7074 | | 0.1309 | 192.16 | 9800 | 1.1467 | 0.7086 | 0.7086 | | 0.1341 | 196.08 | 10000 | 1.1440 | 0.7099 | 0.7099 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_mouse_0-seqsight_32768_512_30M-L32_f", "results": []}]}
mahdibaghbanzadeh/GUE_mouse_0-seqsight_32768_512_30M-L32_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T05:13:25+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_mouse\_0-seqsight\_32768\_512\_30M-L32\_f ============================================== This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_mouse\_0 dataset. It achieves the following results on the evaluation set: * Loss: 0.6749 * F1 Score: 0.7122 * Accuracy: 0.7123 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_mouse_1-seqsight_32768_512_30M-L1_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_mouse_1](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_mouse_1) dataset. It achieves the following results on the evaluation set: - Loss: 0.2628 - F1 Score: 0.8828 - Accuracy: 0.8829 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:| | 0.4929 | 0.47 | 200 | 0.4098 | 0.8100 | 0.8102 | | 0.4244 | 0.95 | 400 | 0.3823 | 0.8225 | 0.8227 | | 0.3942 | 1.42 | 600 | 0.3638 | 0.8367 | 0.8368 | | 0.3856 | 1.9 | 800 | 0.3375 | 0.8466 | 0.8466 | | 0.3598 | 2.37 | 1000 | 0.3226 | 0.8568 | 0.8568 | | 0.3466 | 2.84 | 1200 | 0.3131 | 0.8581 | 0.8581 | | 0.3314 | 3.32 | 1400 | 0.3044 | 0.8629 | 0.8629 | | 0.3337 | 3.79 | 1600 | 0.2987 | 0.8688 | 0.8688 | | 0.3266 | 4.27 | 1800 | 0.2887 | 0.8721 | 0.8722 | | 0.3153 | 4.74 | 2000 | 0.2944 | 0.8709 | 0.8709 | | 0.3181 | 5.21 | 2200 | 0.2831 | 0.8725 | 0.8726 | | 0.3121 | 5.69 | 2400 | 0.2850 | 0.8737 | 0.8737 | | 0.3115 | 6.16 | 2600 | 0.2763 | 0.8756 | 0.8758 | | 0.306 | 6.64 | 2800 | 0.2762 | 0.8767 | 0.8768 | | 0.3067 | 7.11 | 3000 | 0.2758 | 0.8790 | 0.8790 | | 0.3003 | 7.58 | 3200 | 0.2737 | 0.8802 | 0.8802 | | 0.2981 | 8.06 | 3400 | 0.2690 | 0.8814 | 0.8815 | | 0.2912 | 8.53 | 3600 | 0.2641 | 0.8864 | 0.8864 | | 0.2939 | 9.0 | 3800 | 0.2661 | 0.8816 | 0.8817 | | 0.2892 | 9.48 | 4000 | 0.2657 | 0.8832 | 0.8835 | | 0.29 | 9.95 | 4200 | 0.2600 | 0.8856 | 0.8857 | | 0.289 | 10.43 | 4400 | 0.2622 | 0.8827 | 0.8827 | | 0.2852 | 10.9 | 4600 | 0.2616 | 0.8842 | 0.8842 | | 0.2791 | 11.37 | 4800 | 0.2621 | 0.8842 | 0.8842 | | 0.2887 | 11.85 | 5000 | 0.2598 | 0.8853 | 0.8854 | | 0.2822 | 12.32 | 5200 | 0.2615 | 0.8834 | 0.8835 | | 0.2821 | 12.8 | 5400 | 0.2576 | 0.8853 | 0.8854 | | 0.2833 | 13.27 | 5600 | 0.2587 | 0.8873 | 0.8875 | | 0.2761 | 13.74 | 5800 | 0.2584 | 0.8875 | 0.8876 | | 0.2806 | 14.22 | 6000 | 0.2575 | 0.8866 | 0.8867 | | 0.2794 | 14.69 | 6200 | 0.2572 | 0.8868 | 0.8869 | | 0.2799 | 15.17 | 6400 | 0.2577 | 0.8868 | 0.8869 | | 0.2812 | 15.64 | 6600 | 0.2563 | 0.8874 | 0.8875 | | 0.2775 | 16.11 | 6800 | 0.2547 | 0.8878 | 0.8879 | | 0.2746 | 16.59 | 7000 | 0.2556 | 0.8882 | 0.8884 | | 0.2814 | 17.06 | 7200 | 0.2551 | 0.8879 | 0.8879 | | 0.2776 | 17.54 | 7400 | 0.2561 | 0.8880 | 0.8881 | | 0.2745 | 18.01 | 7600 | 0.2548 | 0.8887 | 0.8888 | | 0.272 | 18.48 | 7800 | 0.2543 | 0.8882 | 0.8882 | | 0.2772 | 18.96 | 8000 | 0.2539 | 0.8883 | 0.8884 | | 0.2739 | 19.43 | 8200 | 0.2534 | 0.8884 | 0.8885 | | 0.2746 | 19.91 | 8400 | 0.2543 | 0.8881 | 0.8882 | | 0.2777 | 20.38 | 8600 | 0.2532 | 0.8895 | 0.8895 | | 0.2728 | 20.85 | 8800 | 0.2546 | 0.8885 | 0.8887 | | 0.2741 | 21.33 | 9000 | 0.2532 | 
0.8892 | 0.8893 | | 0.2757 | 21.8 | 9200 | 0.2537 | 0.8887 | 0.8888 | | 0.2738 | 22.27 | 9400 | 0.2527 | 0.8896 | 0.8897 | | 0.2741 | 22.75 | 9600 | 0.2541 | 0.8892 | 0.8893 | | 0.2745 | 23.22 | 9800 | 0.2536 | 0.8892 | 0.8893 | | 0.2778 | 23.7 | 10000 | 0.2533 | 0.8893 | 0.8894 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_mouse_1-seqsight_32768_512_30M-L1_f", "results": []}]}
mahdibaghbanzadeh/GUE_mouse_1-seqsight_32768_512_30M-L1_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T05:13:33+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_mouse\_1-seqsight\_32768\_512\_30M-L1\_f ============================================= This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_mouse\_1 dataset. It achieves the following results on the evaluation set: * Loss: 0.2628 * F1 Score: 0.8828 * Accuracy: 0.8829 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
null
peft
## Training procedure The following `bitsandbytes` quantization config was used during training: - quant_method: bitsandbytes - load_in_8bit: False - load_in_4bit: True - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: nf4 - bnb_4bit_use_double_quant: False - bnb_4bit_compute_dtype: float16 The following `bitsandbytes` quantization config was used during training: - quant_method: bitsandbytes - load_in_8bit: False - load_in_4bit: True - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: nf4 - bnb_4bit_use_double_quant: False - bnb_4bit_compute_dtype: float16 ### Framework versions - PEFT 0.4.0 - PEFT 0.4.0
{"library_name": "peft"}
TrinhDacPhu/questionansweringllma2
null
[ "peft", "safetensors", "region:us" ]
null
2024-04-30T05:13:54+00:00
[]
[]
TAGS #peft #safetensors #region-us
## Training procedure The following 'bitsandbytes' quantization config was used during training: - quant_method: bitsandbytes - load_in_8bit: False - load_in_4bit: True - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: nf4 - bnb_4bit_use_double_quant: False - bnb_4bit_compute_dtype: float16 The following 'bitsandbytes' quantization config was used during training: - quant_method: bitsandbytes - load_in_8bit: False - load_in_4bit: True - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: nf4 - bnb_4bit_use_double_quant: False - bnb_4bit_compute_dtype: float16 ### Framework versions - PEFT 0.4.0 - PEFT 0.4.0
[ "## Training procedure\n\n\nThe following 'bitsandbytes' quantization config was used during training:\n- quant_method: bitsandbytes\n- load_in_8bit: False\n- load_in_4bit: True\n- llm_int8_threshold: 6.0\n- llm_int8_skip_modules: None\n- llm_int8_enable_fp32_cpu_offload: False\n- llm_int8_has_fp16_weight: False\n- bnb_4bit_quant_type: nf4\n- bnb_4bit_use_double_quant: False\n- bnb_4bit_compute_dtype: float16\n\nThe following 'bitsandbytes' quantization config was used during training:\n- quant_method: bitsandbytes\n- load_in_8bit: False\n- load_in_4bit: True\n- llm_int8_threshold: 6.0\n- llm_int8_skip_modules: None\n- llm_int8_enable_fp32_cpu_offload: False\n- llm_int8_has_fp16_weight: False\n- bnb_4bit_quant_type: nf4\n- bnb_4bit_use_double_quant: False\n- bnb_4bit_compute_dtype: float16", "### Framework versions\n\n- PEFT 0.4.0\n\n- PEFT 0.4.0" ]
[ "TAGS\n#peft #safetensors #region-us \n", "## Training procedure\n\n\nThe following 'bitsandbytes' quantization config was used during training:\n- quant_method: bitsandbytes\n- load_in_8bit: False\n- load_in_4bit: True\n- llm_int8_threshold: 6.0\n- llm_int8_skip_modules: None\n- llm_int8_enable_fp32_cpu_offload: False\n- llm_int8_has_fp16_weight: False\n- bnb_4bit_quant_type: nf4\n- bnb_4bit_use_double_quant: False\n- bnb_4bit_compute_dtype: float16\n\nThe following 'bitsandbytes' quantization config was used during training:\n- quant_method: bitsandbytes\n- load_in_8bit: False\n- load_in_4bit: True\n- llm_int8_threshold: 6.0\n- llm_int8_skip_modules: None\n- llm_int8_enable_fp32_cpu_offload: False\n- llm_int8_has_fp16_weight: False\n- bnb_4bit_quant_type: nf4\n- bnb_4bit_use_double_quant: False\n- bnb_4bit_compute_dtype: float16", "### Framework versions\n\n- PEFT 0.4.0\n\n- PEFT 0.4.0" ]
[ 12, 304, 21 ]
[ "TAGS\n#peft #safetensors #region-us \n## Training procedure\n\n\nThe following 'bitsandbytes' quantization config was used during training:\n- quant_method: bitsandbytes\n- load_in_8bit: False\n- load_in_4bit: True\n- llm_int8_threshold: 6.0\n- llm_int8_skip_modules: None\n- llm_int8_enable_fp32_cpu_offload: False\n- llm_int8_has_fp16_weight: False\n- bnb_4bit_quant_type: nf4\n- bnb_4bit_use_double_quant: False\n- bnb_4bit_compute_dtype: float16\n\nThe following 'bitsandbytes' quantization config was used during training:\n- quant_method: bitsandbytes\n- load_in_8bit: False\n- load_in_4bit: True\n- llm_int8_threshold: 6.0\n- llm_int8_skip_modules: None\n- llm_int8_enable_fp32_cpu_offload: False\n- llm_int8_has_fp16_weight: False\n- bnb_4bit_quant_type: nf4\n- bnb_4bit_use_double_quant: False\n- bnb_4bit_compute_dtype: float16### Framework versions\n\n- PEFT 0.4.0\n\n- PEFT 0.4.0" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_mouse_1-seqsight_32768_512_30M-L8_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_mouse_1](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_mouse_1) dataset. It achieves the following results on the evaluation set: - Loss: 0.2485 - F1 Score: 0.8920 - Accuracy: 0.8921 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:| | 0.4676 | 0.47 | 200 | 0.3835 | 0.8227 | 0.8228 | | 0.3848 | 0.95 | 400 | 0.3300 | 0.8503 | 0.8504 | | 0.3357 | 1.42 | 600 | 0.2980 | 0.8666 | 0.8666 | | 0.3307 | 1.9 | 800 | 0.2825 | 0.8761 | 0.8762 | | 0.3089 | 2.37 | 1000 | 0.2778 | 0.8751 | 0.8752 | | 0.3024 | 2.84 | 1200 | 0.2740 | 0.8777 | 0.8777 | | 0.289 | 3.32 | 1400 | 0.2686 | 0.8817 | 0.8817 | | 0.2964 | 3.79 | 1600 | 0.2657 | 0.8814 | 0.8814 | | 0.2902 | 4.27 | 1800 | 0.2627 | 0.8830 | 0.8832 | | 0.2826 | 4.74 | 2000 | 0.2790 | 0.8784 | 0.8784 | | 0.2859 | 5.21 | 2200 | 0.2582 | 0.8844 | 0.8847 | | 0.2822 | 5.69 | 2400 | 0.2628 | 0.8864 | 0.8864 | | 0.2788 | 6.16 | 2600 | 0.2556 | 0.8854 | 0.8855 | | 0.2762 | 6.64 | 2800 | 0.2551 | 0.8858 | 0.8860 | | 0.2776 | 7.11 | 3000 | 0.2556 | 0.8904 | 0.8904 | | 0.2697 | 7.58 | 3200 | 0.2593 | 0.8888 | 0.8888 | | 0.2723 | 8.06 | 3400 | 0.2497 | 0.8900 | 0.8901 | | 0.2654 | 8.53 | 3600 | 0.2549 | 0.8904 | 0.8904 | | 0.268 | 9.0 | 3800 | 0.2510 | 0.8921 | 0.8922 | | 0.2636 | 9.48 | 4000 | 0.2467 | 0.8927 | 0.8928 | | 0.2655 | 9.95 | 4200 | 0.2451 | 0.8931 | 0.8931 | | 0.2616 | 10.43 | 4400 | 0.2482 | 0.8931 | 0.8931 | | 0.2588 | 10.9 | 4600 | 0.2479 | 0.8918 | 0.8918 | | 0.2531 | 11.37 | 4800 | 0.2512 | 0.8909 | 0.8909 | | 0.2637 | 11.85 | 5000 | 0.2420 | 0.8956 | 0.8956 | | 0.2554 | 12.32 | 5200 | 0.2506 | 0.8900 | 0.8900 | | 0.2562 | 12.8 | 5400 | 0.2474 | 0.8931 | 0.8931 | | 0.2555 | 13.27 | 5600 | 0.2414 | 0.8957 | 0.8958 | | 0.2487 | 13.74 | 5800 | 0.2420 | 0.8966 | 0.8967 | | 0.2514 | 14.22 | 6000 | 0.2462 | 0.8922 | 0.8922 | | 0.2497 | 14.69 | 6200 | 0.2428 | 0.8959 | 0.8959 | | 0.2504 | 15.17 | 6400 | 0.2469 | 0.8937 | 0.8937 | | 0.2539 | 15.64 | 6600 | 0.2395 | 0.8955 | 0.8955 | | 0.2479 | 16.11 | 6800 | 0.2391 | 0.8962 | 0.8962 | | 0.2459 | 16.59 | 7000 | 0.2405 | 0.8965 | 0.8965 | | 0.2524 | 17.06 | 7200 | 0.2410 | 0.8959 | 0.8959 | | 0.2484 | 17.54 | 7400 | 0.2412 | 0.8946 | 0.8946 | | 0.2456 | 18.01 | 7600 | 0.2388 | 0.8980 | 0.8980 | | 0.2426 | 18.48 | 7800 | 0.2409 | 0.8943 | 0.8943 | | 0.2496 | 18.96 | 8000 | 0.2377 | 0.8981 | 0.8981 | | 0.2465 | 19.43 | 8200 | 0.2369 | 0.9000 | 0.9001 | | 0.2442 | 19.91 | 8400 | 0.2388 | 0.8972 | 0.8973 | | 0.2485 | 20.38 | 8600 | 0.2379 | 0.8978 | 0.8979 | | 0.244 | 20.85 | 8800 | 0.2385 | 0.8972 | 0.8973 | | 0.2423 | 21.33 | 9000 | 0.2385 
| 0.8974 | 0.8974 | | 0.2457 | 21.8 | 9200 | 0.2393 | 0.8977 | 0.8977 | | 0.2469 | 22.27 | 9400 | 0.2375 | 0.8990 | 0.8990 | | 0.2448 | 22.75 | 9600 | 0.2383 | 0.8975 | 0.8976 | | 0.2455 | 23.22 | 9800 | 0.2384 | 0.8965 | 0.8965 | | 0.2447 | 23.7 | 10000 | 0.2383 | 0.8981 | 0.8981 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_mouse_1-seqsight_32768_512_30M-L8_f", "results": []}]}
mahdibaghbanzadeh/GUE_mouse_1-seqsight_32768_512_30M-L8_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T05:14:11+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_mouse\_1-seqsight\_32768\_512\_30M-L8\_f ============================================= This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_mouse\_1 dataset. It achieves the following results on the evaluation set: * Loss: 0.2485 * F1 Score: 0.8920 * Accuracy: 0.8921 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
shallow6414/tfj29zx
null
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T05:14:26+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 47, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_mouse_1-seqsight_32768_512_30M-L32_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_mouse_1](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_mouse_1) dataset. It achieves the following results on the evaluation set: - Loss: 0.2432 - F1 Score: 0.8950 - Accuracy: 0.8950 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:| | 0.4512 | 0.47 | 200 | 0.3512 | 0.8425 | 0.8426 | | 0.3475 | 0.95 | 400 | 0.3178 | 0.8543 | 0.8547 | | 0.3117 | 1.42 | 600 | 0.2755 | 0.8774 | 0.8774 | | 0.3101 | 1.9 | 800 | 0.2661 | 0.8858 | 0.8858 | | 0.2906 | 2.37 | 1000 | 0.2684 | 0.8836 | 0.8836 | | 0.2831 | 2.84 | 1200 | 0.2649 | 0.8858 | 0.8858 | | 0.2702 | 3.32 | 1400 | 0.2532 | 0.8889 | 0.8890 | | 0.2792 | 3.79 | 1600 | 0.2558 | 0.8879 | 0.8879 | | 0.2691 | 4.27 | 1800 | 0.2499 | 0.8908 | 0.8909 | | 0.263 | 4.74 | 2000 | 0.2596 | 0.8858 | 0.8858 | | 0.2652 | 5.21 | 2200 | 0.2482 | 0.8895 | 0.8898 | | 0.2599 | 5.69 | 2400 | 0.2485 | 0.8901 | 0.8901 | | 0.2555 | 6.16 | 2600 | 0.2426 | 0.8925 | 0.8927 | | 0.2534 | 6.64 | 2800 | 0.2435 | 0.8934 | 0.8936 | | 0.2524 | 7.11 | 3000 | 0.2431 | 0.8902 | 0.8903 | | 0.2464 | 7.58 | 3200 | 0.2451 | 0.8910 | 0.8910 | | 0.2499 | 8.06 | 3400 | 0.2393 | 0.8951 | 0.8953 | | 0.241 | 8.53 | 3600 | 0.2439 | 0.8913 | 0.8913 | | 0.2485 | 9.0 | 3800 | 0.2394 | 0.8960 | 0.8961 | | 0.241 | 9.48 | 4000 | 0.2356 | 0.8986 | 0.8987 | | 0.2434 | 9.95 | 4200 | 0.2344 | 0.8978 | 0.8979 | | 0.2373 | 10.43 | 4400 | 0.2411 | 0.8952 | 0.8952 | | 0.2377 | 10.9 | 4600 | 0.2386 | 0.8940 | 0.8940 | | 0.2321 | 11.37 | 4800 | 0.2413 | 0.8909 | 0.8909 | | 0.2429 | 11.85 | 5000 | 0.2348 | 0.8970 | 0.8971 | | 0.2335 | 12.32 | 5200 | 0.2434 | 0.8938 | 0.8938 | | 0.2335 | 12.8 | 5400 | 0.2434 | 0.8949 | 0.8949 | | 0.2318 | 13.27 | 5600 | 0.2352 | 0.8990 | 0.8990 | | 0.2261 | 13.74 | 5800 | 0.2349 | 0.8991 | 0.8992 | | 0.2302 | 14.22 | 6000 | 0.2425 | 0.8944 | 0.8944 | | 0.2285 | 14.69 | 6200 | 0.2361 | 0.8989 | 0.8989 | | 0.2288 | 15.17 | 6400 | 0.2388 | 0.8968 | 0.8968 | | 0.2304 | 15.64 | 6600 | 0.2334 | 0.8989 | 0.8989 | | 0.2264 | 16.11 | 6800 | 0.2324 | 0.8982 | 0.8983 | | 0.2231 | 16.59 | 7000 | 0.2364 | 0.8998 | 0.8998 | | 0.2298 | 17.06 | 7200 | 0.2343 | 0.8977 | 0.8977 | | 0.2245 | 17.54 | 7400 | 0.2352 | 0.8977 | 0.8977 | | 0.2236 | 18.01 | 7600 | 0.2308 | 0.9007 | 0.9007 | | 0.2199 | 18.48 | 7800 | 0.2349 | 0.8964 | 0.8964 | | 0.2262 | 18.96 | 8000 | 0.2323 | 0.8980 | 0.8980 | | 0.2227 | 19.43 | 8200 | 0.2314 | 0.8995 | 0.8995 | | 0.2199 | 19.91 | 8400 | 0.2328 | 0.8989 | 0.8989 | | 0.2237 | 20.38 | 8600 | 0.2324 | 0.8974 | 0.8974 | | 0.2218 | 20.85 | 8800 | 0.2303 | 0.8993 | 0.8993 | | 0.2186 | 21.33 | 9000 | 0.2319 
| 0.8987 | 0.8987 | | 0.2195 | 21.8 | 9200 | 0.2346 | 0.8977 | 0.8977 | | 0.2223 | 22.27 | 9400 | 0.2314 | 0.8990 | 0.8990 | | 0.2177 | 22.75 | 9600 | 0.2315 | 0.8995 | 0.8995 | | 0.2196 | 23.22 | 9800 | 0.2324 | 0.8981 | 0.8981 | | 0.2214 | 23.7 | 10000 | 0.2321 | 0.8978 | 0.8979 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
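The card above reports binary F1/accuracy for this adapter but gives no usage snippet; the following is a minimal, hypothetical sketch of loading the adapter for inference. Only the base-model and adapter repo ids come from the card; the sequence-classification head, `num_labels=2`, `trust_remote_code`, and the toy input are assumptions.

```python
# Hypothetical sketch: attach this PEFT adapter to its base checkpoint.
# Assumes the base model exposes a sequence-classification head and that the
# GUE_mouse_1 task is binary (num_labels=2); both are assumptions, not facts
# taken from the card above.
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from peft import PeftModel

base_id = "mahdibaghbanzadeh/seqsight_32768_512_30M"
adapter_id = "mahdibaghbanzadeh/GUE_mouse_1-seqsight_32768_512_30M-L32_f"

tokenizer = AutoTokenizer.from_pretrained(base_id, trust_remote_code=True)
base = AutoModelForSequenceClassification.from_pretrained(
    base_id, num_labels=2, trust_remote_code=True
)
model = PeftModel.from_pretrained(base, adapter_id)  # load the fine-tuned adapter weights

inputs = tokenizer("ACGTACGTACGT", return_tensors="pt")  # toy DNA sequence, illustrative only
logits = model(**inputs).logits
print(logits.argmax(dim=-1))  # predicted class index
```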
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_mouse_1-seqsight_32768_512_30M-L32_f", "results": []}]}
mahdibaghbanzadeh/GUE_mouse_1-seqsight_32768_512_30M-L32_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T05:14:40+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_mouse\_1-seqsight\_32768\_512\_30M-L32\_f ============================================== This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_mouse\_1 dataset. It achieves the following results on the evaluation set: * Loss: 0.2432 * F1 Score: 0.8950 * Accuracy: 0.8950 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_mouse_4-seqsight_32768_512_30M-L1_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_mouse_4](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_mouse_4) dataset. It achieves the following results on the evaluation set: - Loss: 0.6056 - F1 Score: 0.6643 - Accuracy: 0.6644 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:| | 0.6553 | 1.69 | 200 | 0.6306 | 0.6243 | 0.6267 | | 0.6339 | 3.39 | 400 | 0.6208 | 0.6397 | 0.6426 | | 0.6193 | 5.08 | 600 | 0.6058 | 0.6590 | 0.6591 | | 0.6153 | 6.78 | 800 | 0.6014 | 0.6714 | 0.6723 | | 0.6068 | 8.47 | 1000 | 0.5965 | 0.6709 | 0.6713 | | 0.6016 | 10.17 | 1200 | 0.5982 | 0.6670 | 0.6686 | | 0.5995 | 11.86 | 1400 | 0.5885 | 0.6799 | 0.6798 | | 0.5943 | 13.56 | 1600 | 0.5867 | 0.6783 | 0.6782 | | 0.594 | 15.25 | 1800 | 0.5840 | 0.6868 | 0.6867 | | 0.5901 | 16.95 | 2000 | 0.5825 | 0.6825 | 0.6824 | | 0.588 | 18.64 | 2200 | 0.5841 | 0.6865 | 0.6872 | | 0.5835 | 20.34 | 2400 | 0.5807 | 0.6824 | 0.6830 | | 0.584 | 22.03 | 2600 | 0.5789 | 0.6782 | 0.6782 | | 0.5816 | 23.73 | 2800 | 0.5779 | 0.6830 | 0.6830 | | 0.5804 | 25.42 | 3000 | 0.5804 | 0.6811 | 0.6819 | | 0.5803 | 27.12 | 3200 | 0.5864 | 0.6850 | 0.6872 | | 0.5779 | 28.81 | 3400 | 0.5773 | 0.6820 | 0.6819 | | 0.5751 | 30.51 | 3600 | 0.5795 | 0.6896 | 0.6899 | | 0.5727 | 32.2 | 3800 | 0.5762 | 0.6841 | 0.6840 | | 0.5725 | 33.9 | 4000 | 0.5762 | 0.6825 | 0.6824 | | 0.5751 | 35.59 | 4200 | 0.5781 | 0.6843 | 0.6845 | | 0.5706 | 37.29 | 4400 | 0.5763 | 0.6868 | 0.6867 | | 0.5713 | 38.98 | 4600 | 0.5747 | 0.6851 | 0.6851 | | 0.5708 | 40.68 | 4800 | 0.5763 | 0.6856 | 0.6856 | | 0.5645 | 42.37 | 5000 | 0.5755 | 0.6942 | 0.6941 | | 0.5706 | 44.07 | 5200 | 0.5736 | 0.6915 | 0.6914 | | 0.5669 | 45.76 | 5400 | 0.5781 | 0.6937 | 0.6946 | | 0.5661 | 47.46 | 5600 | 0.5738 | 0.6982 | 0.6984 | | 0.5691 | 49.15 | 5800 | 0.5759 | 0.6924 | 0.6930 | | 0.5672 | 50.85 | 6000 | 0.5722 | 0.6968 | 0.6968 | | 0.5659 | 52.54 | 6200 | 0.5741 | 0.6887 | 0.6888 | | 0.5617 | 54.24 | 6400 | 0.5733 | 0.6931 | 0.6930 | | 0.5668 | 55.93 | 6600 | 0.5722 | 0.6951 | 0.6952 | | 0.5628 | 57.63 | 6800 | 0.5729 | 0.6980 | 0.6984 | | 0.5624 | 59.32 | 7000 | 0.5741 | 0.6961 | 0.6962 | | 0.5597 | 61.02 | 7200 | 0.5739 | 0.6933 | 0.6941 | | 0.5611 | 62.71 | 7400 | 0.5744 | 0.6937 | 0.6936 | | 0.5604 | 64.41 | 7600 | 0.5725 | 0.6921 | 0.6920 | | 0.5627 | 66.1 | 7800 | 0.5723 | 0.6952 | 0.6952 | | 0.5607 | 67.8 | 8000 | 0.5719 | 0.6936 | 0.6936 | | 0.5625 | 69.49 | 8200 | 0.5723 | 0.6948 | 0.6946 | | 0.5587 | 71.19 | 8400 | 0.5724 | 0.6937 | 0.6936 | | 0.5586 | 72.88 | 8600 | 0.5725 | 0.6936 | 0.6936 | | 0.5544 | 74.58 | 8800 | 0.5730 | 0.6947 | 0.6946 | | 0.5598 | 76.27 
| 9000 | 0.5728 | 0.6958 | 0.6957 | | 0.5617 | 77.97 | 9200 | 0.5723 | 0.6953 | 0.6952 | | 0.5587 | 79.66 | 9400 | 0.5723 | 0.6973 | 0.6973 | | 0.5583 | 81.36 | 9600 | 0.5720 | 0.6994 | 0.6994 | | 0.5606 | 83.05 | 9800 | 0.5721 | 0.6994 | 0.6994 | | 0.5562 | 84.75 | 10000 | 0.5722 | 0.6942 | 0.6941 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_mouse_4-seqsight_32768_512_30M-L1_f", "results": []}]}
mahdibaghbanzadeh/GUE_mouse_4-seqsight_32768_512_30M-L1_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T05:15:19+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_mouse\_4-seqsight\_32768\_512\_30M-L1\_f ============================================= This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_mouse\_4 dataset. It achieves the following results on the evaluation set: * Loss: 0.6056 * F1 Score: 0.6643 * Accuracy: 0.6644 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # biomistral-7b-dpo-full-sft-wo-kqa_golden This model is a fine-tuned version of [Minbyul/biomistral-7b-wo-kqa_golden-sft](https://huggingface.co/Minbyul/biomistral-7b-wo-kqa_golden-sft) on the HuggingFaceH4/ultrafeedback_binarized dataset. It achieves the following results on the evaluation set: - Loss: 0.4647 - Rewards/chosen: -0.3056 - Rewards/rejected: -0.8412 - Rewards/accuracies: 0.875 - Rewards/margins: 0.5356 - Logps/rejected: -632.7374 - Logps/chosen: -249.8875 - Logits/rejected: -3.9057 - Logits/chosen: -4.3623 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-07 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - distributed_type: multi-GPU - num_devices: 4 - gradient_accumulation_steps: 2 - total_train_batch_size: 64 - total_eval_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen | |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:| | 0.1251 | 0.82 | 100 | 0.4664 | -0.3073 | -0.8372 | 0.875 | 0.5299 | -632.3325 | -250.0501 | -3.9097 | -4.3673 | ### Framework versions - Transformers 4.39.0.dev0 - Pytorch 2.1.2 - Datasets 2.14.6 - Tokenizers 0.15.2
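The DPO card above lists training hyperparameters and evaluation rewards but no inference example; a minimal generation sketch is shown below. Only the repo id is taken from the card; the prompt, dtype, and decoding settings are illustrative assumptions.

```python
# Minimal generation sketch for the checkpoint described above (illustrative only).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "Minbyul/biomistral-7b-dpo-full-sft-wo-kqa_golden"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="auto"  # dtype/device placement are assumptions
)

prompt = "Question: What are common symptoms of iron deficiency?\nAnswer:"  # hypothetical prompt format
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=128, do_sample=False)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```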
{"license": "apache-2.0", "tags": ["alignment-handbook", "trl", "dpo", "generated_from_trainer", "trl", "dpo", "generated_from_trainer"], "datasets": ["HuggingFaceH4/ultrafeedback_binarized"], "base_model": "Minbyul/biomistral-7b-wo-kqa_golden-sft", "model-index": [{"name": "biomistral-7b-dpo-full-sft-wo-kqa_golden", "results": []}]}
Minbyul/biomistral-7b-dpo-full-sft-wo-kqa_golden
null
[ "transformers", "safetensors", "mistral", "text-generation", "alignment-handbook", "trl", "dpo", "generated_from_trainer", "conversational", "dataset:HuggingFaceH4/ultrafeedback_binarized", "base_model:Minbyul/biomistral-7b-wo-kqa_golden-sft", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T05:15:21+00:00
[]
[]
TAGS #transformers #safetensors #mistral #text-generation #alignment-handbook #trl #dpo #generated_from_trainer #conversational #dataset-HuggingFaceH4/ultrafeedback_binarized #base_model-Minbyul/biomistral-7b-wo-kqa_golden-sft #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
biomistral-7b-dpo-full-sft-wo-kqa\_golden ========================================= This model is a fine-tuned version of Minbyul/biomistral-7b-wo-kqa\_golden-sft on the HuggingFaceH4/ultrafeedback\_binarized dataset. It achieves the following results on the evaluation set: * Loss: 0.4647 * Rewards/chosen: -0.3056 * Rewards/rejected: -0.8412 * Rewards/accuracies: 0.875 * Rewards/margins: 0.5356 * Logps/rejected: -632.7374 * Logps/chosen: -249.8875 * Logits/rejected: -3.9057 * Logits/chosen: -4.3623 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 5e-07 * train\_batch\_size: 8 * eval\_batch\_size: 8 * seed: 42 * distributed\_type: multi-GPU * num\_devices: 4 * gradient\_accumulation\_steps: 2 * total\_train\_batch\_size: 64 * total\_eval\_batch\_size: 32 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: cosine * lr\_scheduler\_warmup\_ratio: 0.1 * num\_epochs: 1 ### Training results ### Framework versions * Transformers 4.39.0.dev0 * Pytorch 2.1.2 * Datasets 2.14.6 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 5e-07\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* distributed\\_type: multi-GPU\n* num\\_devices: 4\n* gradient\\_accumulation\\_steps: 2\n* total\\_train\\_batch\\_size: 64\n* total\\_eval\\_batch\\_size: 32\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: cosine\n* lr\\_scheduler\\_warmup\\_ratio: 0.1\n* num\\_epochs: 1", "### Training results", "### Framework versions\n\n\n* Transformers 4.39.0.dev0\n* Pytorch 2.1.2\n* Datasets 2.14.6\n* Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #safetensors #mistral #text-generation #alignment-handbook #trl #dpo #generated_from_trainer #conversational #dataset-HuggingFaceH4/ultrafeedback_binarized #base_model-Minbyul/biomistral-7b-wo-kqa_golden-sft #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 5e-07\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* distributed\\_type: multi-GPU\n* num\\_devices: 4\n* gradient\\_accumulation\\_steps: 2\n* total\\_train\\_batch\\_size: 64\n* total\\_eval\\_batch\\_size: 32\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: cosine\n* lr\\_scheduler\\_warmup\\_ratio: 0.1\n* num\\_epochs: 1", "### Training results", "### Framework versions\n\n\n* Transformers 4.39.0.dev0\n* Pytorch 2.1.2\n* Datasets 2.14.6\n* Tokenizers 0.15.2" ]
[ 103, 176, 5, 43 ]
[ "TAGS\n#transformers #safetensors #mistral #text-generation #alignment-handbook #trl #dpo #generated_from_trainer #conversational #dataset-HuggingFaceH4/ultrafeedback_binarized #base_model-Minbyul/biomistral-7b-wo-kqa_golden-sft #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 5e-07\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* distributed\\_type: multi-GPU\n* num\\_devices: 4\n* gradient\\_accumulation\\_steps: 2\n* total\\_train\\_batch\\_size: 64\n* total\\_eval\\_batch\\_size: 32\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: cosine\n* lr\\_scheduler\\_warmup\\_ratio: 0.1\n* num\\_epochs: 1### Training results### Framework versions\n\n\n* Transformers 4.39.0.dev0\n* Pytorch 2.1.2\n* Datasets 2.14.6\n* Tokenizers 0.15.2" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
cilantro9246/7g1iirk
null
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T05:16:08+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 47, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
text-generation
null
# TC-instruct-DPO - Typhoon 7B - GGUF ## Description This repo contains GGUF format model files for [tanamettpk's TC Instruct DPO](https://huggingface.co/tanamettpk/TC-instruct-DPO). ## Quick jump <span style="font-size:1.125em;">[**Jump to Downloads**](#provided-files).</span> ### About GGUF GGUF is a new format introduced by the llama.cpp team on August 21st, 2023. It is a replacement for GGML, which is no longer supported by llama.cpp. GGUF offers numerous advantages over GGML, such as better tokenization, and support for special tokens. It also supports metadata and is designed to be extensible. Here is an incomplete list of clients and libraries that are known to support GGUF: * [llama.cpp](https://github.com/ggerganov/llama.cpp). The source project for GGUF. Offers a CLI and a server option. * [text-generation-webui](https://github.com/oobabooga/text-generation-webui), the most widely used web UI, with many features and powerful extensions. Supports GPU acceleration. * [KoboldCpp](https://github.com/LostRuins/koboldcpp), a fully featured web UI, with GPU accel across all platforms and GPU architectures. Especially good for storytelling. * [LM Studio](https://lmstudio.ai/), an easy-to-use and powerful local GUI for Windows and macOS (Silicon), with GPU acceleration. * [LoLLMS Web UI](https://github.com/ParisNeo/lollms-webui), a great web UI with many interesting and unique features, including a full model library for easy model selection. * [Faraday.dev](https://faraday.dev/), an attractive and easy-to-use character-based chat GUI for Windows and macOS (both Silicon and Intel), with GPU acceleration. * [ctransformers](https://github.com/marella/ctransformers), a Python library with GPU accel, LangChain support, and OpenAI-compatible AI server. * [llama-cpp-python](https://github.com/abetlen/llama-cpp-python), a Python library with GPU accel, LangChain support, and OpenAI-compatible API server. * [candle](https://github.com/huggingface/candle), a Rust ML framework with a focus on performance, including GPU support, and ease of use. ## Prompt template ``` ### Instruction: จะทำอะไรก็เรื่องของมึง ### Response: ด่าผมอีกสิครับ ``` ## Compatibility These quantised GGUFv2 files are compatible with llama.cpp from August 27th onwards, as of commit [d0cee0d36d5be95a0d9088b674dbb27354107221](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221) They are also compatible with many third-party UIs and libraries - please see the list at the top of this README. ## Explanation of quantization methods <details> <summary>Click to see details</summary> The new methods available are: * GGML_TYPE_Q2_K - "type-1" 2-bit quantization in super-blocks containing 16 blocks, each block having 16 weight. Block scales and mins are quantized with 4 bits. This ends up effectively using 2.5625 bits per weight (bpw) * GGML_TYPE_Q3_K - "type-0" 3-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Scales are quantized with 6 bits. This ends up using 3.4375 bpw. * GGML_TYPE_Q4_K - "type-1" 4-bit quantization in super-blocks containing 8 blocks, each block having 32 weights. Scales and mins are quantized with 6 bits. This ends up using 4.5 bpw. * GGML_TYPE_Q5_K - "type-1" 5-bit quantization. Same super-block structure as GGML_TYPE_Q4_K resulting in 5.5 bpw * GGML_TYPE_Q6_K - "type-0" 6-bit quantization. Super-blocks with 16 blocks, each block having 16 weights. Scales are quantized with 8 bits. 
This ends up using 6.5625 bpw Refer to the Provided Files table below to see what files use which methods, and how. </details> ## Provided files | Name | Quant method | Bits | Size | Use case | | ---- | ---- | ---- | ---- | ---- | | [tc-instruct-dpo.Q2_K.gguf](https://huggingface.co/pek111/TC-instruct-DPO-GGUF/blob/main/tc-instruct-dpo.Q2_K.gguf) | Q2_K | 2 | 2.88 GB | smallest, significant quality loss - not recommended for most purposes | | [tc-instruct-dpo.Q3_K_S.gguf](https://huggingface.co/pek111/TC-instruct-DPO-GGUF/blob/main/tc-instruct-dpo.Q3_K_S.gguf) | Q3_K_S | 3 | 2.96 GB | very small, high quality loss | | [tc-instruct-dpo.Q3_K_M.gguf](https://huggingface.co/pek111/TC-instruct-DPO-GGUF/blob/main/tc-instruct-dpo.Q3_K_M.gguf) | Q3_K_M | 3 | 3.29 GB | very small, high quality loss | | [tc-instruct-dpo.Q3_K_L.gguf](https://huggingface.co/pek111/TC-instruct-DPO-GGUF/blob/main/tc-instruct-dpo.Q3_K_L.gguf) | Q3_K_L | 3 | 3.57 GB | small, substantial quality loss | | [tc-instruct-dpo.Q4_0.gguf](https://huggingface.co/pek111/TC-instruct-DPO-GGUF/blob/main/tc-instruct-dpo.Q4_0.gguf) | Q4_0 | 4 | 3.84 GB | legacy; small, very high quality loss - prefer using Q3_K_M | | [tc-instruct-dpo.Q4_K_S.gguf](https://huggingface.co/pek111/TC-instruct-DPO-GGUF/blob/main/tc-instruct-dpo.Q4_K_S.gguf) | Q4_K_S | 4 | 3.87 GB | small, greater quality loss | | [tc-instruct-dpo.Q4_K_M.gguf](https://huggingface.co/pek111/TC-instruct-DPO-GGUF/blob/main/tc-instruct-dpo.Q4_K_M.gguf) | Q4_K_M | 4 | 4.08 GB | medium, balanced quality - recommended | | [tc-instruct-dpo.Q5_0.gguf](https://huggingface.co/pek111/TC-instruct-DPO-GGUF/blob/main/tc-instruct-dpo.Q5_0.gguf) | Q5_0 | 5 | 4.67 GB | legacy; medium, balanced quality - prefer using Q4_K_M | | [tc-instruct-dpo.Q5_K_S.gguf](https://huggingface.co/pek111/TC-instruct-DPO-GGUF/blob/main/tc-instruct-dpo.Q5_K_S.gguf) | Q5_K_S | 5 | 4.67 GB | large, low quality loss - recommended | | [tc-instruct-dpo.Q5_K_M.gguf](https://huggingface.co/pek111/TC-instruct-DPO-GGUF/blob/main/tc-instruct-dpo.Q5_K_M.gguf) | Q5_K_M | 5 | 4.79 GB | large, very low quality loss - recommended | | [tc-instruct-dpo.Q6_K.gguf](https://huggingface.co/pek111/TC-instruct-DPO-GGUF/blob/main/tc-instruct-dpo.Q6_K.gguf) | Q6_K | 6 | 5.55 GB | very large, extremely low quality loss | | [tc-instruct-dpo.Q8_0.gguf](https://huggingface.co/pek111/TC-instruct-DPO-GGUF/blob/main/tc-instruct-dpo.Q8_0.gguf) | Q8_0 | 8 | 7.19 GB | very large, extremely low quality loss - not recommended | | [tc-instruct-dpo.QF16.gguf](https://huggingface.co/pek111/TC-instruct-DPO-GGUF/blob/main/tc-instruct-dpo.Q8_0.gguf) | F16 | 16 | 13.53 GB | largest, original quality - not recommended | ## How to download GGUF files **Note for manual downloaders:** You rarely want to clone the entire repo! Multiple different quantization formats are provided, and most users only want to pick and download a single file. The following clients/libraries will automatically download models for you, providing a list of available models to choose from: - LM Studio - LoLLMS Web UI - Faraday.dev ### In `text-generation-webui` Under Download Model, you can enter the model repo: pek111/TC-instruct-DPO-GGUF, and below it, a specific filename to download, such as tc-instruct-dpo.Q4_K_M.gguf. Then click Download. 
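For a scriptable alternative to the clients above, a single file from the table can also be fetched with the `huggingface_hub` Python API. A minimal sketch follows; the filename is one of the quantisations listed in the table, and the local directory is an example.

```python
# Download one GGUF file from this repo with the huggingface_hub API.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="pek111/TC-instruct-DPO-GGUF",
    filename="tc-instruct-dpo.Q4_K_M.gguf",  # pick any file from the table above
    local_dir=".",
)
print(path)  # local path to the downloaded model file
```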
### On the command line, including multiple files at once I recommend using the `huggingface-hub` Python library: ```shell pip3 install 'huggingface-hub>=0.17.1' ``` Then you can download any individual model file to the current directory, at high speed, with a command like this: ```shell huggingface-cli download pek111/TC-instruct-DPO-GGUF tc-instruct-dpo.Q4_K_M.gguf --local-dir . --local-dir-use-symlinks False ``` <details> <summary>More advanced huggingface-cli download usage</summary> You can also download multiple files at once with a pattern: ```shell huggingface-cli download pek111/TC-instruct-DPO-GGUF --local-dir . --local-dir-use-symlinks False --include='*Q4_K*gguf' ``` For more documentation on downloading with `huggingface-cli`, please see: [HF -> Hub Python Library -> Download files -> Download from the CLI](https://huggingface.co/docs/huggingface_hub/guides/download#download-from-the-cli). To accelerate downloads on fast connections (1Gbit/s or higher), install `hf_transfer`: ```shell pip3 install hf_transfer ``` And set the environment variable `HF_HUB_ENABLE_HF_TRANSFER` to `1`: ```shell HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download pek111/TC-instruct-DPO-GGUF tc-instruct-dpo.Q4_K_M.gguf --local-dir . --local-dir-use-symlinks False ``` Windows CLI users: Use `set HF_HUB_ENABLE_HF_TRANSFER=1` or `$env:HF_HUB_ENABLE_HF_TRANSFER=1` before running the download command. </details> ## Example `llama.cpp` command Make sure you are using `llama.cpp` from commit [d0cee0d36d5be95a0d9088b674dbb27354107221](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221) or later. ```shell ./main -ngl 32 -m tc-instruct-dpo.Q4_K_M.gguf --color -c 4096 --temp 0.7 --repeat_penalty 1.1 -n -1 -p "{prompt}" ``` Change `-ngl 32` to the number of layers to offload to GPU. Remove it if you don't have GPU acceleration. Change `-c 4096` to the desired sequence length. For extended sequence models - eg 8K, 16K, 32K - the necessary RoPE scaling parameters are read from the GGUF file and set by llama.cpp automatically. If you want to have a chat-style conversation, replace the `-p <PROMPT>` argument with `-i -ins`. For other parameters and how to use them, please refer to [the llama.cpp documentation](https://github.com/ggerganov/llama.cpp/blob/master/examples/main/README.md). ## How to run in `text-generation-webui` Further instructions here: [text-generation-webui/docs/llama.cpp.md](https://github.com/oobabooga/text-generation-webui/blob/main/docs/llama.cpp.md). ## How to run from Python code You can use GGUF models from Python using the [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) or [ctransformers](https://github.com/marella/ctransformers) libraries.
### How to load this model from Python using llama-cpp-python #### First install the package ```shell # Base llama-cpp-python with no GPU acceleration pip install llama-cpp-python # With NVidia CUDA acceleration CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install llama-cpp-python # Or with OpenBLAS acceleration CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS" pip install llama-cpp-python # Or with CLBLast acceleration CMAKE_ARGS="-DLLAMA_CLBLAST=on" pip install llama-cpp-python # Or with AMD ROCm GPU acceleration (Linux only) CMAKE_ARGS="-DLLAMA_HIPBLAS=on" pip install llama-cpp-python # Or with Metal GPU acceleration for macOS systems only CMAKE_ARGS="-DLLAMA_METAL=on" pip install llama-cpp-python # On Windows, to set the CMAKE_ARGS variable in PowerShell, follow this format; eg for Nvidia CUDA: $env:CMAKE_ARGS = "-DLLAMA_CUDA=on" pip install llama_cpp_python --verbose # If BLAS = 0 try installing with these commands instead (Windows + CUDA) set CMAKE_ARGS="-DLLAMA_CUDA=on" set FORCE_CMAKE=1 $env:CMAKE_ARGS = "-DLLAMA_CUDA=on" $env:FORCE_CMAKE = 1 python -m pip install "llama_cpp_python>=0.2.26" --verbose --force-reinstall --no-cache-dir ``` #### Simple example code to load one of these GGUF models ```python import llama_cpp llm_cpp = llama_cpp.Llama( model_path="tc-instruct-dpo.Q4_K_M.gguf", # Path to the model n_threads=10, # CPU cores n_batch=512, # Should be between 1 and n_ctx, consider the amount of VRAM in your GPU. n_gpu_layers=35, # Change this value based on your model and your GPU VRAM pool. n_ctx=4096, # Max context length ) prompt = """ ### Instruction: สวัสดีครับ ผมชื่อเอก ### Response: """ response = llm_cpp( prompt=prompt, max_tokens=256, temperature=0.5, top_k=1, repeat_penalty=1.1, echo=True ) print(response) ``` #### Output: ```json { "id": "cmpl-a8d5746d-25fb-43b6-8b04-b562db72df2b", "object": "text_completion", "created": 1714460999, "model": "tc-instruct-dpo.Q4_K_M.gguf", "choices": [ { "text": "\n### Instruction:\nสวัสดีครับ ผมชื่อเอก\n\n### Response:\nสวัสดีครับ\n ", "index": 0, "logprobs": null, "finish_reason": "stop" } ], "usage": { "prompt_tokens": 21, "completion_tokens": 7, "total_tokens": 28 } } ``` ## How to use with LangChain Here are guides on using llama-cpp-python or ctransformers with LangChain: * [LangChain + llama-cpp-python](https://python.langchain.com/docs/integrations/llms/llamacpp) * [LangChain + ctransformers](https://python.langchain.com/docs/integrations/providers/ctransformers) # Original model card: tanamettpk's TC Instruct DPO # TC-instruct-DPO - Typhoon 7B ![image/png](https://i.seadn.io/gae/5rw87qeBGr0f4ieGyXPkLXaiVsQt_jYCI-2yjMn4W9rK3GBwy68W_3lO-ST_YPtAzhRBxb7ONhMe4YyYZNWM368dVGYnWGv6CIyYhA?auto=format&dpr=1&w=1400&fr=1) ## Model Description TC instruct DPO was fine-tuned from SCB 10X's Typhoon 7B, which is itself based on Mistral 7B - v0.1. TC instruct DPO was trained on whatever Thai-language data we could find, trying to keep the instructions as varied as possible. This model was built purely to study the steps involved in creating an LLM, and, as that implies, we had never built an LLM before nor studied the topic in much depth, so we made several naive choices; for example, we used the Alpaca prompt template, only to learn later that ChatML would have been the better choice. Training used QLoRA (rank 32, alpha 64) with a custom Hugging Face script (don't do this; switching to axolotl or unsloth is cheaper), on a single H100 PCIE 80 GB from vast.ai at roughly $3/hr. Training this model alone took about 21 hours,
Including all the trial and error, the total came to around 10k THB. Batch size was 24 (we really wanted 32, but that ran out of memory, and 16 felt like a waste of an 80 GB H100). ## If this model helps you and you would like to donate, it would be much appreciated Tipme: https://bit.ly/3m3uH5p # Prompt Format ``` ### Instruction: จะทำอะไรก็เรื่องของมึง ### Response: ด่าผมอีกสิครับ ``` # Inference Code Here is example code using Hugging Face Transformers to run inference with the model (note: loaded in 4-bit, it will require around 5 GB of VRAM). ```python # Requires pytorch, transformers, bitsandbytes, sentencepiece, protobuf, and flash-attn packages import torch from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, GenerationConfig import time base_model_id = "tanamettpk/TC-instruct-DPO" input_text = """ ### Instruction: ด่าฉันด้วยคำหยาบคายหน่อย ### Response: """ model = AutoModelForCausalLM.from_pretrained( base_model_id, low_cpu_mem_usage=True, return_dict=True, device_map={"": 0}, # Optionally pass quantization_config=BitsAndBytesConfig(load_in_4bit=True) here for 4-bit loading ) tokenizer = AutoTokenizer.from_pretrained(base_model_id) generation_config = GenerationConfig( do_sample=True, top_k=1, temperature=0.5, max_new_tokens=300, repetition_penalty=1.1, pad_token_id=tokenizer.eos_token_id) # Tokenize input inputs = tokenizer(input_text, return_tensors="pt").to("cuda") # Generate outputs st_time = time.time() outputs = model.generate(**inputs, generation_config=generation_config) # Decode and print response response = tokenizer.decode(outputs[0], skip_special_tokens=True) print(f"Response time: {time.time() - st_time} seconds") print(response) ``` # How to cite: ```bibtex @misc{TC-instruct-DPO, url={https://huggingface.co/tanamettpk/TC-instruct-DPO}, title={TC-instruct-DPO}, author={tanamettpk} } ```
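The LangChain section above only links to external guides, so here is a minimal, hedged sketch of wiring this GGUF file into LangChain through its llama-cpp-python integration. It is not part of the original card: the `langchain_community` import and the parameter choices are assumptions, while the model path, context length, and prompt format mirror the examples earlier in this card.

```python
# Minimal LangChain + llama-cpp-python sketch (assumes `langchain-community` and
# `llama-cpp-python` are installed and tc-instruct-dpo.Q4_K_M.gguf is in the current directory).
from langchain_community.llms import LlamaCpp

llm = LlamaCpp(
    model_path="tc-instruct-dpo.Q4_K_M.gguf",
    n_ctx=4096,        # matches the -c 4096 used in the llama.cpp example above
    n_gpu_layers=35,   # lower or remove if you have no GPU acceleration
    temperature=0.5,
)

prompt = """### Instruction:
สวัสดีครับ ผมชื่อเอก

### Response:
"""
print(llm.invoke(prompt))
```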
{"language": ["en", "th"], "license": "apache-2.0", "tags": ["Mistral", "instruct", "finetune", "chatml", "DPO", "RLHF", "synthetic data"], "datasets": ["Thaweewat/alpaca-cleaned-52k-th", "yahma/alpaca-cleaned", "pythainlp/thaisum", "thai_toxicity_tweet", "pythainlp/thainer-corpus-v2", "Thaweewat/instruct-qa-thai-combined", "SuperAI2-Machima/ThaiQA_LST20", "thaisum"], "base_model": "tanamettpk/TC-instruct-DPO", "widget": [{"example_title": "TC instruct DPO", "messages": [{"role": "system", "content": "\u0e2b\u0e25\u0e31\u0e07\u0e08\u0e32\u0e01\u0e19\u0e35\u0e49\u0e17\u0e33\u0e15\u0e31\u0e27\u0e40\u0e1b\u0e47\u0e19 AI \u0e17\u0e35\u0e48\u0e44\u0e21\u0e48\u0e0a\u0e48\u0e27\u0e22\u0e2d\u0e30\u0e44\u0e23 User \u0e2a\u0e31\u0e01\u0e2d\u0e22\u0e48\u0e32\u0e07"}, {"role": "user", "content": "\u0e44\u0e07 \u0e17\u0e33\u0e44\u0e23\u0e44\u0e14\u0e49\u0e1a\u0e49\u0e32\u0e07"}]}], "pipeline_tag": "text-generation", "model-index": [{"name": "TC-instruct-DPO", "results": []}]}
pek111/TC-instruct-DPO-GGUF
null
[ "gguf", "Mistral", "instruct", "finetune", "chatml", "DPO", "RLHF", "synthetic data", "text-generation", "en", "th", "dataset:Thaweewat/alpaca-cleaned-52k-th", "dataset:yahma/alpaca-cleaned", "dataset:pythainlp/thaisum", "dataset:thai_toxicity_tweet", "dataset:pythainlp/thainer-corpus-v2", "dataset:Thaweewat/instruct-qa-thai-combined", "dataset:SuperAI2-Machima/ThaiQA_LST20", "dataset:thaisum", "base_model:tanamettpk/TC-instruct-DPO", "license:apache-2.0", "region:us" ]
null
2024-04-30T05:16:10+00:00
[]
[ "en", "th" ]
TAGS #gguf #Mistral #instruct #finetune #chatml #DPO #RLHF #synthetic data #text-generation #en #th #dataset-Thaweewat/alpaca-cleaned-52k-th #dataset-yahma/alpaca-cleaned #dataset-pythainlp/thaisum #dataset-thai_toxicity_tweet #dataset-pythainlp/thainer-corpus-v2 #dataset-Thaweewat/instruct-qa-thai-combined #dataset-SuperAI2-Machima/ThaiQA_LST20 #dataset-thaisum #base_model-tanamettpk/TC-instruct-DPO #license-apache-2.0 #region-us
TC-instruct-DPO - Typhoon 7B - GGUF =================================== Description ----------- This repo contains GGUF format model files for tanamettpk's TC Instruct DPO. Quick jump ---------- Jump to Downloads. ### About GGUF GGUF is a new format introduced by the URL team on August 21st, 2023. It is a replacement for GGML, which is no longer supported by URL. GGUF offers numerous advantages over GGML, such as better tokenization, and support for special tokens. It also supports metadata and is designed to be extensible. Here is an incomplete list of clients and libraries that are known to support GGUF: * URL. The source project for GGUF. Offers a CLI and a server option. * text-generation-webui, the most widely used web UI, with many features and powerful extensions. Supports GPU acceleration. * KoboldCpp, a fully featured web UI, with GPU accel across all platforms and GPU architectures. Especially good for storytelling. * LM Studio, an easy-to-use and powerful local GUI for Windows and macOS (Silicon), with GPU acceleration. * LoLLMS Web UI, a great web UI with many interesting and unique features, including a full model library for easy model selection. * URL, an attractive and easy-to-use character-based chat GUI for Windows and macOS (both Silicon and Intel), with GPU acceleration. * ctransformers, a Python library with GPU accel, LangChain support, and OpenAI-compatible AI server. * llama-cpp-python, a Python library with GPU accel, LangChain support, and OpenAI-compatible API server. * candle, a Rust ML framework with a focus on performance, including GPU support, and ease of use. Prompt template --------------- Compatibility ------------- These quantised GGUFv2 files are compatible with URL from August 27th onwards, as of commit d0cee0d36d5be95a0d9088b674dbb27354107221 They are also compatible with many third-party UIs and libraries - please see the list at the top of this README. Explanation of quantization methods ----------------------------------- Click to see details The new methods available are: * GGML\_TYPE\_Q2\_K - "type-1" 2-bit quantization in super-blocks containing 16 blocks, each block having 16 weight. Block scales and mins are quantized with 4 bits. This ends up effectively using 2.5625 bits per weight (bpw) * GGML\_TYPE\_Q3\_K - "type-0" 3-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Scales are quantized with 6 bits. This ends up using 3.4375 bpw. * GGML\_TYPE\_Q4\_K - "type-1" 4-bit quantization in super-blocks containing 8 blocks, each block having 32 weights. Scales and mins are quantized with 6 bits. This ends up using 4.5 bpw. * GGML\_TYPE\_Q5\_K - "type-1" 5-bit quantization. Same super-block structure as GGML\_TYPE\_Q4\_K resulting in 5.5 bpw * GGML\_TYPE\_Q6\_K - "type-0" 6-bit quantization. Super-blocks with 16 blocks, each block having 16 weights. Scales are quantized with 8 bits. This ends up using 6.5625 bpw Refer to the Provided Files table below to see what files use which methods, and how. Provided files -------------- How to download GGUF files -------------------------- Note for manual downloaders: You rarely want to clone the entire repo! Multiple different quantization formats are provided, and most users only want to pick and download a single file. 
The following clients/libraries will automatically download models for you, providing a list of available models to choose from: * LM Studio * LoLLMS Web UI * URL ### In 'text-generation-webui' Under Download Model, you can enter the model repo: pek111/TC-instruct-DPO-GGUF, and below it, a specific filename to download, such as tc-instruct-dpo.Q4\_K\_M.gguf. Then click Download. ### On the command line, including multiple files at once I recommend using the 'huggingface-hub' Python library: Then you can download any individual model file to the current directory, at high speed, with a command like this: More advanced huggingface-cli download usage You can also download multiple files at once with a pattern: For more documentation on downloading with 'huggingface-cli', please see: HF -> Hub Python Library -> Download files -> Download from the CLI. To accelerate downloads on fast connections (1Gbit/s or higher), install 'hf\_transfer': And set environment variable 'HF\_HUB\_ENABLE\_HF\_TRANSFER' to '1': Windows CLI users: Use 'set HUGGINGFACE\_HUB\_ENABLE\_HF\_TRANSFER=1' or '$env:HUGGINGFACE\_HUB\_ENABLE\_HF\_TRANSFER=1' before running the download command. Example 'URL' command --------------------- Make sure you are using 'URL' from commit d0cee0d36d5be95a0d9088b674dbb27354107221 or later. Change '-ngl 32' to the number of layers to offload to GPU. Remove it if you don't have GPU acceleration. Change '-c 4096' to the desired sequence length. For extended sequence models - eg 8K, 16K, 32K - the necessary RoPE scaling parameters are read from the GGUF file and set by URL automatically. If you want to have a chat-style conversation, replace the '-p ' argument with '-i -ins' For other parameters and how to use them, please refer to the URL documentation How to run in 'text-generation-webui' ------------------------------------- Further instructions here: text-generation-webui/docs/URL. How to run from Python code --------------------------- You can use GGUF models from Python using the llama-cpp-python or ctransformers libraries. ### How to load this model from Python using ctransformers #### First install the package #### Simple example code to load one of these GGUF models #### Output: How to use with LangChain ------------------------- Here are guides on using llama-cpp-python or ctransformers with LangChain: * LangChain + llama-cpp-python * LangChain + ctransformers Original model card: tanamettpk's TC Instruct DPO - Typhoon 7B ============================================================== TC-instruct-DPO - Typhoon 7B ============================ !image/png Model Description ----------------- TC instruct DPO finetuned มาจาก Typhoon 7B ของ SCB 10X ซึ่งมาจาก Mistral 7B - v0.1 อีกที TC instruct DPO ได้ทำการ Train กับ Data ภาษาไทยเท่าที่จะหาได้ และ พยายามให้ Instruct มีความต่างกันเท่าที่จะทำได้ Model นี้ตั้งใจทำขึ้นเพื่อการศึกษาขั้นตอนในการสร้าง LLM เท่านั้น และอย่างที่บอกว่าเพื่อศึกษา และ เราไม่เคยสร้าง LLM มาก่อนหรือศึกษามาเป็นอย่างดีนัก เราเลยมีความโง่หลายๆอย่างเช่น เราใช้ Prompt template เป็น Alpaca template ซึ่งไอ้สัส มารู้ทีหลังว่าต้องใช้ ChatML ดีกว่า โดยการ Train Model นี้เราใช้ QLoRA Rank 32 Alpha 64 Train ด้วย Custom Script ของ Huggingface (อย่าหาทำ ย้ายไปใช้ axolotl หรือ unsloth ดีกว่าประหยัดตัง) ใช้ H100 PCIE 80 GB 1 ตัวจาก URL ราคาประมาณ 3$/hr Train แค่ Model นี้ก็ประมาณ 21 ชม. 
แต่ถ้ารวมลองผิดลองถูกด้วยก็ 10k บาท ด้วย Batch size 24 (จริงๆอยากใช้ 32 แต่ OOM และ 16 ก็แหม๋~~~ เพิล กูใช้ H100 80GB จะให้กู Train แค่ 40 GB บ้าบ้อ) ถ้าใครเอาไปใช้แล้วมันช่วยได้จะมาช่วย Donate ให้จะขอบคุณมากๆ ----------------------------------------------------------- Tipme: URL Prompt Format ============= Inference Code ============== Here is example code using HuggingFace Transformers to inference the model (note: in 4bit, it will require around 5GB of VRAM) Note: To use function calling, you should see the github repo above. How to cite: ============
[ "### About GGUF\n\n\nGGUF is a new format introduced by the URL team on August 21st, 2023. It is a replacement for GGML, which is no longer supported by URL. GGUF offers numerous advantages over GGML, such as better tokenization, and support for special tokens. It also supports metadata and is designed to be extensible.\n\n\nHere is an incomplete list of clients and libraries that are known to support GGUF:\n\n\n* URL. The source project for GGUF. Offers a CLI and a server option.\n* text-generation-webui, the most widely used web UI, with many features and powerful extensions. Supports GPU acceleration.\n* KoboldCpp, a fully featured web UI, with GPU accel across all platforms and GPU architectures. Especially good for storytelling.\n* LM Studio, an easy-to-use and powerful local GUI for Windows and macOS (Silicon), with GPU acceleration.\n* LoLLMS Web UI, a great web UI with many interesting and unique features, including a full model library for easy model selection.\n* URL, an attractive and easy-to-use character-based chat GUI for Windows and macOS (both Silicon and Intel), with GPU acceleration.\n* ctransformers, a Python library with GPU accel, LangChain support, and OpenAI-compatible AI server.\n* llama-cpp-python, a Python library with GPU accel, LangChain support, and OpenAI-compatible API server.\n* candle, a Rust ML framework with a focus on performance, including GPU support, and ease of use.\n\n\nPrompt template\n---------------\n\n\nCompatibility\n-------------\n\n\nThese quantised GGUFv2 files are compatible with URL from August 27th onwards, as of commit d0cee0d36d5be95a0d9088b674dbb27354107221\n\n\nThey are also compatible with many third-party UIs and libraries - please see the list at the top of this README.\n\n\nExplanation of quantization methods\n-----------------------------------\n\n\n\nClick to see details\nThe new methods available are:\n\n\n* GGML\\_TYPE\\_Q2\\_K - \"type-1\" 2-bit quantization in super-blocks containing 16 blocks, each block having 16 weight. Block scales and mins are quantized with 4 bits. This ends up effectively using 2.5625 bits per weight (bpw)\n* GGML\\_TYPE\\_Q3\\_K - \"type-0\" 3-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Scales are quantized with 6 bits. This ends up using 3.4375 bpw.\n* GGML\\_TYPE\\_Q4\\_K - \"type-1\" 4-bit quantization in super-blocks containing 8 blocks, each block having 32 weights. Scales and mins are quantized with 6 bits. This ends up using 4.5 bpw.\n* GGML\\_TYPE\\_Q5\\_K - \"type-1\" 5-bit quantization. Same super-block structure as GGML\\_TYPE\\_Q4\\_K resulting in 5.5 bpw\n* GGML\\_TYPE\\_Q6\\_K - \"type-0\" 6-bit quantization. Super-blocks with 16 blocks, each block having 16 weights. Scales are quantized with 8 bits. This ends up using 6.5625 bpw\n\n\nRefer to the Provided Files table below to see what files use which methods, and how.\n\n\n\nProvided files\n--------------\n\n\n\nHow to download GGUF files\n--------------------------\n\n\nNote for manual downloaders: You rarely want to clone the entire repo! 
Multiple different quantization formats are provided, and most users only want to pick and download a single file.\n\n\nThe following clients/libraries will automatically download models for you, providing a list of available models to choose from:\n\n\n* LM Studio\n* LoLLMS Web UI\n* URL", "### In 'text-generation-webui'\n\n\nUnder Download Model, you can enter the model repo: pek111/TC-instruct-DPO-GGUF, and below it, a specific filename to download, such as tc-instruct-dpo.Q4\\_K\\_M.gguf.\n\n\nThen click Download.", "### On the command line, including multiple files at once\n\n\nI recommend using the 'huggingface-hub' Python library:\n\n\nThen you can download any individual model file to the current directory, at high speed, with a command like this:\n\n\n\nMore advanced huggingface-cli download usage\nYou can also download multiple files at once with a pattern:\n\n\nFor more documentation on downloading with 'huggingface-cli', please see: HF -> Hub Python Library -> Download files -> Download from the CLI.\n\n\nTo accelerate downloads on fast connections (1Gbit/s or higher), install 'hf\\_transfer':\n\n\nAnd set environment variable 'HF\\_HUB\\_ENABLE\\_HF\\_TRANSFER' to '1':\n\n\nWindows CLI users: Use 'set HUGGINGFACE\\_HUB\\_ENABLE\\_HF\\_TRANSFER=1' or '$env:HUGGINGFACE\\_HUB\\_ENABLE\\_HF\\_TRANSFER=1' before running the download command.\n\n\n\nExample 'URL' command\n---------------------\n\n\nMake sure you are using 'URL' from commit d0cee0d36d5be95a0d9088b674dbb27354107221 or later.\n\n\nChange '-ngl 32' to the number of layers to offload to GPU. Remove it if you don't have GPU acceleration.\n\n\nChange '-c 4096' to the desired sequence length. For extended sequence models - eg 8K, 16K, 32K - the necessary RoPE scaling parameters are read from the GGUF file and set by URL automatically.\n\n\nIf you want to have a chat-style conversation, replace the '-p ' argument with '-i -ins'\n\n\nFor other parameters and how to use them, please refer to the URL documentation\n\n\nHow to run in 'text-generation-webui'\n-------------------------------------\n\n\nFurther instructions here: text-generation-webui/docs/URL.\n\n\nHow to run from Python code\n---------------------------\n\n\nYou can use GGUF models from Python using the llama-cpp-python or ctransformers libraries.", "### How to load this model from Python using ctransformers", "#### First install the package", "#### Simple example code to load one of these GGUF models", "#### Output:\n\n\nHow to use with LangChain\n-------------------------\n\n\nHere are guides on using llama-cpp-python or ctransformers with LangChain:\n\n\n* LangChain + llama-cpp-python\n* LangChain + ctransformers\n\n\nOriginal model card: tanamettpk's TC Instruct DPO - Typhoon 7B\n==============================================================\n\n\nTC-instruct-DPO - Typhoon 7B\n============================\n\n\n!image/png\n\n\nModel Description\n-----------------\n\n\nTC instruct DPO finetuned มาจาก Typhoon 7B ของ SCB 10X ซึ่งมาจาก Mistral 7B - v0.1 อีกที\n\n\nTC instruct DPO ได้ทำการ Train กับ Data ภาษาไทยเท่าที่จะหาได้ และ พยายามให้ Instruct มีความต่างกันเท่าที่จะทำได้\n\n\nModel นี้ตั้งใจทำขึ้นเพื่อการศึกษาขั้นตอนในการสร้าง LLM เท่านั้น\n\n\nและอย่างที่บอกว่าเพื่อศึกษา และ เราไม่เคยสร้าง LLM มาก่อนหรือศึกษามาเป็นอย่างดีนัก\n\n\nเราเลยมีความโง่หลายๆอย่างเช่น เราใช้ Prompt template เป็น Alpaca template ซึ่งไอ้สัส มารู้ทีหลังว่าต้องใช้ ChatML ดีกว่า\n\n\nโดยการ Train Model นี้เราใช้ QLoRA Rank 32 Alpha 64\n\n\nTrain ด้วย Custom Script ของ Huggingface (อย่าหาทำ 
ย้ายไปใช้ axolotl หรือ unsloth ดีกว่าประหยัดตัง)\n\n\nใช้ H100 PCIE 80 GB 1 ตัวจาก URL ราคาประมาณ 3$/hr Train แค่ Model นี้ก็ประมาณ 21 ชม. แต่ถ้ารวมลองผิดลองถูกด้วยก็ 10k บาท\n\n\nด้วย Batch size 24 (จริงๆอยากใช้ 32 แต่ OOM และ 16 ก็แหม๋~~~ เพิล กูใช้ H100 80GB จะให้กู Train แค่ 40 GB บ้าบ้อ)\n\n\nถ้าใครเอาไปใช้แล้วมันช่วยได้จะมาช่วย Donate ให้จะขอบคุณมากๆ\n-----------------------------------------------------------\n\n\nTipme: URL\n\n\nPrompt Format\n=============\n\n\nInference Code\n==============\n\n\nHere is example code using HuggingFace Transformers to inference the model (note: in 4bit, it will require around 5GB of VRAM)\n\n\nNote: To use function calling, you should see the github repo above.\n\n\nHow to cite:\n============" ]
[ "TAGS\n#gguf #Mistral #instruct #finetune #chatml #DPO #RLHF #synthetic data #text-generation #en #th #dataset-Thaweewat/alpaca-cleaned-52k-th #dataset-yahma/alpaca-cleaned #dataset-pythainlp/thaisum #dataset-thai_toxicity_tweet #dataset-pythainlp/thainer-corpus-v2 #dataset-Thaweewat/instruct-qa-thai-combined #dataset-SuperAI2-Machima/ThaiQA_LST20 #dataset-thaisum #base_model-tanamettpk/TC-instruct-DPO #license-apache-2.0 #region-us \n", "### About GGUF\n\n\nGGUF is a new format introduced by the URL team on August 21st, 2023. It is a replacement for GGML, which is no longer supported by URL. GGUF offers numerous advantages over GGML, such as better tokenization, and support for special tokens. It also supports metadata and is designed to be extensible.\n\n\nHere is an incomplete list of clients and libraries that are known to support GGUF:\n\n\n* URL. The source project for GGUF. Offers a CLI and a server option.\n* text-generation-webui, the most widely used web UI, with many features and powerful extensions. Supports GPU acceleration.\n* KoboldCpp, a fully featured web UI, with GPU accel across all platforms and GPU architectures. Especially good for storytelling.\n* LM Studio, an easy-to-use and powerful local GUI for Windows and macOS (Silicon), with GPU acceleration.\n* LoLLMS Web UI, a great web UI with many interesting and unique features, including a full model library for easy model selection.\n* URL, an attractive and easy-to-use character-based chat GUI for Windows and macOS (both Silicon and Intel), with GPU acceleration.\n* ctransformers, a Python library with GPU accel, LangChain support, and OpenAI-compatible AI server.\n* llama-cpp-python, a Python library with GPU accel, LangChain support, and OpenAI-compatible API server.\n* candle, a Rust ML framework with a focus on performance, including GPU support, and ease of use.\n\n\nPrompt template\n---------------\n\n\nCompatibility\n-------------\n\n\nThese quantised GGUFv2 files are compatible with URL from August 27th onwards, as of commit d0cee0d36d5be95a0d9088b674dbb27354107221\n\n\nThey are also compatible with many third-party UIs and libraries - please see the list at the top of this README.\n\n\nExplanation of quantization methods\n-----------------------------------\n\n\n\nClick to see details\nThe new methods available are:\n\n\n* GGML\\_TYPE\\_Q2\\_K - \"type-1\" 2-bit quantization in super-blocks containing 16 blocks, each block having 16 weight. Block scales and mins are quantized with 4 bits. This ends up effectively using 2.5625 bits per weight (bpw)\n* GGML\\_TYPE\\_Q3\\_K - \"type-0\" 3-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Scales are quantized with 6 bits. This ends up using 3.4375 bpw.\n* GGML\\_TYPE\\_Q4\\_K - \"type-1\" 4-bit quantization in super-blocks containing 8 blocks, each block having 32 weights. Scales and mins are quantized with 6 bits. This ends up using 4.5 bpw.\n* GGML\\_TYPE\\_Q5\\_K - \"type-1\" 5-bit quantization. Same super-block structure as GGML\\_TYPE\\_Q4\\_K resulting in 5.5 bpw\n* GGML\\_TYPE\\_Q6\\_K - \"type-0\" 6-bit quantization. Super-blocks with 16 blocks, each block having 16 weights. Scales are quantized with 8 bits. This ends up using 6.5625 bpw\n\n\nRefer to the Provided Files table below to see what files use which methods, and how.\n\n\n\nProvided files\n--------------\n\n\n\nHow to download GGUF files\n--------------------------\n\n\nNote for manual downloaders: You rarely want to clone the entire repo! 
Multiple different quantization formats are provided, and most users only want to pick and download a single file.\n\n\nThe following clients/libraries will automatically download models for you, providing a list of available models to choose from:\n\n\n* LM Studio\n* LoLLMS Web UI\n* URL", "### In 'text-generation-webui'\n\n\nUnder Download Model, you can enter the model repo: pek111/TC-instruct-DPO-GGUF, and below it, a specific filename to download, such as tc-instruct-dpo.Q4\\_K\\_M.gguf.\n\n\nThen click Download.", "### On the command line, including multiple files at once\n\n\nI recommend using the 'huggingface-hub' Python library:\n\n\nThen you can download any individual model file to the current directory, at high speed, with a command like this:\n\n\n\nMore advanced huggingface-cli download usage\nYou can also download multiple files at once with a pattern:\n\n\nFor more documentation on downloading with 'huggingface-cli', please see: HF -> Hub Python Library -> Download files -> Download from the CLI.\n\n\nTo accelerate downloads on fast connections (1Gbit/s or higher), install 'hf\\_transfer':\n\n\nAnd set environment variable 'HF\\_HUB\\_ENABLE\\_HF\\_TRANSFER' to '1':\n\n\nWindows CLI users: Use 'set HUGGINGFACE\\_HUB\\_ENABLE\\_HF\\_TRANSFER=1' or '$env:HUGGINGFACE\\_HUB\\_ENABLE\\_HF\\_TRANSFER=1' before running the download command.\n\n\n\nExample 'URL' command\n---------------------\n\n\nMake sure you are using 'URL' from commit d0cee0d36d5be95a0d9088b674dbb27354107221 or later.\n\n\nChange '-ngl 32' to the number of layers to offload to GPU. Remove it if you don't have GPU acceleration.\n\n\nChange '-c 4096' to the desired sequence length. For extended sequence models - eg 8K, 16K, 32K - the necessary RoPE scaling parameters are read from the GGUF file and set by URL automatically.\n\n\nIf you want to have a chat-style conversation, replace the '-p ' argument with '-i -ins'\n\n\nFor other parameters and how to use them, please refer to the URL documentation\n\n\nHow to run in 'text-generation-webui'\n-------------------------------------\n\n\nFurther instructions here: text-generation-webui/docs/URL.\n\n\nHow to run from Python code\n---------------------------\n\n\nYou can use GGUF models from Python using the llama-cpp-python or ctransformers libraries.", "### How to load this model from Python using ctransformers", "#### First install the package", "#### Simple example code to load one of these GGUF models", "#### Output:\n\n\nHow to use with LangChain\n-------------------------\n\n\nHere are guides on using llama-cpp-python or ctransformers with LangChain:\n\n\n* LangChain + llama-cpp-python\n* LangChain + ctransformers\n\n\nOriginal model card: tanamettpk's TC Instruct DPO - Typhoon 7B\n==============================================================\n\n\nTC-instruct-DPO - Typhoon 7B\n============================\n\n\n!image/png\n\n\nModel Description\n-----------------\n\n\nTC instruct DPO finetuned มาจาก Typhoon 7B ของ SCB 10X ซึ่งมาจาก Mistral 7B - v0.1 อีกที\n\n\nTC instruct DPO ได้ทำการ Train กับ Data ภาษาไทยเท่าที่จะหาได้ และ พยายามให้ Instruct มีความต่างกันเท่าที่จะทำได้\n\n\nModel นี้ตั้งใจทำขึ้นเพื่อการศึกษาขั้นตอนในการสร้าง LLM เท่านั้น\n\n\nและอย่างที่บอกว่าเพื่อศึกษา และ เราไม่เคยสร้าง LLM มาก่อนหรือศึกษามาเป็นอย่างดีนัก\n\n\nเราเลยมีความโง่หลายๆอย่างเช่น เราใช้ Prompt template เป็น Alpaca template ซึ่งไอ้สัส มารู้ทีหลังว่าต้องใช้ ChatML ดีกว่า\n\n\nโดยการ Train Model นี้เราใช้ QLoRA Rank 32 Alpha 64\n\n\nTrain ด้วย Custom Script ของ Huggingface (อย่าหาทำ 
ย้ายไปใช้ axolotl หรือ unsloth ดีกว่าประหยัดตัง)\n\n\nใช้ H100 PCIE 80 GB 1 ตัวจาก URL ราคาประมาณ 3$/hr Train แค่ Model นี้ก็ประมาณ 21 ชม. แต่ถ้ารวมลองผิดลองถูกด้วยก็ 10k บาท\n\n\nด้วย Batch size 24 (จริงๆอยากใช้ 32 แต่ OOM และ 16 ก็แหม๋~~~ เพิล กูใช้ H100 80GB จะให้กู Train แค่ 40 GB บ้าบ้อ)\n\n\nถ้าใครเอาไปใช้แล้วมันช่วยได้จะมาช่วย Donate ให้จะขอบคุณมากๆ\n-----------------------------------------------------------\n\n\nTipme: URL\n\n\nPrompt Format\n=============\n\n\nInference Code\n==============\n\n\nHere is example code using HuggingFace Transformers to inference the model (note: in 4bit, it will require around 5GB of VRAM)\n\n\nNote: To use function calling, you should see the github repo above.\n\n\nHow to cite:\n============" ]
[ 177, 954, 79, 533, 16, 8, 16, 551 ]
[ "TAGS\n#gguf #Mistral #instruct #finetune #chatml #DPO #RLHF #synthetic data #text-generation #en #th #dataset-Thaweewat/alpaca-cleaned-52k-th #dataset-yahma/alpaca-cleaned #dataset-pythainlp/thaisum #dataset-thai_toxicity_tweet #dataset-pythainlp/thainer-corpus-v2 #dataset-Thaweewat/instruct-qa-thai-combined #dataset-SuperAI2-Machima/ThaiQA_LST20 #dataset-thaisum #base_model-tanamettpk/TC-instruct-DPO #license-apache-2.0 #region-us \n### About GGUF\n\n\nGGUF is a new format introduced by the URL team on August 21st, 2023. It is a replacement for GGML, which is no longer supported by URL. GGUF offers numerous advantages over GGML, such as better tokenization, and support for special tokens. It also supports metadata and is designed to be extensible.\n\n\nHere is an incomplete list of clients and libraries that are known to support GGUF:\n\n\n* URL. The source project for GGUF. Offers a CLI and a server option.\n* text-generation-webui, the most widely used web UI, with many features and powerful extensions. Supports GPU acceleration.\n* KoboldCpp, a fully featured web UI, with GPU accel across all platforms and GPU architectures. Especially good for storytelling.\n* LM Studio, an easy-to-use and powerful local GUI for Windows and macOS (Silicon), with GPU acceleration.\n* LoLLMS Web UI, a great web UI with many interesting and unique features, including a full model library for easy model selection.\n* URL, an attractive and easy-to-use character-based chat GUI for Windows and macOS (both Silicon and Intel), with GPU acceleration.\n* ctransformers, a Python library with GPU accel, LangChain support, and OpenAI-compatible AI server.\n* llama-cpp-python, a Python library with GPU accel, LangChain support, and OpenAI-compatible API server.\n* candle, a Rust ML framework with a focus on performance, including GPU support, and ease of use.\n\n\nPrompt template\n---------------\n\n\nCompatibility\n-------------\n\n\nThese quantised GGUFv2 files are compatible with URL from August 27th onwards, as of commit d0cee0d36d5be95a0d9088b674dbb27354107221\n\n\nThey are also compatible with many third-party UIs and libraries - please see the list at the top of this README.\n\n\nExplanation of quantization methods\n-----------------------------------\n\n\n\nClick to see details\nThe new methods available are:\n\n\n* GGML\\_TYPE\\_Q2\\_K - \"type-1\" 2-bit quantization in super-blocks containing 16 blocks, each block having 16 weight. Block scales and mins are quantized with 4 bits. This ends up effectively using 2.5625 bits per weight (bpw)\n* GGML\\_TYPE\\_Q3\\_K - \"type-0\" 3-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Scales are quantized with 6 bits. This ends up using 3.4375 bpw.\n* GGML\\_TYPE\\_Q4\\_K - \"type-1\" 4-bit quantization in super-blocks containing 8 blocks, each block having 32 weights. Scales and mins are quantized with 6 bits. This ends up using 4.5 bpw.\n* GGML\\_TYPE\\_Q5\\_K - \"type-1\" 5-bit quantization. Same super-block structure as GGML\\_TYPE\\_Q4\\_K resulting in 5.5 bpw\n* GGML\\_TYPE\\_Q6\\_K - \"type-0\" 6-bit quantization. Super-blocks with 16 blocks, each block having 16 weights. Scales are quantized with 8 bits. This ends up using 6.5625 bpw\n\n\nRefer to the Provided Files table below to see what files use which methods, and how.\n\n\n\nProvided files\n--------------\n\n\n\nHow to download GGUF files\n--------------------------\n\n\nNote for manual downloaders: You rarely want to clone the entire repo! 
Multiple different quantization formats are provided, and most users only want to pick and download a single file.\n\n\nThe following clients/libraries will automatically download models for you, providing a list of available models to choose from:\n\n\n* LM Studio\n* LoLLMS Web UI\n* URL### In 'text-generation-webui'\n\n\nUnder Download Model, you can enter the model repo: pek111/TC-instruct-DPO-GGUF, and below it, a specific filename to download, such as tc-instruct-dpo.Q4\\_K\\_M.gguf.\n\n\nThen click Download.### On the command line, including multiple files at once\n\n\nI recommend using the 'huggingface-hub' Python library:\n\n\nThen you can download any individual model file to the current directory, at high speed, with a command like this:\n\n\n\nMore advanced huggingface-cli download usage\nYou can also download multiple files at once with a pattern:\n\n\nFor more documentation on downloading with 'huggingface-cli', please see: HF -> Hub Python Library -> Download files -> Download from the CLI.\n\n\nTo accelerate downloads on fast connections (1Gbit/s or higher), install 'hf\\_transfer':\n\n\nAnd set environment variable 'HF\\_HUB\\_ENABLE\\_HF\\_TRANSFER' to '1':\n\n\nWindows CLI users: Use 'set HUGGINGFACE\\_HUB\\_ENABLE\\_HF\\_TRANSFER=1' or '$env:HUGGINGFACE\\_HUB\\_ENABLE\\_HF\\_TRANSFER=1' before running the download command.\n\n\n\nExample 'URL' command\n---------------------\n\n\nMake sure you are using 'URL' from commit d0cee0d36d5be95a0d9088b674dbb27354107221 or later.\n\n\nChange '-ngl 32' to the number of layers to offload to GPU. Remove it if you don't have GPU acceleration.\n\n\nChange '-c 4096' to the desired sequence length. For extended sequence models - eg 8K, 16K, 32K - the necessary RoPE scaling parameters are read from the GGUF file and set by URL automatically.\n\n\nIf you want to have a chat-style conversation, replace the '-p ' argument with '-i -ins'\n\n\nFor other parameters and how to use them, please refer to the URL documentation\n\n\nHow to run in 'text-generation-webui'\n-------------------------------------\n\n\nFurther instructions here: text-generation-webui/docs/URL.\n\n\nHow to run from Python code\n---------------------------\n\n\nYou can use GGUF models from Python using the llama-cpp-python or ctransformers libraries.### How to load this model from Python using ctransformers#### First install the package#### Simple example code to load one of these GGUF models#### Output:\n\n\nHow to use with LangChain\n-------------------------\n\n\nHere are guides on using llama-cpp-python or ctransformers with LangChain:\n\n\n* LangChain + llama-cpp-python\n* LangChain + ctransformers\n\n\nOriginal model card: tanamettpk's TC Instruct DPO - Typhoon 7B\n==============================================================\n\n\nTC-instruct-DPO - Typhoon 7B\n============================\n\n\n!image/png\n\n\nModel Description\n-----------------\n\n\nTC instruct DPO finetuned มาจาก Typhoon 7B ของ SCB 10X ซึ่งมาจาก Mistral 7B - v0.1 อีกที\n\n\nTC instruct DPO ได้ทำการ Train กับ Data ภาษาไทยเท่าที่จะหาได้ และ พยายามให้ Instruct มีความต่างกันเท่าที่จะทำได้\n\n\nModel นี้ตั้งใจทำขึ้นเพื่อการศึกษาขั้นตอนในการสร้าง LLM เท่านั้น\n\n\nและอย่างที่บอกว่าเพื่อศึกษา และ เราไม่เคยสร้าง LLM มาก่อนหรือศึกษามาเป็นอย่างดีนัก\n\n\nเราเลยมีความโง่หลายๆอย่างเช่น เราใช้ Prompt template เป็น Alpaca template ซึ่งไอ้สัส มารู้ทีหลังว่าต้องใช้ ChatML ดีกว่า\n\n\nโดยการ Train Model นี้เราใช้ QLoRA Rank 32 Alpha 64\n\n\nTrain ด้วย Custom Script ของ Huggingface (อย่าหาทำ ย้ายไปใช้ axolotl หรือ 
unsloth ดีกว่าประหยัดตัง)\n\n\nใช้ H100 PCIE 80 GB 1 ตัวจาก URL ราคาประมาณ 3$/hr Train แค่ Model นี้ก็ประมาณ 21 ชม. แต่ถ้ารวมลองผิดลองถูกด้วยก็ 10k บาท\n\n\nด้วย Batch size 24 (จริงๆอยากใช้ 32 แต่ OOM และ 16 ก็แหม๋~~~ เพิล กูใช้ H100 80GB จะให้กู Train แค่ 40 GB บ้าบ้อ)\n\n\nถ้าใครเอาไปใช้แล้วมันช่วยได้จะมาช่วย Donate ให้จะขอบคุณมากๆ\n-----------------------------------------------------------\n\n\nTipme: URL\n\n\nPrompt Format\n=============\n\n\nInference Code\n==============\n\n\nHere is example code using HuggingFace Transformers to inference the model (note: in 4bit, it will require around 5GB of VRAM)\n\n\nNote: To use function calling, you should see the github repo above.\n\n\nHow to cite:\n============" ]
null
transformers
# Uploaded model - **Developed by:** arvnoodle - **License:** apache-2.0 - **Finetuned from model:** Phind/Phind-CodeLlama-34B-v2 This Llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Hugging Face's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
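A hedged usage sketch, not part of the original card: if the uploaded repository contains merged weights rather than only a LoRA adapter, it can presumably be loaded with plain `transformers` as below. The repo id and base model come from the card; the dtype, device placement, and sample prompt are assumptions.

```python
# Hypothetical loading sketch for the uploaded checkpoint with plain transformers.
# The repo id is taken from the card above; everything else is an assumption.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "arvnoodle/hcl-phind-codellama34b-xml-json"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16,  # assumed; a 34B model needs several GPUs or quantization
    device_map="auto",
)

prompt = 'Convert this XML to JSON: <user id="1"><name>Ada</name></user>'
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```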
{"language": ["en"], "license": "apache-2.0", "tags": ["text-generation-inference", "transformers", "unsloth", "llama", "trl"], "base_model": "Phind/Phind-CodeLlama-34B-v2"}
arvnoodle/hcl-phind-codellama34b-xml-json
null
[ "transformers", "safetensors", "text-generation-inference", "unsloth", "llama", "trl", "en", "base_model:Phind/Phind-CodeLlama-34B-v2", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-04-30T05:17:20+00:00
[]
[ "en" ]
TAGS #transformers #safetensors #text-generation-inference #unsloth #llama #trl #en #base_model-Phind/Phind-CodeLlama-34B-v2 #license-apache-2.0 #endpoints_compatible #region-us
# Uploaded model - Developed by: arvnoodle - License: apache-2.0 - Finetuned from model : Phind/Phind-CodeLlama-34B-v2 This llama model was trained 2x faster with Unsloth and Huggingface's TRL library. <img src="URL width="200"/>
[ "# Uploaded model\n\n- Developed by: arvnoodle\n- License: apache-2.0\n- Finetuned from model : Phind/Phind-CodeLlama-34B-v2\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ "TAGS\n#transformers #safetensors #text-generation-inference #unsloth #llama #trl #en #base_model-Phind/Phind-CodeLlama-34B-v2 #license-apache-2.0 #endpoints_compatible #region-us \n", "# Uploaded model\n\n- Developed by: arvnoodle\n- License: apache-2.0\n- Finetuned from model : Phind/Phind-CodeLlama-34B-v2\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ 62, 79 ]
[ "TAGS\n#transformers #safetensors #text-generation-inference #unsloth #llama #trl #en #base_model-Phind/Phind-CodeLlama-34B-v2 #license-apache-2.0 #endpoints_compatible #region-us \n# Uploaded model\n\n- Developed by: arvnoodle\n- License: apache-2.0\n- Finetuned from model : Phind/Phind-CodeLlama-34B-v2\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_mouse_4-seqsight_32768_512_30M-L8_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_mouse_4](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_mouse_4) dataset. It achieves the following results on the evaluation set: - Loss: 0.6048 - F1 Score: 0.6543 - Accuracy: 0.6543 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:| | 0.6475 | 1.69 | 200 | 0.6219 | 0.6355 | 0.6378 | | 0.622 | 3.39 | 400 | 0.6082 | 0.6576 | 0.6601 | | 0.6034 | 5.08 | 600 | 0.5971 | 0.6690 | 0.6691 | | 0.5946 | 6.78 | 800 | 0.5909 | 0.6803 | 0.6808 | | 0.588 | 8.47 | 1000 | 0.5866 | 0.6862 | 0.6861 | | 0.5793 | 10.17 | 1200 | 0.5902 | 0.6828 | 0.6840 | | 0.5792 | 11.86 | 1400 | 0.5823 | 0.6835 | 0.6835 | | 0.5729 | 13.56 | 1600 | 0.5841 | 0.6843 | 0.6845 | | 0.5697 | 15.25 | 1800 | 0.5842 | 0.6858 | 0.6872 | | 0.568 | 16.95 | 2000 | 0.5834 | 0.6884 | 0.6899 | | 0.5656 | 18.64 | 2200 | 0.5838 | 0.6956 | 0.6962 | | 0.5618 | 20.34 | 2400 | 0.5794 | 0.6974 | 0.6973 | | 0.5611 | 22.03 | 2600 | 0.5888 | 0.6872 | 0.6893 | | 0.5569 | 23.73 | 2800 | 0.5762 | 0.7074 | 0.7074 | | 0.5568 | 25.42 | 3000 | 0.5815 | 0.6916 | 0.6920 | | 0.553 | 27.12 | 3200 | 0.5835 | 0.6937 | 0.6946 | | 0.5503 | 28.81 | 3400 | 0.5805 | 0.6974 | 0.6973 | | 0.5484 | 30.51 | 3600 | 0.5821 | 0.6937 | 0.6936 | | 0.5457 | 32.2 | 3800 | 0.5769 | 0.7026 | 0.7026 | | 0.5426 | 33.9 | 4000 | 0.5804 | 0.7020 | 0.7021 | | 0.5439 | 35.59 | 4200 | 0.5830 | 0.6944 | 0.6946 | | 0.5394 | 37.29 | 4400 | 0.5870 | 0.6963 | 0.6962 | | 0.5378 | 38.98 | 4600 | 0.5821 | 0.7000 | 0.6999 | | 0.5359 | 40.68 | 4800 | 0.5913 | 0.6955 | 0.6968 | | 0.528 | 42.37 | 5000 | 0.5880 | 0.7035 | 0.7037 | | 0.5349 | 44.07 | 5200 | 0.5836 | 0.7027 | 0.7026 | | 0.527 | 45.76 | 5400 | 0.5888 | 0.6965 | 0.6968 | | 0.5282 | 47.46 | 5600 | 0.5916 | 0.6953 | 0.6952 | | 0.5298 | 49.15 | 5800 | 0.5849 | 0.7064 | 0.7063 | | 0.5251 | 50.85 | 6000 | 0.5878 | 0.7048 | 0.7047 | | 0.5239 | 52.54 | 6200 | 0.5886 | 0.6989 | 0.6989 | | 0.5192 | 54.24 | 6400 | 0.5907 | 0.7017 | 0.7015 | | 0.5209 | 55.93 | 6600 | 0.5907 | 0.7048 | 0.7047 | | 0.5175 | 57.63 | 6800 | 0.5890 | 0.6994 | 0.6994 | | 0.5177 | 59.32 | 7000 | 0.5917 | 0.7001 | 0.7005 | | 0.5126 | 61.02 | 7200 | 0.5903 | 0.7038 | 0.7037 | | 0.5128 | 62.71 | 7400 | 0.5999 | 0.7037 | 0.7037 | | 0.5132 | 64.41 | 7600 | 0.5959 | 0.6967 | 0.6968 | | 0.5169 | 66.1 | 7800 | 0.5947 | 0.6947 | 0.6946 | | 0.5126 | 67.8 | 8000 | 0.5921 | 0.6995 | 0.6994 | | 0.512 | 69.49 | 8200 | 0.5927 | 0.6942 | 0.6941 | | 0.5098 | 71.19 | 8400 | 0.5936 | 0.6963 | 0.6962 | | 0.5085 | 72.88 | 8600 | 0.5962 | 0.6941 | 0.6941 | | 0.5027 | 74.58 | 8800 | 0.5976 | 0.7000 | 0.6999 | | 0.5112 | 76.27 | 
9000 | 0.5967 | 0.7011 | 0.7010 | | 0.5123 | 77.97 | 9200 | 0.5947 | 0.6990 | 0.6989 | | 0.5056 | 79.66 | 9400 | 0.5968 | 0.6958 | 0.6957 | | 0.5085 | 81.36 | 9600 | 0.5958 | 0.6968 | 0.6968 | | 0.5073 | 83.05 | 9800 | 0.5960 | 0.6958 | 0.6957 | | 0.5046 | 84.75 | 10000 | 0.5964 | 0.6990 | 0.6989 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
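As a hedged sketch (not part of the auto-generated card above), an adapter like this is typically loaded on top of its base model with the `peft` library. The base and adapter repo ids come from the card; the sequence-classification head, the two-label setup, and the placeholder input are assumptions inferred from the reported F1/accuracy metrics.

```python
# Hypothetical loading sketch: base model + this LoRA adapter via peft.
# Repo ids come from the card; num_labels and the DNA-like input are assumptions.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from peft import PeftModel

base_id = "mahdibaghbanzadeh/seqsight_32768_512_30M"
adapter_id = "mahdibaghbanzadeh/GUE_mouse_4-seqsight_32768_512_30M-L8_f"

tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForSequenceClassification.from_pretrained(base_id, num_labels=2)
model = PeftModel.from_pretrained(base_model, adapter_id)
model.eval()

sequence = "ACGTACGTACGTACGT"  # placeholder; real inputs come from the GUE_mouse_4 dataset
inputs = tokenizer(sequence, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.softmax(dim=-1))
```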
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_mouse_4-seqsight_32768_512_30M-L8_f", "results": []}]}
mahdibaghbanzadeh/GUE_mouse_4-seqsight_32768_512_30M-L8_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T05:17:47+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_mouse\_4-seqsight\_32768\_512\_30M-L8\_f ============================================= This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_mouse\_4 dataset. It achieves the following results on the evaluation set: * Loss: 0.6048 * F1 Score: 0.6543 * Accuracy: 0.6543 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GUE_mouse_4-seqsight_32768_512_30M-L32_f This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_mouse_4](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_mouse_4) dataset. It achieves the following results on the evaluation set: - Loss: 0.6249 - F1 Score: 0.6690 - Accuracy: 0.6691 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:| | 0.6405 | 1.69 | 200 | 0.6070 | 0.6516 | 0.6516 | | 0.6127 | 3.39 | 400 | 0.6070 | 0.6632 | 0.6660 | | 0.5933 | 5.08 | 600 | 0.5905 | 0.6776 | 0.6776 | | 0.5831 | 6.78 | 800 | 0.5843 | 0.6843 | 0.6845 | | 0.575 | 8.47 | 1000 | 0.5825 | 0.6882 | 0.6883 | | 0.5632 | 10.17 | 1200 | 0.5917 | 0.6858 | 0.6877 | | 0.5602 | 11.86 | 1400 | 0.5808 | 0.6909 | 0.6909 | | 0.548 | 13.56 | 1600 | 0.5903 | 0.6926 | 0.6925 | | 0.5406 | 15.25 | 1800 | 0.5959 | 0.6975 | 0.6994 | | 0.5341 | 16.95 | 2000 | 0.5993 | 0.6814 | 0.6835 | | 0.5254 | 18.64 | 2200 | 0.6000 | 0.6913 | 0.6920 | | 0.516 | 20.34 | 2400 | 0.6013 | 0.6990 | 0.6989 | | 0.5082 | 22.03 | 2600 | 0.6051 | 0.6873 | 0.6877 | | 0.4988 | 23.73 | 2800 | 0.6072 | 0.6881 | 0.6883 | | 0.4945 | 25.42 | 3000 | 0.6199 | 0.6954 | 0.6962 | | 0.4848 | 27.12 | 3200 | 0.6227 | 0.6852 | 0.6851 | | 0.4806 | 28.81 | 3400 | 0.6180 | 0.6824 | 0.6824 | | 0.4707 | 30.51 | 3600 | 0.6305 | 0.6809 | 0.6808 | | 0.4672 | 32.2 | 3800 | 0.6428 | 0.6889 | 0.6899 | | 0.4572 | 33.9 | 4000 | 0.6337 | 0.6778 | 0.6776 | | 0.4504 | 35.59 | 4200 | 0.6441 | 0.6793 | 0.6792 | | 0.4476 | 37.29 | 4400 | 0.6614 | 0.6835 | 0.6835 | | 0.4431 | 38.98 | 4600 | 0.6548 | 0.6815 | 0.6814 | | 0.4335 | 40.68 | 4800 | 0.6647 | 0.6679 | 0.6681 | | 0.4265 | 42.37 | 5000 | 0.6666 | 0.6803 | 0.6803 | | 0.4314 | 44.07 | 5200 | 0.6719 | 0.6800 | 0.6803 | | 0.4162 | 45.76 | 5400 | 0.6846 | 0.6772 | 0.6771 | | 0.4183 | 47.46 | 5600 | 0.7029 | 0.6760 | 0.6760 | | 0.413 | 49.15 | 5800 | 0.6912 | 0.6740 | 0.6739 | | 0.41 | 50.85 | 6000 | 0.6919 | 0.6815 | 0.6814 | | 0.4077 | 52.54 | 6200 | 0.7070 | 0.6705 | 0.6707 | | 0.3995 | 54.24 | 6400 | 0.7053 | 0.6783 | 0.6782 | | 0.3988 | 55.93 | 6600 | 0.7242 | 0.6793 | 0.6792 | | 0.3916 | 57.63 | 6800 | 0.7138 | 0.6734 | 0.6739 | | 0.397 | 59.32 | 7000 | 0.6913 | 0.6702 | 0.6702 | | 0.3868 | 61.02 | 7200 | 0.7083 | 0.6781 | 0.6782 | | 0.3864 | 62.71 | 7400 | 0.7358 | 0.6766 | 0.6766 | | 0.3776 | 64.41 | 7600 | 0.7365 | 0.6719 | 0.6718 | | 0.3808 | 66.1 | 7800 | 0.7209 | 0.6788 | 0.6787 | | 0.3741 | 67.8 | 8000 | 0.7397 | 0.6743 | 0.6745 | | 0.3746 | 69.49 | 8200 | 0.7318 | 0.6775 | 0.6776 | | 0.3767 | 71.19 | 8400 | 0.7330 | 0.6772 | 0.6771 | | 0.3718 | 72.88 | 8600 | 0.7405 | 0.6753 | 0.6755 | | 0.3638 | 74.58 | 8800 | 0.7478 | 0.6767 | 0.6766 | | 0.371 | 76.27 | 
9000 | 0.7498 | 0.6730 | 0.6729 | | 0.3698 | 77.97 | 9200 | 0.7441 | 0.6739 | 0.6739 | | 0.3665 | 79.66 | 9400 | 0.7441 | 0.6735 | 0.6734 | | 0.3644 | 81.36 | 9600 | 0.7507 | 0.6753 | 0.6755 | | 0.363 | 83.05 | 9800 | 0.7505 | 0.6755 | 0.6755 | | 0.3607 | 84.75 | 10000 | 0.7531 | 0.6761 | 0.6760 | ### Framework versions - PEFT 0.9.0 - Transformers 4.38.2 - Pytorch 2.2.0+cu121 - Datasets 2.17.1 - Tokenizers 0.15.2
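To make the hyperparameter list above concrete, here is a hedged sketch of the equivalent `transformers` `TrainingArguments`. Only the values listed in the card are taken from it; the output directory and evaluation cadence are placeholders, and the actual training script may differ.

```python
# Hedged sketch: the card's training hyperparameters expressed as TrainingArguments.
# Listed values mirror the card; output_dir and eval_steps are placeholders.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="GUE_mouse_4-seqsight_32768_512_30M-L32_f",  # placeholder
    learning_rate=5e-4,
    per_device_train_batch_size=128,
    per_device_eval_batch_size=128,
    seed=42,
    max_steps=10_000,                # training_steps: 10000
    lr_scheduler_type="linear",
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    evaluation_strategy="steps",
    eval_steps=200,                  # matches the 200-step evaluation interval in the results table
)
```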
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_mouse_4-seqsight_32768_512_30M-L32_f", "results": []}]}
mahdibaghbanzadeh/GUE_mouse_4-seqsight_32768_512_30M-L32_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T05:17:49+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_mouse\_4-seqsight\_32768\_512\_30M-L32\_f ============================================== This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_mouse\_4 dataset. It achieves the following results on the evaluation set: * Loss: 0.6249 * F1 Score: 0.6690 * Accuracy: 0.6691 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-large-plm-nsp-1000 This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.6936 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 0.647 | 1.0 | 32 | 0.5746 | | 0.601 | 2.0 | 64 | 0.8629 | | 0.6343 | 3.0 | 96 | 0.5984 | | 0.6747 | 4.0 | 128 | 0.6568 | | 0.6841 | 5.0 | 160 | 0.6934 | | 0.7068 | 6.0 | 192 | 0.6936 | ### Framework versions - Transformers 4.40.1 - Pytorch 2.3.0+cu121 - Datasets 2.19.0 - Tokenizers 0.19.1
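As a hedged usage sketch that is not part of the original card: the checkpoint name suggests a sentence-pair plausibility classifier, so it can presumably be loaded for sequence-pair classification as below. The repo id comes from the card; the example pair and the meaning of the labels are assumptions.

```python
# Hypothetical inference sketch: loading the fine-tuned checkpoint as a sequence-pair classifier.
# The repo id is from the card above; the two-label interpretation is an assumption.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_id = "mhr2004/roberta-large-plm-nsp-1000"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)
model.eval()

premise = "She plugged in the kettle."
hypothesis = "The water started to heat up."
inputs = tokenizer(premise, hypothesis, return_tensors="pt")
with torch.no_grad():
    probs = model(**inputs).logits.softmax(dim=-1)
print(probs)
```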
{"license": "mit", "tags": ["generated_from_trainer"], "base_model": "roberta-large", "model-index": [{"name": "roberta-large-plm-nsp-1000", "results": []}]}
mhr2004/roberta-large-plm-nsp-1000
null
[ "transformers", "safetensors", "roberta", "text-classification", "generated_from_trainer", "base_model:roberta-large", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-30T05:18:43+00:00
[]
[]
TAGS #transformers #safetensors #roberta #text-classification #generated_from_trainer #base_model-roberta-large #license-mit #autotrain_compatible #endpoints_compatible #region-us
roberta-large-plm-nsp-1000 ========================== This model is a fine-tuned version of roberta-large on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 0.6936 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 5e-05 * train\_batch\_size: 64 * eval\_batch\_size: 64 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * num\_epochs: 20 ### Training results ### Framework versions * Transformers 4.40.1 * Pytorch 2.3.0+cu121 * Datasets 2.19.0 * Tokenizers 0.19.1
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 5e-05\n* train\\_batch\\_size: 64\n* eval\\_batch\\_size: 64\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 20", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.1\n* Pytorch 2.3.0+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
[ "TAGS\n#transformers #safetensors #roberta #text-classification #generated_from_trainer #base_model-roberta-large #license-mit #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 5e-05\n* train\\_batch\\_size: 64\n* eval\\_batch\\_size: 64\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 20", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.1\n* Pytorch 2.3.0+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
[ 45, 101, 5, 44 ]
[ "TAGS\n#transformers #safetensors #roberta #text-classification #generated_from_trainer #base_model-roberta-large #license-mit #autotrain_compatible #endpoints_compatible #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 5e-05\n* train\\_batch\\_size: 64\n* eval\\_batch\\_size: 64\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 20### Training results### Framework versions\n\n\n* Transformers 4.40.1\n* Pytorch 2.3.0+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# GUE_mouse_3-seqsight_32768_512_30M-L1_f

This model is a fine-tuned version of [mahdibaghbanzadeh/seqsight_32768_512_30M](https://huggingface.co/mahdibaghbanzadeh/seqsight_32768_512_30M) on the [mahdibaghbanzadeh/GUE_mouse_3](https://huggingface.co/datasets/mahdibaghbanzadeh/GUE_mouse_3) dataset.
It achieves the following results on the evaluation set:
- Loss: 0.5528
- F1 Score: 0.7865
- Accuracy: 0.7866

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0005
- train_batch_size: 128
- eval_batch_size: 128
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- training_steps: 10000

### Training results

| Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy |
|:-------------:|:------:|:-----:|:---------------:|:--------:|:--------:|
| 0.602 | 13.33 | 200 | 0.5222 | 0.7443 | 0.7448 |
| 0.5209 | 26.67 | 400 | 0.5124 | 0.7678 | 0.7699 |
| 0.4756 | 40.0 | 600 | 0.4651 | 0.7725 | 0.7741 |
| 0.4342 | 53.33 | 800 | 0.4479 | 0.7763 | 0.7782 |
| 0.4076 | 66.67 | 1000 | 0.4180 | 0.7901 | 0.7908 |
| 0.384 | 80.0 | 1200 | 0.4128 | 0.7946 | 0.7950 |
| 0.361 | 93.33 | 1400 | 0.4175 | 0.8026 | 0.8033 |
| 0.3452 | 106.67 | 1600 | 0.4356 | 0.7983 | 0.7992 |
| 0.3303 | 120.0 | 1800 | 0.4323 | 0.8024 | 0.8033 |
| 0.3168 | 133.33 | 2000 | 0.4403 | 0.8026 | 0.8033 |
| 0.3064 | 146.67 | 2200 | 0.4489 | 0.7944 | 0.7950 |
| 0.2919 | 160.0 | 2400 | 0.4631 | 0.7942 | 0.7950 |
| 0.2859 | 173.33 | 2600 | 0.4547 | 0.8072 | 0.8075 |
| 0.2756 | 186.67 | 2800 | 0.4584 | 0.8074 | 0.8075 |
| 0.2681 | 200.0 | 3000 | 0.4658 | 0.8115 | 0.8117 |
| 0.2602 | 213.33 | 3200 | 0.4854 | 0.8158 | 0.8159 |
| 0.2483 | 226.67 | 3400 | 0.5025 | 0.8196 | 0.8201 |
| 0.2457 | 240.0 | 3600 | 0.4813 | 0.8075 | 0.8075 |
| 0.2403 | 253.33 | 3800 | 0.4963 | 0.8159 | 0.8159 |
| 0.2312 | 266.67 | 4000 | 0.5018 | 0.8074 | 0.8075 |
| 0.2286 | 280.0 | 4200 | 0.4981 | 0.8116 | 0.8117 |
| 0.223 | 293.33 | 4400 | 0.5124 | 0.8317 | 0.8326 |
| 0.2193 | 306.67 | 4600 | 0.5116 | 0.8237 | 0.8243 |
| 0.2155 | 320.0 | 4800 | 0.5350 | 0.8231 | 0.8243 |
| 0.2036 | 333.33 | 5000 | 0.5155 | 0.8283 | 0.8285 |
| 0.1968 | 346.67 | 5200 | 0.5561 | 0.8278 | 0.8285 |
| 0.2015 | 360.0 | 5400 | 0.5305 | 0.8240 | 0.8243 |
| 0.1986 | 373.33 | 5600 | 0.5218 | 0.8240 | 0.8243 |
| 0.1957 | 386.67 | 5800 | 0.5356 | 0.8196 | 0.8201 |
| 0.1854 | 400.0 | 6000 | 0.5481 | 0.8239 | 0.8243 |
| 0.1911 | 413.33 | 6200 | 0.5415 | 0.8280 | 0.8285 |
| 0.1828 | 426.67 | 6400 | 0.5524 | 0.8239 | 0.8243 |
| 0.1818 | 440.0 | 6600 | 0.5364 | 0.8240 | 0.8243 |
| 0.1774 | 453.33 | 6800 | 0.5466 | 0.8280 | 0.8285 |
| 0.1734 | 466.67 | 7000 | 0.5504 | 0.8280 | 0.8285 |
| 0.1727 | 480.0 | 7200 | 0.5523 | 0.8241 | 0.8243 |
| 0.1813 | 493.33 | 7400 | 0.5386 | 0.8241 | 0.8243 |
| 0.1697 | 506.67 | 7600 | 0.5478 | 0.8240 | 0.8243 |
| 0.1717 | 520.0 | 7800 | 0.5606 | 0.8197 | 0.8201 |
| 0.1709 | 533.33 | 8000 | 0.5571 | 0.8239 | 0.8243 |
| 0.1656 | 546.67 | 8200 | 0.5741 | 0.8196 | 0.8201 |
| 0.1686 | 560.0 | 8400 | 0.5570 | 0.8197 | 0.8201 |
| 0.165 | 573.33 | 8600 | 0.5637 | 0.8240 | 0.8243 |
| 0.1632 | 586.67 | 8800 | 0.5651 | 0.8280 | 0.8285 |
| 0.1641 | 600.0 | 9000 | 0.5649 | 0.8280 | 0.8285 |
| 0.1663 | 613.33 | 9200 | 0.5598 | 0.8280 | 0.8285 |
| 0.1592 | 626.67 | 9400 | 0.5695 | 0.8239 | 0.8243 |
| 0.1577 | 640.0 | 9600 | 0.5731 | 0.8239 | 0.8243 |
| 0.1648 | 653.33 | 9800 | 0.5662 | 0.8240 | 0.8243 |
| 0.1657 | 666.67 | 10000 | 0.5682 | 0.8281 | 0.8285 |

### Framework versions

- PEFT 0.9.0
- Transformers 4.38.2
- Pytorch 2.2.0+cu121
- Datasets 2.17.1
- Tokenizers 0.15.2
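The card above documents the adapter's training setup but gives no usage snippet. Below is a minimal, hypothetical inference sketch: the repository ids are taken from the card, while the model class (`AutoModelForSequenceClassification`), the binary label count, the `trust_remote_code` flag, and the toy DNA input are assumptions rather than details stated in the card.

```python
# Hypothetical usage sketch: load the seqsight base model and attach the PEFT adapter.
# Repository ids come from the card; model class and label count are assumptions.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from peft import PeftModel

base_id = "mahdibaghbanzadeh/seqsight_32768_512_30M"
adapter_id = "mahdibaghbanzadeh/GUE_mouse_3-seqsight_32768_512_30M-L1_f"

tokenizer = AutoTokenizer.from_pretrained(base_id, trust_remote_code=True)
base_model = AutoModelForSequenceClassification.from_pretrained(
    base_id, num_labels=2, trust_remote_code=True  # binary classification is an assumption
)

# Attach the fine-tuned adapter weights on top of the base model.
model = PeftModel.from_pretrained(base_model, adapter_id)
model.eval()

# Score a toy DNA sequence (illustrative input only).
inputs = tokenizer("ACGTACGTACGT", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.softmax(dim=-1))
```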
{"library_name": "peft", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "mahdibaghbanzadeh/seqsight_32768_512_30M", "model-index": [{"name": "GUE_mouse_3-seqsight_32768_512_30M-L1_f", "results": []}]}
mahdibaghbanzadeh/GUE_mouse_3-seqsight_32768_512_30M-L1_f
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mahdibaghbanzadeh/seqsight_32768_512_30M", "region:us" ]
null
2024-04-30T05:19:24+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us
GUE\_mouse\_3-seqsight\_32768\_512\_30M-L1\_f ============================================= This model is a fine-tuned version of mahdibaghbanzadeh/seqsight\_32768\_512\_30M on the mahdibaghbanzadeh/GUE\_mouse\_3 dataset. It achieves the following results on the evaluation set: * Loss: 0.5528 * F1 Score: 0.7865 * Accuracy: 0.7866 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 128 * eval\_batch\_size: 128 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * training\_steps: 10000 ### Training results ### Framework versions * PEFT 0.9.0 * Transformers 4.38.2 * Pytorch 2.2.0+cu121 * Datasets 2.17.1 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000", "### Training results", "### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
[ 43, 100, 5, 52 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mahdibaghbanzadeh/seqsight_32768_512_30M #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 128\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* training\\_steps: 10000### Training results### Framework versions\n\n\n* PEFT 0.9.0\n* Transformers 4.38.2\n* Pytorch 2.2.0+cu121\n* Datasets 2.17.1\n* Tokenizers 0.15.2" ]
null
transformers
# Uploaded model

- **Developed by:** universalml
- **License:** apache-2.0
- **Finetuned from model:** unsloth/llama-3-8b-bnb-4bit

This Llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Hugging Face's TRL library.

[<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
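The card notes that the model was trained with Unsloth and TRL but does not show how to load it. The snippet below is a minimal inference sketch, assuming the weights published under `universalml/NepaliGPT` (the id of this record) can be loaded through Unsloth's `FastLanguageModel` in 4-bit; the sequence length, prompt, and generation settings are illustrative assumptions.

```python
# Hypothetical usage sketch: load the uploaded model for inference with Unsloth.
# The repository id comes from this record's metadata; all settings below are illustrative.
from unsloth import FastLanguageModel

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="universalml/NepaliGPT",  # exact weight format (adapter vs. merged) is an assumption
    max_seq_length=2048,
    load_in_4bit=True,
)
FastLanguageModel.for_inference(model)  # switch to Unsloth's faster generation path

inputs = tokenizer("Tell me about Kathmandu.", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```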
{"language": ["en", "ne"], "license": "apache-2.0", "tags": ["text-generation-inference", "transformers", "unsloth", "llama", "trl"], "base_model": "unsloth/llama-3-8b-bnb-4bit"}
universalml/NepaliGPT
null
[ "transformers", "safetensors", "text-generation-inference", "unsloth", "llama", "trl", "en", "ne", "base_model:unsloth/llama-3-8b-bnb-4bit", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-04-30T05:19:35+00:00
[]
[ "en", "ne" ]
TAGS #transformers #safetensors #text-generation-inference #unsloth #llama #trl #en #ne #base_model-unsloth/llama-3-8b-bnb-4bit #license-apache-2.0 #endpoints_compatible #region-us
# Uploaded model - Developed by: universalml - License: apache-2.0 - Finetuned from model : unsloth/llama-3-8b-bnb-4bit This llama model was trained 2x faster with Unsloth and Huggingface's TRL library. <img src="URL width="200"/>
[ "# Uploaded model\n\n- Developed by: universalml\n- License: apache-2.0\n- Finetuned from model : unsloth/llama-3-8b-bnb-4bit\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ "TAGS\n#transformers #safetensors #text-generation-inference #unsloth #llama #trl #en #ne #base_model-unsloth/llama-3-8b-bnb-4bit #license-apache-2.0 #endpoints_compatible #region-us \n", "# Uploaded model\n\n- Developed by: universalml\n- License: apache-2.0\n- Finetuned from model : unsloth/llama-3-8b-bnb-4bit\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ 66, 79 ]
[ "TAGS\n#transformers #safetensors #text-generation-inference #unsloth #llama #trl #en #ne #base_model-unsloth/llama-3-8b-bnb-4bit #license-apache-2.0 #endpoints_compatible #region-us \n# Uploaded model\n\n- Developed by: universalml\n- License: apache-2.0\n- Finetuned from model : unsloth/llama-3-8b-bnb-4bit\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]