| Column | Type | Values / Lengths |
|:--|:--|:--|
| pipeline_tag | stringclasses | 48 values |
| library_name | stringclasses | 198 values |
| text | stringlengths | 1 to 900k |
| metadata | stringlengths | 2 to 438k |
| id | stringlengths | 5 to 122 |
| last_modified | null | |
| tags | sequencelengths | 1 to 1.84k |
| sha | null | |
| created_at | stringlengths | 25 to 25 |
| arxiv | sequencelengths | 0 to 201 |
| languages | sequencelengths | 0 to 1.83k |
| tags_str | stringlengths | 17 to 9.34k |
| text_str | stringlengths | 0 to 389k |
| text_lists | sequencelengths | 0 to 722 |
| processed_texts | sequencelengths | 1 to 723 |
| tokens_length | sequencelengths | 1 to 723 |
| input_texts | sequencelengths | 1 to 1 |
text2text-generation
transformers
# Slavic T5 Base The aim of this model is to achieve the best results for Slavic languages written in the Latin script. It is suitable for tasks such as: - summarization, - extractive question answering, - machine translation between Slavic languages in the Latin script. The model is trained on selected parts of the OSCAR and MaCoCu corpora. It supports these languages: Czech, Croatian, Polish, Slovak, Slovenian. The vocabulary has 120 000 tokens and preserves capital letters.
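A minimal usage sketch (not part of the original card), assuming the standard `transformers` seq2seq API; the Czech prompt and the task prefix shown are placeholders, not behaviour documented by the authors:

```python
# Sketch only: load the checkpoint with the generic seq2seq classes and run
# a text2text prompt. The "sumarizuj:" prefix is a hypothetical example, not
# a prefix documented in this card.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_id = "TUKE-KEMT/slavic-t5-base"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

inputs = tokenizer("sumarizuj: Malý astronaut letěl na Měsíc a ...", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```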
{"language": ["cs", "cr", "hr", "pl", "sl", "sk"], "datasets": ["oscar", "hieronymusa/MaCoCu-dataset-250k"]}
TUKE-KEMT/slavic-t5-base
null
[ "transformers", "safetensors", "t5", "text2text-generation", "cs", "cr", "hr", "pl", "sl", "sk", "dataset:oscar", "dataset:hieronymusa/MaCoCu-dataset-250k", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T17:52:35+00:00
[]
[ "cs", "cr", "hr", "pl", "sl", "sk" ]
TAGS #transformers #safetensors #t5 #text2text-generation #cs #cr #hr #pl #sl #sk #dataset-oscar #dataset-hieronymusa/MaCoCu-dataset-250k #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Slavic T5 Base The aim of this model is to achieve the best results for Slavic languages written in the Latin script. It is suitable for tasks such as: - summarization, - extractive question answering, - machine translation between Slavic languages in the Latin script. The model is trained on selected parts of the OSCAR and MaCoCu corpora. It supports these languages: Czech, Croatian, Polish, Slovak, Slovenian. The vocabulary has 120 000 tokens and preserves capital letters.
[ "# Slavic T5 Base\n\nAim of this model is to reach the best results for the Slavic laguages with Latin script.\n\nIt is suitable for tasks such as:\n\n- summarization,\n- extractive question answering,\n- machine translation between slavic languages in Latin script.\n\nThe model is trained on the selected parts of OSCAR corpus and MaCoCu corpus.\n\nIt supports this languages: Czech, Croatian, Polish , Slovak, Slovenian, \n\nVocabulary has 120 000 tokens, contains capital letters." ]
[ "TAGS\n#transformers #safetensors #t5 #text2text-generation #cs #cr #hr #pl #sl #sk #dataset-oscar #dataset-hieronymusa/MaCoCu-dataset-250k #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Slavic T5 Base\n\nAim of this model is to reach the best results for the Slavic laguages with Latin script.\n\nIt is suitable for tasks such as:\n\n- summarization,\n- extractive question answering,\n- machine translation between slavic languages in Latin script.\n\nThe model is trained on the selected parts of OSCAR corpus and MaCoCu corpus.\n\nIt supports this languages: Czech, Croatian, Polish , Slovak, Slovenian, \n\nVocabulary has 120 000 tokens, contains capital letters." ]
[ 71, 97 ]
[ "TAGS\n#transformers #safetensors #t5 #text2text-generation #cs #cr #hr #pl #sl #sk #dataset-oscar #dataset-hieronymusa/MaCoCu-dataset-250k #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# Slavic T5 Base\n\nAim of this model is to reach the best results for the Slavic laguages with Latin script.\n\nIt is suitable for tasks such as:\n\n- summarization,\n- extractive question answering,\n- machine translation between slavic languages in Latin script.\n\nThe model is trained on the selected parts of OSCAR corpus and MaCoCu corpus.\n\nIt supports this languages: Czech, Croatian, Polish , Slovak, Slovenian, \n\nVocabulary has 120 000 tokens, contains capital letters." ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
huiang/model_imdb
null
[ "transformers", "safetensors", "gpt2", "text-generation", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T17:52:40+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #gpt2 #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #gpt2 #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 45, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #gpt2 #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
null
null
# Kleiner Astronaut - 126M ![Astronaut holding balloon](cover.png) Model pre-trained on the german [Kleiner Astronaut dataset](https://huggingface.co/datasets/Jotschi/kleiner-astronaut). Example from Dataset: > Eines Tages traten der kleine Astronaut Max und sein neues Roboter-Freundchen, Astro, in ihr schattiges Wohnzimmer… * Architecture: NanoGPT / Custom Arch - Based on GPT-2 * License: Apache-2.0 ## Training Sources: https://github.com/Jotschi/nanoGPT/tree/kleiner-astronaut-126M Hyperparams used to train this model: | Parameter | Value | |----------------|-------| | n_layer | 12 | | n_head | 12 | | n_embd | 780 | | dropout | 0.2 | | batch_size | 32 | | block_size | 256 | | gradient_accumulation_steps | 1 | | learning_rate | 1e-5 | | max_iters | 500000 | | lr_decay_iters | 5000 | | min_lr | 1e-6 | | beta2 | 0.99 | | warmup_iters | 100 | ## Example ``` Die Galaxie war voller ungewöhnlicher Dinge, die man noch nie gesehen hatte – riesige Sterne, die wie kleine Diamanten am Himmel glitzerten, und Sternschnuppen, die wie feine Seifenblasen funkelten. Max und Luna waren so erstaunt, dass sie sich auf einem großen Festmahl herumdrehten, begegneten ihnen über einer Reihe von Galaxien, wo jedes Sternchen einzigartige Geschichten hörte. Als sie zu einem großen Planet umschwommen waren, bemerkten sie einen kleinen Planet, der vor ihnen aufgegangen war. "Wow", murmelte Max, "das ist so großartig!" Er hatte eine kleine Stadt in Form von Sternen darauf und war begeistert von den schönsten Welten. Auf dieser neuen Welt trafen sie auf viele andere kleine Astronauten, die ebenfalls dort lebten. Diese Kinder waren so freundlich wie Max, aber sie trugen mit ihren Hüften und schauten ihnen zu. "Wir sind die Sternengötter", sagte einer der kleinen Sternchen, "und wir sind auf unserer Reise durch den Kosmos." Max lachte und dachte nach, wie er diese neue Welt entdecken würde. Mit einem Sprung in seine kleine Raumkapsel setzte Max los und fuhr durch die Sternenschleife, um nach dem Abenteuer zu suchen. Er kletterte über Flüssigkeiten, die wie flüssige Stücke aussahen und sah wie kleine Sterne auf einer großen Festfeier tanzten. ``` ## Usage The custom model implementation can be found in `model.py`. ```py pip install -r requirements.txt python example.py ``` The model is not compatible with common inference servers.
{"datasets": ["jotschi/kleiner-astronaut"]}
Jotschi/kleiner-astronaut-126M
null
[ "pytorch", "dataset:jotschi/kleiner-astronaut", "region:us" ]
null
2024-04-30T17:57:21+00:00
[]
[]
TAGS #pytorch #dataset-jotschi/kleiner-astronaut #region-us
Kleiner Astronaut - 126M ======================== !Astronaut holding balloon Model pre-trained on the german Kleiner Astronaut dataset. Example from Dataset: > > Eines Tages traten der kleine Astronaut Max und sein neues Roboter-Freundchen, Astro, in ihr schattiges Wohnzimmer… > > > * Architecture: NanoGPT / Custom Arch - Based on GPT-2 * License: Apache-2.0 Training -------- Sources: URL Hyperparams used to train this model: Example ------- Usage ----- The custom model implementation can be found in 'URL'. The model is not compatible with common inference servers.
[]
[ "TAGS\n#pytorch #dataset-jotschi/kleiner-astronaut #region-us \n" ]
[ 22 ]
[ "TAGS\n#pytorch #dataset-jotschi/kleiner-astronaut #region-us \n" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
OwOOwO/finalupdate2
null
[ "transformers", "safetensors", "stablelm", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-30T17:57:39+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #stablelm #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #stablelm #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 41, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #stablelm #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
audio-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilhubert-bass-classifier6 This model is a fine-tuned version of [ntu-spml/distilhubert](https://huggingface.co/ntu-spml/distilhubert) on the TheDuyx/augmented_bass_sounds dataset. It achieves the following results on the evaluation set: - Loss: 0.0110 - Accuracy: 0.9985 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:| | 0.3079 | 1.0 | 3828 | 0.1400 | 0.9724 | | 0.2019 | 2.0 | 7656 | 0.1161 | 0.9803 | | 0.0002 | 3.0 | 11484 | 0.0765 | 0.9897 | | 0.0002 | 4.0 | 15312 | 0.0347 | 0.9959 | | 0.0 | 5.0 | 19140 | 0.0110 | 0.9985 | ### Framework versions - Transformers 4.39.2 - Pytorch 2.2.2 - Datasets 2.18.0 - Tokenizers 0.15.2
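Not part of the original card: a minimal inference sketch, assuming the standard `transformers` audio-classification pipeline supports this fine-tuned DistilHuBERT checkpoint; the audio path is a placeholder.

```python
# Sketch only: run the published checkpoint through the generic
# audio-classification pipeline and print the predicted labels.
from transformers import pipeline

classifier = pipeline(
    "audio-classification",
    model="TheDuyx/distilhubert-bass-classifier6",
)

# "bass_sample.wav" is a placeholder path to a local audio file.
predictions = classifier("bass_sample.wav")
for p in predictions:
    print(f"{p['label']}: {p['score']:.3f}")
```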
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "datasets": ["augmented_bass_sounds"], "metrics": ["accuracy"], "base_model": "ntu-spml/distilhubert", "model-index": [{"name": "distilhubert-bass-classifier6", "results": [{"task": {"type": "audio-classification", "name": "Audio Classification"}, "dataset": {"name": "TheDuyx/augmented_bass_sounds", "type": "augmented_bass_sounds"}, "metrics": [{"type": "accuracy", "value": 0.9985302763080541, "name": "Accuracy"}]}]}]}
TheDuyx/distilhubert-bass-classifier6
null
[ "transformers", "safetensors", "hubert", "audio-classification", "generated_from_trainer", "dataset:augmented_bass_sounds", "base_model:ntu-spml/distilhubert", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
null
2024-04-30T17:57:40+00:00
[]
[]
TAGS #transformers #safetensors #hubert #audio-classification #generated_from_trainer #dataset-augmented_bass_sounds #base_model-ntu-spml/distilhubert #license-apache-2.0 #model-index #endpoints_compatible #region-us
distilhubert-bass-classifier6 ============================= This model is a fine-tuned version of ntu-spml/distilhubert on the TheDuyx/augmented\_bass\_sounds dataset. It achieves the following results on the evaluation set: * Loss: 0.0110 * Accuracy: 0.9985 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0005 * train\_batch\_size: 8 * eval\_batch\_size: 8 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * lr\_scheduler\_warmup\_ratio: 0.1 * num\_epochs: 5 ### Training results ### Framework versions * Transformers 4.39.2 * Pytorch 2.2.2 * Datasets 2.18.0 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* lr\\_scheduler\\_warmup\\_ratio: 0.1\n* num\\_epochs: 5", "### Training results", "### Framework versions\n\n\n* Transformers 4.39.2\n* Pytorch 2.2.2\n* Datasets 2.18.0\n* Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #safetensors #hubert #audio-classification #generated_from_trainer #dataset-augmented_bass_sounds #base_model-ntu-spml/distilhubert #license-apache-2.0 #model-index #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* lr\\_scheduler\\_warmup\\_ratio: 0.1\n* num\\_epochs: 5", "### Training results", "### Framework versions\n\n\n* Transformers 4.39.2\n* Pytorch 2.2.2\n* Datasets 2.18.0\n* Tokenizers 0.15.2" ]
[ 65, 119, 5, 40 ]
[ "TAGS\n#transformers #safetensors #hubert #audio-classification #generated_from_trainer #dataset-augmented_bass_sounds #base_model-ntu-spml/distilhubert #license-apache-2.0 #model-index #endpoints_compatible #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0005\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* lr\\_scheduler\\_warmup\\_ratio: 0.1\n* num\\_epochs: 5### Training results### Framework versions\n\n\n* Transformers 4.39.2\n* Pytorch 2.2.2\n* Datasets 2.18.0\n* Tokenizers 0.15.2" ]
null
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
codevang/fast_small_15
null
[ "transformers", "safetensors", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
2024-04-30T18:00:09+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #arxiv-1910.09700 #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #arxiv-1910.09700 #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 26, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #arxiv-1910.09700 #endpoints_compatible #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
visual-question-answering
transformers
# llama3-vision-alpha projection module trained to add vision capabilties to Llama 3 using SigLIP. built by [@yeswondwerr](https://x.com/yeswondwerr) and [@qtnx_](https://x.com/qtnx_) usable directly in Transformers, **warning: bitsandbytes quantization is currently not supported!** **usage** ``` pip install torch transformers pillow ``` ```python import torch from PIL import Image from transformers import AutoModelForCausalLM, AutoTokenizer model_id = "qresearch/llama-3-vision-alpha-hf" model = AutoModelForCausalLM.from_pretrained( model_id, trust_remote_code=True, torch_dtype=torch.float16 ).to("cuda") tokenizer = AutoTokenizer.from_pretrained( model_id, use_fast=True, ) image = Image.open("image_path") print( tokenizer.decode( model.answer_question(image, "question", tokenizer), skip_special_tokens=True, ) ) ``` **examples** | Image | Examples | | --- | --- | | <img src="assets/demo-1.jpg" width="300"/> | **What is the title of this book? answer briefly**<br>The title of the book is "The Little Book of Deep Learning".<br><br>**Where is the person standing? answer briefly**<br>The person is standing on the balcony.<br><br>**Describe the image**<br>The image shows a person holding a book with a cityscape visible through the window behind them. The book has a cover with a title that reads "The Little Book of Deep Learning" in bold letters. | | <img src="assets/demo-2.jpg" width="300"/> | **What type of food is the girl holding? answer briefly**<br>A hamburger!<br><br>**What color is the woman's hair? answer briefly**<br>It's white!<br><br>**Describe the image**<br>The image is of a young girl with short, curly hair and a sweet smile, holding a giant hamburger in her hand. She's sitting at a table with a festive dinner setting, surrounded by candles and a warm glow. Her eyes are shining with excitement and contentment as she takes a big bite of the burger. | **acknowledgements** - Liu et al. : [LLaVA](https://arxiv.org/abs/2304.08485) - Moon et al. : [AnyMAL](https://arxiv.org/abs/2309.16058) - vikhyatk : moondream, test images ``` .x+=:. z` ^% .uef^" .u . . <k .u . :d88E .u@u .d88B :@8c .u .@8Ned8" .u u .d88B :@8c . `888E .zWF8888bx ="8888f8888r ud8888. .@^%8888" ud8888. us888u. ="8888f8888r .udR88N 888E .z8k .888 9888 4888>'88" :888'8888. x88: `)8b. :888'8888. .@88 "8888" 4888>'88" <888'888k 888E~?888L I888 9888 4888> ' d888 '88%" 8888N=*8888 d888 '88%" 9888 9888 4888> ' 9888 'Y" 888E 888E I888 9888 4888> 8888.+" %8" R88 8888.+" 9888 9888 4888> 9888 888E 888E I888 9888 .d888L .+ 8888L @8Wou 9% 8888L 9888 9888 .d888L .+ 9888 888E 888E `888Nx?888 ^"8888*" '8888c. .+ .888888P` '8888c. .+ 9888 9888 ^"8888*" ?8888u../ 888E 888E "88" '888 "Y" "88888% ` ^"F "88888% "888*""888" "Y" "8888P' m888N= 888> 88E "YP' "YP' ^Y" ^Y' "P' `Y" 888 98> J88" '8 @% ` :" ```
{"language": ["en"], "license": "llama3", "datasets": ["liuhaotian/LLaVA-CC3M-Pretrain-595K"], "pipeline_tag": "visual-question-answering"}
qresearch/llama-3-vision-alpha-hf
null
[ "transformers", "safetensors", "llamavision", "text-generation", "visual-question-answering", "custom_code", "en", "dataset:liuhaotian/LLaVA-CC3M-Pretrain-595K", "arxiv:2304.08485", "arxiv:2309.16058", "license:llama3", "autotrain_compatible", "region:us" ]
null
2024-04-30T18:00:17+00:00
[ "2304.08485", "2309.16058" ]
[ "en" ]
TAGS #transformers #safetensors #llamavision #text-generation #visual-question-answering #custom_code #en #dataset-liuhaotian/LLaVA-CC3M-Pretrain-595K #arxiv-2304.08485 #arxiv-2309.16058 #license-llama3 #autotrain_compatible #region-us
llama3-vision-alpha =================== projection module trained to add vision capabilities to Llama 3 using SigLIP. built by @yeswondwerr and @qtnx\_ usable directly in Transformers, warning: bitsandbytes quantization is currently not supported! usage examples acknowledgements * Liu et al. : LLaVA * Moon et al. : AnyMAL * vikhyatk : moondream, test images
[]
[ "TAGS\n#transformers #safetensors #llamavision #text-generation #visual-question-answering #custom_code #en #dataset-liuhaotian/LLaVA-CC3M-Pretrain-595K #arxiv-2304.08485 #arxiv-2309.16058 #license-llama3 #autotrain_compatible #region-us \n" ]
[ 85 ]
[ "TAGS\n#transformers #safetensors #llamavision #text-generation #visual-question-answering #custom_code #en #dataset-liuhaotian/LLaVA-CC3M-Pretrain-595K #arxiv-2304.08485 #arxiv-2309.16058 #license-llama3 #autotrain_compatible #region-us \n" ]
null
peft
## Training procedure The following `bitsandbytes` quantization config was used during training: - quant_method: bitsandbytes - _load_in_8bit: False - _load_in_4bit: True - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: nf4 - bnb_4bit_use_double_quant: False - bnb_4bit_compute_dtype: float16 - load_in_4bit: True - load_in_8bit: False ### Framework versions - PEFT 0.4.0
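As an illustration only (not from the card), the quantization settings listed above correspond roughly to the following `transformers`/`peft` loading code; the base model id is inferred from the adapter name and is an assumption.

```python
# Sketch only: express the 4-bit NF4 config above with BitsAndBytesConfig and
# attach the PEFT adapter. The base model (meta-llama/Llama-2-7b-hf) is an
# assumption inferred from the adapter name, not stated in the card.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=False,
    bnb_4bit_compute_dtype=torch.float16,
)

base = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-7b-hf",
    quantization_config=bnb_config,
    device_map="auto",
)
model = PeftModel.from_pretrained(base, "moiseserg/llama-2-7b-datyra")
```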
{"library_name": "peft"}
moiseserg/llama-2-7b-datyra
null
[ "peft", "region:us" ]
null
2024-04-30T18:01:30+00:00
[]
[]
TAGS #peft #region-us
## Training procedure The following 'bitsandbytes' quantization config was used during training: - quant_method: bitsandbytes - _load_in_8bit: False - _load_in_4bit: True - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: nf4 - bnb_4bit_use_double_quant: False - bnb_4bit_compute_dtype: float16 - load_in_4bit: True - load_in_8bit: False ### Framework versions - PEFT 0.4.0
[ "## Training procedure\n\n\nThe following 'bitsandbytes' quantization config was used during training:\n- quant_method: bitsandbytes\n- _load_in_8bit: False\n- _load_in_4bit: True\n- llm_int8_threshold: 6.0\n- llm_int8_skip_modules: None\n- llm_int8_enable_fp32_cpu_offload: False\n- llm_int8_has_fp16_weight: False\n- bnb_4bit_quant_type: nf4\n- bnb_4bit_use_double_quant: False\n- bnb_4bit_compute_dtype: float16\n- load_in_4bit: True\n- load_in_8bit: False", "### Framework versions\n\n\n- PEFT 0.4.0" ]
[ "TAGS\n#peft #region-us \n", "## Training procedure\n\n\nThe following 'bitsandbytes' quantization config was used during training:\n- quant_method: bitsandbytes\n- _load_in_8bit: False\n- _load_in_4bit: True\n- llm_int8_threshold: 6.0\n- llm_int8_skip_modules: None\n- llm_int8_enable_fp32_cpu_offload: False\n- llm_int8_has_fp16_weight: False\n- bnb_4bit_quant_type: nf4\n- bnb_4bit_use_double_quant: False\n- bnb_4bit_compute_dtype: float16\n- load_in_4bit: True\n- load_in_8bit: False", "### Framework versions\n\n\n- PEFT 0.4.0" ]
[ 8, 174, 13 ]
[ "TAGS\n#peft #region-us \n## Training procedure\n\n\nThe following 'bitsandbytes' quantization config was used during training:\n- quant_method: bitsandbytes\n- _load_in_8bit: False\n- _load_in_4bit: True\n- llm_int8_threshold: 6.0\n- llm_int8_skip_modules: None\n- llm_int8_enable_fp32_cpu_offload: False\n- llm_int8_has_fp16_weight: False\n- bnb_4bit_quant_type: nf4\n- bnb_4bit_use_double_quant: False\n- bnb_4bit_compute_dtype: float16\n- load_in_4bit: True\n- load_in_8bit: False### Framework versions\n\n\n- PEFT 0.4.0" ]
null
transformers
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: --> <!-- ### vocab_type: --> weighted/imatrix quants of https://huggingface.co/DevsDoCode/Llama-3-8B-Instruct-1048k <!-- provided-files --> static quants are available at https://huggingface.co/mradermacher/Llama-3-8B-Instruct-1048k-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/Llama-3-8B-Instruct-1048k-i1-GGUF/resolve/main/Llama-3-8B-Instruct-1048k.i1-IQ1_S.gguf) | i1-IQ1_S | 2.1 | for the desperate | | [GGUF](https://huggingface.co/mradermacher/Llama-3-8B-Instruct-1048k-i1-GGUF/resolve/main/Llama-3-8B-Instruct-1048k.i1-IQ1_M.gguf) | i1-IQ1_M | 2.3 | for the desperate | | [GGUF](https://huggingface.co/mradermacher/Llama-3-8B-Instruct-1048k-i1-GGUF/resolve/main/Llama-3-8B-Instruct-1048k.i1-IQ2_XXS.gguf) | i1-IQ2_XXS | 2.5 | | | [GGUF](https://huggingface.co/mradermacher/Llama-3-8B-Instruct-1048k-i1-GGUF/resolve/main/Llama-3-8B-Instruct-1048k.i1-IQ2_XS.gguf) | i1-IQ2_XS | 2.7 | | | [GGUF](https://huggingface.co/mradermacher/Llama-3-8B-Instruct-1048k-i1-GGUF/resolve/main/Llama-3-8B-Instruct-1048k.i1-IQ2_S.gguf) | i1-IQ2_S | 2.9 | | | [GGUF](https://huggingface.co/mradermacher/Llama-3-8B-Instruct-1048k-i1-GGUF/resolve/main/Llama-3-8B-Instruct-1048k.i1-IQ2_M.gguf) | i1-IQ2_M | 3.0 | | | [GGUF](https://huggingface.co/mradermacher/Llama-3-8B-Instruct-1048k-i1-GGUF/resolve/main/Llama-3-8B-Instruct-1048k.i1-Q2_K.gguf) | i1-Q2_K | 3.3 | IQ3_XXS probably better | | [GGUF](https://huggingface.co/mradermacher/Llama-3-8B-Instruct-1048k-i1-GGUF/resolve/main/Llama-3-8B-Instruct-1048k.i1-IQ3_XXS.gguf) | i1-IQ3_XXS | 3.4 | lower quality | | [GGUF](https://huggingface.co/mradermacher/Llama-3-8B-Instruct-1048k-i1-GGUF/resolve/main/Llama-3-8B-Instruct-1048k.i1-IQ3_XS.gguf) | i1-IQ3_XS | 3.6 | | | [GGUF](https://huggingface.co/mradermacher/Llama-3-8B-Instruct-1048k-i1-GGUF/resolve/main/Llama-3-8B-Instruct-1048k.i1-Q3_K_S.gguf) | i1-Q3_K_S | 3.8 | IQ3_XS probably better | | [GGUF](https://huggingface.co/mradermacher/Llama-3-8B-Instruct-1048k-i1-GGUF/resolve/main/Llama-3-8B-Instruct-1048k.i1-IQ3_S.gguf) | i1-IQ3_S | 3.8 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/Llama-3-8B-Instruct-1048k-i1-GGUF/resolve/main/Llama-3-8B-Instruct-1048k.i1-IQ3_M.gguf) | i1-IQ3_M | 3.9 | | | [GGUF](https://huggingface.co/mradermacher/Llama-3-8B-Instruct-1048k-i1-GGUF/resolve/main/Llama-3-8B-Instruct-1048k.i1-Q3_K_M.gguf) | i1-Q3_K_M | 4.1 | IQ3_S probably better | | [GGUF](https://huggingface.co/mradermacher/Llama-3-8B-Instruct-1048k-i1-GGUF/resolve/main/Llama-3-8B-Instruct-1048k.i1-Q3_K_L.gguf) | i1-Q3_K_L | 4.4 | IQ3_M probably better | | [GGUF](https://huggingface.co/mradermacher/Llama-3-8B-Instruct-1048k-i1-GGUF/resolve/main/Llama-3-8B-Instruct-1048k.i1-IQ4_XS.gguf) | i1-IQ4_XS | 4.5 | | | [GGUF](https://huggingface.co/mradermacher/Llama-3-8B-Instruct-1048k-i1-GGUF/resolve/main/Llama-3-8B-Instruct-1048k.i1-Q4_0.gguf) | i1-Q4_0 | 4.8 | fast, low quality | | [GGUF](https://huggingface.co/mradermacher/Llama-3-8B-Instruct-1048k-i1-GGUF/resolve/main/Llama-3-8B-Instruct-1048k.i1-Q4_K_S.gguf) | i1-Q4_K_S | 
4.8 | optimal size/speed/quality | | [GGUF](https://huggingface.co/mradermacher/Llama-3-8B-Instruct-1048k-i1-GGUF/resolve/main/Llama-3-8B-Instruct-1048k.i1-Q4_K_M.gguf) | i1-Q4_K_M | 5.0 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Llama-3-8B-Instruct-1048k-i1-GGUF/resolve/main/Llama-3-8B-Instruct-1048k.i1-Q5_K_S.gguf) | i1-Q5_K_S | 5.7 | | | [GGUF](https://huggingface.co/mradermacher/Llama-3-8B-Instruct-1048k-i1-GGUF/resolve/main/Llama-3-8B-Instruct-1048k.i1-Q5_K_M.gguf) | i1-Q5_K_M | 5.8 | | | [GGUF](https://huggingface.co/mradermacher/Llama-3-8B-Instruct-1048k-i1-GGUF/resolve/main/Llama-3-8B-Instruct-1048k.i1-Q6_K.gguf) | i1-Q6_K | 6.7 | practically like static Q6_K | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. <!-- end -->
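As a concrete companion to the usage note above, the sketch below downloads one of the single-file quants listed in the table with `huggingface_hub` and runs it with `llama-cpp-python`. The choice of the i1-Q4_K_M file, the context length, and the prompt are illustrative assumptions, not recommendations from this card.

```python
from huggingface_hub import hf_hub_download
from llama_cpp import Llama  # pip install llama-cpp-python

# Fetch the "fast, recommended" i1-Q4_K_M quant from the table above.
gguf_path = hf_hub_download(
    repo_id="mradermacher/Llama-3-8B-Instruct-1048k-i1-GGUF",
    filename="Llama-3-8B-Instruct-1048k.i1-Q4_K_M.gguf",
)

# Single-file quants load directly; multi-part quants would need to be
# concatenated first, as described in the README linked above.
llm = Llama(model_path=gguf_path, n_ctx=8192)
out = llm("Q: What is an imatrix (weighted) quant?\nA:", max_tokens=128)
print(out["choices"][0]["text"])
```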
{"language": ["en"], "library_name": "transformers", "base_model": "DevsDoCode/Llama-3-8B-Instruct-1048k", "quantized_by": "mradermacher"}
mradermacher/Llama-3-8B-Instruct-1048k-i1-GGUF
null
[ "transformers", "gguf", "en", "base_model:DevsDoCode/Llama-3-8B-Instruct-1048k", "endpoints_compatible", "region:us" ]
null
2024-04-30T18:01:57+00:00
[]
[ "en" ]
TAGS #transformers #gguf #en #base_model-DevsDoCode/Llama-3-8B-Instruct-1048k #endpoints_compatible #region-us
About ----- weighted/imatrix quants of URL static quants are available at URL Usage ----- If you are unsure how to use GGUF files, refer to one of TheBloke's READMEs for more details, including on how to concatenate multi-part files. Provided Quants --------------- (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): !URL And here are Artefact2's thoughts on the matter: URL FAQ / Model Request ------------------- See URL for some answers to questions you might have and/or if you want some other model quantized. Thanks ------ I thank my company, nethype GmbH, for letting me use its servers and providing upgrades to my workstation to enable this work in my free time.
[]
[ "TAGS\n#transformers #gguf #en #base_model-DevsDoCode/Llama-3-8B-Instruct-1048k #endpoints_compatible #region-us \n" ]
[ 42 ]
[ "TAGS\n#transformers #gguf #en #base_model-DevsDoCode/Llama-3-8B-Instruct-1048k #endpoints_compatible #region-us \n" ]
reinforcement-learning
sample-factory
An **APPO** model trained on the **GDY-PowerGrid** environment. This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory. Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/ ## Downloading the model After installing Sample-Factory, download the model with: ``` python -m sample_factory.huggingface.load_from_hub -r metta-ai/baseline.v0.1.1 ``` ## Using the model To run the model after download, use the `enjoy` script corresponding to this environment: ``` python -m <path.to.enjoy.module> --algo=APPO --env=GDY-PowerGrid --train_dir=./train_dir --experiment=baseline.v0.1.1 ``` You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag. See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details ## Training with this model To continue training with this model, use the `train` script corresponding to this environment: ``` python -m <path.to.train.module> --algo=APPO --env=GDY-PowerGrid --train_dir=./train_dir --experiment=baseline.v0.1.1 --restart_behavior=resume --train_for_env_steps=10000000000 ``` Note, you may have to adjust `--train_for_env_steps` to a suitably high number as the experiment will resume at the number of steps it concluded at.
{"library_name": "sample-factory", "tags": ["deep-reinforcement-learning", "reinforcement-learning", "sample-factory"]}
metta-ai/baseline.v0.1.1
null
[ "sample-factory", "tensorboard", "deep-reinforcement-learning", "reinforcement-learning", "region:us" ]
null
2024-04-30T18:02:31+00:00
[]
[]
TAGS #sample-factory #tensorboard #deep-reinforcement-learning #reinforcement-learning #region-us
An APPO model trained on the GDY-PowerGrid environment. This model was trained using Sample-Factory 2.0: URL Documentation for how to use Sample-Factory can be found at URL ## Downloading the model After installing Sample-Factory, download the model with: ## Using the model To run the model after download, use the 'enjoy' script corresponding to this environment: You can also upload models to the Hugging Face Hub using the same script with the '--push_to_hub' flag. See URL for more details ## Training with this model To continue training with this model, use the 'train' script corresponding to this environment: Note, you may have to adjust '--train_for_env_steps' to a suitably high number as the experiment will resume at the number of steps it concluded at.
[ "## Downloading the model\n\nAfter installing Sample-Factory, download the model with:", "## Using the model\n\nTo run the model after download, use the 'enjoy' script corresponding to this environment:\n\n\n\nYou can also upload models to the Hugging Face Hub using the same script with the '--push_to_hub' flag.\nSee URL for more details", "## Training with this model\n\nTo continue training with this model, use the 'train' script corresponding to this environment:\n\n\nNote, you may have to adjust '--train_for_env_steps' to a suitably high number as the experiment will resume at the number of steps it concluded at." ]
[ "TAGS\n#sample-factory #tensorboard #deep-reinforcement-learning #reinforcement-learning #region-us \n", "## Downloading the model\n\nAfter installing Sample-Factory, download the model with:", "## Using the model\n\nTo run the model after download, use the 'enjoy' script corresponding to this environment:\n\n\n\nYou can also upload models to the Hugging Face Hub using the same script with the '--push_to_hub' flag.\nSee URL for more details", "## Training with this model\n\nTo continue training with this model, use the 'train' script corresponding to this environment:\n\n\nNote, you may have to adjust '--train_for_env_steps' to a suitably high number as the experiment will resume at the number of steps it concluded at." ]
[ 22, 17, 57, 63 ]
[ "TAGS\n#sample-factory #tensorboard #deep-reinforcement-learning #reinforcement-learning #region-us \n## Downloading the model\n\nAfter installing Sample-Factory, download the model with:## Using the model\n\nTo run the model after download, use the 'enjoy' script corresponding to this environment:\n\n\n\nYou can also upload models to the Hugging Face Hub using the same script with the '--push_to_hub' flag.\nSee URL for more details## Training with this model\n\nTo continue training with this model, use the 'train' script corresponding to this environment:\n\n\nNote, you may have to adjust '--train_for_env_steps' to a suitably high number as the experiment will resume at the number of steps it concluded at." ]
null
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
Nyxmmz/lora
null
[ "transformers", "safetensors", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
2024-04-30T18:04:51+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #arxiv-1910.09700 #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #arxiv-1910.09700 #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 26, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #arxiv-1910.09700 #endpoints_compatible #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": ["llama-factory"]}
justinj92/phi3-orpo
null
[ "transformers", "safetensors", "phi3", "text-generation", "llama-factory", "conversational", "custom_code", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "region:us", "has_space" ]
null
2024-04-30T18:05:10+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #phi3 #text-generation #llama-factory #conversational #custom_code #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us #has_space
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #phi3 #text-generation #llama-factory #conversational #custom_code #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us #has_space \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 54, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #phi3 #text-generation #llama-factory #conversational #custom_code #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us #has_space \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
gytdau/mazegpt
null
[ "transformers", "safetensors", "gpt2", "text-generation", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T18:05:20+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #gpt2 #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #gpt2 #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 45, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #gpt2 #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
text-generation
mlx
# mlx-community/Meta-Llama-3-70B-Instruct-mlx-unquantized This model was converted to MLX format from [`meta-llama/Meta-Llama-3-70B-Instruct`](https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct) using mlx-lm version **0.12.1**. Refer to the [original model card](https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct) for more details on the model. ## Use with mlx ```bash pip install mlx-lm ``` ```python from mlx_lm import load, generate model, tokenizer = load("mlx-community/Meta-Llama-3-70B-Instruct-mlx-unquantized") response = generate(model, tokenizer, prompt="hello", verbose=True) ```
{"language": ["en"], "license": "other", "tags": ["facebook", "meta", "pytorch", "llama", "llama-3", "mlx"], "pipeline_tag": "text-generation", "license_name": "llama3", "license_link": "LICENSE", "extra_gated_prompt": "### META LLAMA 3 COMMUNITY LICENSE AGREEMENT\nMeta Llama 3 Version Release Date: April 18, 2024\n\"Agreement\" means the terms and conditions for use, reproduction, distribution and modification of the Llama Materials set forth herein.\n\"Documentation\" means the specifications, manuals and documentation accompanying Meta Llama 3 distributed by Meta at https://llama.meta.com/get-started/.\n\"Licensee\" or \"you\" means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity\u2019s behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf.\n\"Meta Llama 3\" means the foundational large language models and software and algorithms, including machine-learning model code, trained model weights, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing distributed by Meta at https://llama.meta.com/llama-downloads.\n\"Llama Materials\" means, collectively, Meta\u2019s proprietary Meta Llama 3 and Documentation (and any portion thereof) made available under this Agreement.\n\"Meta\" or \"we\" means Meta Platforms Ireland Limited (if you are located in or, if you are an entity, your principal place of business is in the EEA or Switzerland) and Meta Platforms, Inc. (if you are located outside of the EEA or Switzerland).\n \n1. License Rights and Redistribution.\na. Grant of Rights. You are granted a non-exclusive, worldwide, non-transferable and royalty-free limited license under Meta\u2019s intellectual property or other rights owned by Meta embodied in the Llama Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the Llama Materials.\nb. Redistribution and Use.\ni. If you distribute or make available the Llama Materials (or any derivative works thereof), or a product or service that uses any of them, including another AI model, you shall (A) provide a copy of this Agreement with any such Llama Materials; and (B) prominently display \u201cBuilt with Meta Llama 3\u201d on a related website, user interface, blogpost, about page, or product documentation. If you use the Llama Materials to create, train, fine tune, or otherwise improve an AI model, which is distributed or made available, you shall also include \u201cLlama 3\u201d at the beginning of any such AI model name.\nii. If you receive Llama Materials, or any derivative works thereof, from a Licensee as part of an integrated end user product, then Section 2 of this Agreement will not apply to you.\niii. You must retain in all copies of the Llama Materials that you distribute the following attribution notice within a \u201cNotice\u201d text file distributed as a part of such copies: \u201cMeta Llama 3 is licensed under the Meta Llama 3 Community License, Copyright \u00a9 Meta Platforms, Inc. All Rights Reserved.\u201d\niv. 
Your use of the Llama Materials must comply with applicable laws and regulations (including trade compliance laws and regulations) and adhere to the Acceptable Use Policy for the Llama Materials (available at https://llama.meta.com/llama3/use-policy), which is hereby incorporated by reference into this Agreement.\nv. You will not use the Llama Materials or any output or results of the Llama Materials to improve any other large language model (excluding Meta Llama 3 or derivative works thereof).\n2. Additional Commercial Terms. If, on the Meta Llama 3 version release date, the monthly active users of the products or services made available by or for Licensee, or Licensee\u2019s affiliates, is greater than 700 million monthly active users in the preceding calendar month, you must request a license from Meta, which Meta may grant to you in its sole discretion, and you are not authorized to exercise any of the rights under this Agreement unless or until Meta otherwise expressly grants you such rights.\n3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN \u201cAS IS\u201d BASIS, WITHOUT WARRANTIES OF ANY KIND, AND META DISCLAIMS ALL WARRANTIES OF ANY KIND, BOTH EXPRESS AND IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS.\n4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING.\n5. Intellectual Property.\na. No trademark licenses are granted under this Agreement, and in connection with the Llama Materials, neither Meta nor Licensee may use any name or mark owned by or associated with the other or any of its affiliates, except as required for reasonable and customary use in describing and redistributing the Llama Materials or as set forth in this Section 5(a). Meta hereby grants you a license to use \u201cLlama 3\u201d (the \u201cMark\u201d) solely as required to comply with the last sentence of Section 1.b.i. You will comply with Meta\u2019s brand guidelines (currently accessible at https://about.meta.com/brand/resources/meta/company-brand/ ). All goodwill arising out of your use of the Mark will inure to the benefit of Meta.\nb. Subject to Meta\u2019s ownership of Llama Materials and derivatives made by or for Meta, with respect to any derivative works and modifications of the Llama Materials that are made by you, as between you and Meta, you are and will be the owner of such derivative works and modifications.\nc. 
If you institute litigation or other proceedings against Meta or any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Llama Materials or Meta Llama 3 outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Meta from and against any claim by any third party arising out of or related to your use or distribution of the Llama Materials.\n6. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the Llama Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Meta may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of the Llama Materials. Sections 3, 4 and 7 shall survive the termination of this Agreement.\n7. Governing Law and Jurisdiction. This Agreement will be governed and construed under the laws of the State of California without regard to choice of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement. The courts of California shall have exclusive jurisdiction of any dispute arising out of this Agreement.\n### Meta Llama 3 Acceptable Use Policy\nMeta is committed to promoting safe and fair use of its tools and features, including Meta Llama 3. If you access or use Meta Llama 3, you agree to this Acceptable Use Policy (\u201cPolicy\u201d). The most recent copy of this policy can be found at [https://llama.meta.com/llama3/use-policy](https://llama.meta.com/llama3/use-policy)\n#### Prohibited Uses\nWe want everyone to use Meta Llama 3 safely and responsibly. You agree you will not use, or allow others to use, Meta Llama 3 to: 1. Violate the law or others\u2019 rights, including to:\n 1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as:\n 1. Violence or terrorism\n 2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material\n 3. Human trafficking, exploitation, and sexual violence\n 4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials.\n 5. Sexual solicitation\n 6. Any other criminal activity\n 2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals\n 3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services\n 4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices\n 5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws\n 6. 
Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama Materials\n 7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system\n2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of Meta Llama 3 related to the following:\n 1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State\n 2. Guns and illegal weapons (including weapon development)\n 3. Illegal drugs and regulated/controlled substances\n 4. Operation of critical infrastructure, transportation technologies, or heavy machinery\n 5. Self-harm or harm to others, including suicide, cutting, and eating disorders\n 6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual\n3. Intentionally deceive or mislead others, including use of Meta Llama 3 related to the following:\n 1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation\n 2. Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content\n 3. Generating, promoting, or further distributing spam\n 4. Impersonating another individual without consent, authorization, or legal right\n 5. Representing that the use of Meta Llama 3 or outputs are human-generated\n 6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement\n4. Fail to appropriately disclose to end users any known dangers of your AI system\nPlease report any violation of this Policy, software \u201cbug,\u201d or other problems that could lead to a violation of this Policy through one of the following means:\n * Reporting issues with the model: [https://github.com/meta-llama/llama3](https://github.com/meta-llama/llama3)\n * Reporting risky content generated by the model:\n developers.facebook.com/llama_output_feedback\n * Reporting bugs and security concerns: facebook.com/whitehat/info\n * Reporting violations of the Acceptable Use Policy or unlicensed uses of Meta Llama 3: [email protected]", "extra_gated_fields": {"First Name": "text", "Last Name": "text", "Date of birth": "date_picker", "Country": "country", "Affiliation": "text", "geo": "ip_location", "By clicking Submit below I accept the terms of the license and acknowledge that the information I provide will be collected stored processed and shared in accordance with the Meta Privacy Policy": "checkbox"}, "extra_gated_description": "The information you provide will be collected, stored, processed and shared in accordance with the [Meta Privacy Policy](https://www.facebook.com/privacy/policy/).", "extra_gated_button_content": "Submit", "widget": [{"example_title": "Winter holidays", "messages": [{"role": "system", "content": "You are a helpful and honest assistant. 
Please, respond concisely and truthfully."}, {"role": "user", "content": "Can you recommend a good destination for Winter holidays?"}]}, {"example_title": "Programming assistant", "messages": [{"role": "system", "content": "You are a helpful and honest code and programming assistant. Please, respond concisely and truthfully."}, {"role": "user", "content": "Write a function that computes the nth fibonacci number."}]}], "inference": {"parameters": {"max_new_tokens": 300, "stop": ["<|end_of_text|>", "<|eot_id|>"]}}}
mlx-community/Meta-Llama-3-70B-Instruct-mlx-unquantized
null
[ "mlx", "safetensors", "llama", "facebook", "meta", "pytorch", "llama-3", "text-generation", "conversational", "en", "license:other", "region:us" ]
null
2024-04-30T18:05:50+00:00
[]
[ "en" ]
TAGS #mlx #safetensors #llama #facebook #meta #pytorch #llama-3 #text-generation #conversational #en #license-other #region-us
# mlx-community/Meta-Llama-3-70B-Instruct-mlx-unquantized This model was converted to MLX format from ['meta-llama/Meta-Llama-3-70B-Instruct']() using mlx-lm version 0.12.1. Refer to the original model card for more details on the model. ## Use with mlx
[ "# mlx-community/Meta-Llama-3-70B-Instruct-mlx-unquantized\nThis model was converted to MLX format from ['meta-llama/Meta-Llama-3-70B-Instruct']() using mlx-lm version 0.12.1.\nRefer to the original model card for more details on the model.", "## Use with mlx" ]
[ "TAGS\n#mlx #safetensors #llama #facebook #meta #pytorch #llama-3 #text-generation #conversational #en #license-other #region-us \n", "# mlx-community/Meta-Llama-3-70B-Instruct-mlx-unquantized\nThis model was converted to MLX format from ['meta-llama/Meta-Llama-3-70B-Instruct']() using mlx-lm version 0.12.1.\nRefer to the original model card for more details on the model.", "## Use with mlx" ]
[ 42, 84, 6 ]
[ "TAGS\n#mlx #safetensors #llama #facebook #meta #pytorch #llama-3 #text-generation #conversational #en #license-other #region-us \n# mlx-community/Meta-Llama-3-70B-Instruct-mlx-unquantized\nThis model was converted to MLX format from ['meta-llama/Meta-Llama-3-70B-Instruct']() using mlx-lm version 0.12.1.\nRefer to the original model card for more details on the model.## Use with mlx" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"language": ["my"], "library_name": "transformers", "pipeline_tag": "text-generation"}
Turtle344/mistral-health-faq-v1
null
[ "transformers", "safetensors", "mistral", "text-generation", "my", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T18:06:36+00:00
[ "1910.09700" ]
[ "my" ]
TAGS #transformers #safetensors #mistral #text-generation #my #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #mistral #text-generation #my #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 46, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #mistral #text-generation #my #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # uae-climate-multi-classifier-weighted This model is a fine-tuned version of [alex-miller/ODABert](https://huggingface.co/alex-miller/ODABert) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.0936 - Accuracy: 0.9197 - F1: 0.7059 - Precision: 0.7 - Recall: 0.7119 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-06 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Precision | Recall | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:---------:|:------:| | 1.2281 | 1.0 | 246 | 1.4142 | 0.8670 | 0.0333 | 1.0 | 0.0169 | | 1.1785 | 2.0 | 492 | 1.3474 | 0.8830 | 0.3014 | 0.7857 | 0.1864 | | 1.0939 | 3.0 | 738 | 1.1641 | 0.8784 | 0.4421 | 0.5833 | 0.3559 | | 0.9503 | 4.0 | 984 | 1.1304 | 0.9083 | 0.5455 | 0.8276 | 0.4068 | | 0.8772 | 5.0 | 1230 | 1.1369 | 0.9197 | 0.6154 | 0.875 | 0.4746 | | 1.0567 | 6.0 | 1476 | 0.9456 | 0.9151 | 0.6667 | 0.7115 | 0.6271 | | 0.9089 | 7.0 | 1722 | 0.9752 | 0.9174 | 0.6727 | 0.7255 | 0.6271 | | 0.9149 | 8.0 | 1968 | 1.1084 | 0.9197 | 0.6667 | 0.7609 | 0.5932 | | 0.8285 | 9.0 | 2214 | 0.8487 | 0.9037 | 0.6818 | 0.6164 | 0.7627 | | 0.8322 | 10.0 | 2460 | 1.1982 | 0.9174 | 0.6471 | 0.7674 | 0.5593 | | 0.7955 | 11.0 | 2706 | 1.0507 | 0.9174 | 0.6667 | 0.7347 | 0.6102 | | 0.7942 | 12.0 | 2952 | 0.9965 | 0.9197 | 0.7059 | 0.7 | 0.7119 | | 0.7152 | 13.0 | 3198 | 1.0222 | 0.9197 | 0.7009 | 0.7069 | 0.6949 | | 0.7015 | 14.0 | 3444 | 0.9579 | 0.9197 | 0.7244 | 0.6765 | 0.7797 | | 0.5537 | 15.0 | 3690 | 1.0659 | 0.9220 | 0.7069 | 0.7193 | 0.6949 | | 0.6379 | 16.0 | 3936 | 1.0921 | 0.9197 | 0.6957 | 0.7143 | 0.6780 | | 0.5977 | 17.0 | 4182 | 1.0491 | 0.9220 | 0.7213 | 0.6984 | 0.7458 | | 0.6792 | 18.0 | 4428 | 1.0872 | 0.9197 | 0.7059 | 0.7 | 0.7119 | | 0.5509 | 19.0 | 4674 | 1.0763 | 0.9220 | 0.7167 | 0.7049 | 0.7288 | | 0.579 | 20.0 | 4920 | 1.0936 | 0.9197 | 0.7059 | 0.7 | 0.7119 | ### Framework versions - Transformers 4.39.3 - Pytorch 2.2.2+cu121 - Datasets 2.18.0 - Tokenizers 0.15.2
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "metrics": ["accuracy", "f1", "precision", "recall"], "base_model": "alex-miller/ODABert", "model-index": [{"name": "uae-climate-multi-classifier-weighted", "results": []}]}
alex-miller/uae-climate-multi-classifier-weighted
null
[ "transformers", "safetensors", "bert", "text-classification", "generated_from_trainer", "base_model:alex-miller/ODABert", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-30T18:06:49+00:00
[]
[]
TAGS #transformers #safetensors #bert #text-classification #generated_from_trainer #base_model-alex-miller/ODABert #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us
uae-climate-multi-classifier-weighted ===================================== This model is a fine-tuned version of alex-miller/ODABert on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 1.0936 * Accuracy: 0.9197 * F1: 0.7059 * Precision: 0.7 * Recall: 0.7119 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 1e-06 * train\_batch\_size: 8 * eval\_batch\_size: 8 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * num\_epochs: 20 ### Training results ### Framework versions * Transformers 4.39.3 * Pytorch 2.2.2+cu121 * Datasets 2.18.0 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 1e-06\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 20", "### Training results", "### Framework versions\n\n\n* Transformers 4.39.3\n* Pytorch 2.2.2+cu121\n* Datasets 2.18.0\n* Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #safetensors #bert #text-classification #generated_from_trainer #base_model-alex-miller/ODABert #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 1e-06\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 20", "### Training results", "### Framework versions\n\n\n* Transformers 4.39.3\n* Pytorch 2.2.2+cu121\n* Datasets 2.18.0\n* Tokenizers 0.15.2" ]
[ 53, 101, 5, 44 ]
[ "TAGS\n#transformers #safetensors #bert #text-classification #generated_from_trainer #base_model-alex-miller/ODABert #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 1e-06\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 20### Training results### Framework versions\n\n\n* Transformers 4.39.3\n* Pytorch 2.2.2+cu121\n* Datasets 2.18.0\n* Tokenizers 0.15.2" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/mizoru/ORD/runs/t813gl5z) # Whisper Small Ru ORD 0.7 PEFT LoRA - Mizoru This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the ORD_0.7 dataset. It achieves the following results on the evaluation set: - eval_loss: 1.3021 - eval_wer: 60.8525 - eval_cer: 33.9271 - eval_clean_wer: 53.2332 - eval_clean_cer: 28.0130 - eval_runtime: 1732.3102 - eval_samples_per_second: 0.451 - eval_steps_per_second: 0.028 - epoch: 7.0 - step: 1372 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.001 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 50 - training_steps: 2000 - mixed_precision_training: Native AMP ### Framework versions - PEFT 0.10.1.dev0 - Transformers 4.41.0.dev0 - Pytorch 2.1.2 - Datasets 2.18.0 - Tokenizers 0.19.1
{"language": ["ru"], "license": "apache-2.0", "library_name": "peft", "tags": ["generated_from_trainer"], "base_model": "openai/whisper-small", "model-index": [{"name": "Whisper Small Ru ORD 0.7 PEFT LoRA - Mizoru ", "results": []}]}
mizoru/whisper-small-ru-ORD_0.7_peft_0.3
null
[ "peft", "safetensors", "generated_from_trainer", "ru", "base_model:openai/whisper-small", "license:apache-2.0", "region:us" ]
null
2024-04-30T18:06:59+00:00
[]
[ "ru" ]
TAGS #peft #safetensors #generated_from_trainer #ru #base_model-openai/whisper-small #license-apache-2.0 #region-us
<img src="URL alt="Visualize in Weights & Biases" width="200" height="32"/> # Whisper Small Ru ORD 0.7 PEFT LoRA - Mizoru This model is a fine-tuned version of openai/whisper-small on the ORD_0.7 dataset. It achieves the following results on the evaluation set: - eval_loss: 1.3021 - eval_wer: 60.8525 - eval_cer: 33.9271 - eval_clean_wer: 53.2332 - eval_clean_cer: 28.0130 - eval_runtime: 1732.3102 - eval_samples_per_second: 0.451 - eval_steps_per_second: 0.028 - epoch: 7.0 - step: 1372 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.001 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 50 - training_steps: 2000 - mixed_precision_training: Native AMP ### Framework versions - PEFT 0.10.1.dev0 - Transformers 4.41.0.dev0 - Pytorch 2.1.2 - Datasets 2.18.0 - Tokenizers 0.19.1
[ "# Whisper Small Ru ORD 0.7 PEFT LoRA - Mizoru \n\nThis model is a fine-tuned version of openai/whisper-small on the ORD_0.7 dataset.\nIt achieves the following results on the evaluation set:\n- eval_loss: 1.3021\n- eval_wer: 60.8525\n- eval_cer: 33.9271\n- eval_clean_wer: 53.2332\n- eval_clean_cer: 28.0130\n- eval_runtime: 1732.3102\n- eval_samples_per_second: 0.451\n- eval_steps_per_second: 0.028\n- epoch: 7.0\n- step: 1372", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 0.001\n- train_batch_size: 16\n- eval_batch_size: 16\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- lr_scheduler_warmup_steps: 50\n- training_steps: 2000\n- mixed_precision_training: Native AMP", "### Framework versions\n\n- PEFT 0.10.1.dev0\n- Transformers 4.41.0.dev0\n- Pytorch 2.1.2\n- Datasets 2.18.0\n- Tokenizers 0.19.1" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #ru #base_model-openai/whisper-small #license-apache-2.0 #region-us \n", "# Whisper Small Ru ORD 0.7 PEFT LoRA - Mizoru \n\nThis model is a fine-tuned version of openai/whisper-small on the ORD_0.7 dataset.\nIt achieves the following results on the evaluation set:\n- eval_loss: 1.3021\n- eval_wer: 60.8525\n- eval_cer: 33.9271\n- eval_clean_wer: 53.2332\n- eval_clean_cer: 28.0130\n- eval_runtime: 1732.3102\n- eval_samples_per_second: 0.451\n- eval_steps_per_second: 0.028\n- epoch: 7.0\n- step: 1372", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 0.001\n- train_batch_size: 16\n- eval_batch_size: 16\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- lr_scheduler_warmup_steps: 50\n- training_steps: 2000\n- mixed_precision_training: Native AMP", "### Framework versions\n\n- PEFT 0.10.1.dev0\n- Transformers 4.41.0.dev0\n- Pytorch 2.1.2\n- Datasets 2.18.0\n- Tokenizers 0.19.1" ]
[ 39, 162, 7, 9, 9, 4, 112, 54 ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #ru #base_model-openai/whisper-small #license-apache-2.0 #region-us \n# Whisper Small Ru ORD 0.7 PEFT LoRA - Mizoru \n\nThis model is a fine-tuned version of openai/whisper-small on the ORD_0.7 dataset.\nIt achieves the following results on the evaluation set:\n- eval_loss: 1.3021\n- eval_wer: 60.8525\n- eval_cer: 33.9271\n- eval_clean_wer: 53.2332\n- eval_clean_cer: 28.0130\n- eval_runtime: 1732.3102\n- eval_samples_per_second: 0.451\n- eval_steps_per_second: 0.028\n- epoch: 7.0\n- step: 1372## Model description\n\nMore information needed## Intended uses & limitations\n\nMore information needed## Training and evaluation data\n\nMore information needed## Training procedure### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 0.001\n- train_batch_size: 16\n- eval_batch_size: 16\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- lr_scheduler_warmup_steps: 50\n- training_steps: 2000\n- mixed_precision_training: Native AMP### Framework versions\n\n- PEFT 0.10.1.dev0\n- Transformers 4.41.0.dev0\n- Pytorch 2.1.2\n- Datasets 2.18.0\n- Tokenizers 0.19.1" ]
text-generation
transformers
# GreenBit LLMs This is GreenBitAI's pretrained **low-bit** LLMs with extreme compression yet still strong performance. Please refer to our [Github page](https://github.com/GreenBitAI/green-bit-llm) for the code to run the model and more information.
{"license": "apache-2.0"}
GreenBitAI/Llama-3-70B-instruct-layer-mix-bpw-4.0
null
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T18:08:55+00:00
[]
[]
TAGS #transformers #safetensors #llama #text-generation #conversational #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# GreenBit LLMs This is GreenBitAI's pretrained low-bit LLMs with extreme compression yet still strong performance. Please refer to our Github page for the code to run the model and more information.
[ "# GreenBit LLMs\n\nThis is GreenBitAI's pretrained low-bit LLMs with extreme compression yet still strong performance.\n\nPlease refer to our Github page for the code to run the model and more information." ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #conversational #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# GreenBit LLMs\n\nThis is GreenBitAI's pretrained low-bit LLMs with extreme compression yet still strong performance.\n\nPlease refer to our Github page for the code to run the model and more information." ]
[ 45, 47 ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #conversational #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# GreenBit LLMs\n\nThis is GreenBitAI's pretrained low-bit LLMs with extreme compression yet still strong performance.\n\nPlease refer to our Github page for the code to run the model and more information." ]
token-classification
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
matthewleechen/test_ner_patentspecs
null
[ "transformers", "safetensors", "bert", "token-classification", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-30T18:09:45+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #bert #token-classification #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #bert #token-classification #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 37, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #bert #token-classification #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
SeemalT/gemma2b-finetuned
null
[ "transformers", "safetensors", "gemma", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T18:11:36+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #gemma #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #gemma #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 46, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #gemma #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # eurus-7b-cost-UC-5e-7 This model is a fine-tuned version of [openbmb/Eurus-7b-sft](https://huggingface.co/openbmb/Eurus-7b-sft) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.6920 - Rewards/chosen: -0.5152 - Rewards/rejected: -0.5522 - Rewards/accuracies: 0.5560 - Rewards/margins: 0.0369 - Rewards/margins Max: 0.5671 - Rewards/margins Min: -0.4943 - Rewards/margins Std: 0.3483 - Logps/rejected: -312.9047 - Logps/chosen: -326.6244 - Logits/rejected: -2.1863 - Logits/chosen: -2.3047 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-07 - train_batch_size: 2 - eval_batch_size: 8 - seed: 42 - distributed_type: multi-GPU - num_devices: 2 - gradient_accumulation_steps: 4 - total_train_batch_size: 16 - total_eval_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Rewards/margins Max | Rewards/margins Min | Rewards/margins Std | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen | |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:-------------------:|:-------------------:|:-------------------:|:--------------:|:------------:|:---------------:|:-------------:| | 0.3977 | 1.0 | 968 | 0.6920 | -0.5152 | -0.5522 | 0.5560 | 0.0369 | 0.5671 | -0.4943 | 0.3483 | -312.9047 | -326.6244 | -2.1863 | -2.3047 | ### Framework versions - PEFT 0.7.1 - Transformers 4.39.0.dev0 - Pytorch 2.1.2+cu121 - Datasets 2.14.6 - Tokenizers 0.15.2
{"license": "apache-2.0", "library_name": "peft", "tags": ["trl", "dpo", "generated_from_trainer"], "base_model": "openbmb/Eurus-7b-sft", "model-index": [{"name": "eurus-7b-cost-UC-5e-7", "results": []}]}
just1nseo/eurus-7b-cost-UC-5e-7
null
[ "peft", "safetensors", "trl", "dpo", "generated_from_trainer", "base_model:openbmb/Eurus-7b-sft", "license:apache-2.0", "region:us" ]
null
2024-04-30T18:11:52+00:00
[]
[]
TAGS #peft #safetensors #trl #dpo #generated_from_trainer #base_model-openbmb/Eurus-7b-sft #license-apache-2.0 #region-us
eurus-7b-cost-UC-5e-7 ===================== This model is a fine-tuned version of openbmb/Eurus-7b-sft on the None dataset. It achieves the following results on the evaluation set: * Loss: 0.6920 * Rewards/chosen: -0.5152 * Rewards/rejected: -0.5522 * Rewards/accuracies: 0.5560 * Rewards/margins: 0.0369 * Rewards/margins Max: 0.5671 * Rewards/margins Min: -0.4943 * Rewards/margins Std: 0.3483 * Logps/rejected: -312.9047 * Logps/chosen: -326.6244 * Logits/rejected: -2.1863 * Logits/chosen: -2.3047 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 5e-07 * train\_batch\_size: 2 * eval\_batch\_size: 8 * seed: 42 * distributed\_type: multi-GPU * num\_devices: 2 * gradient\_accumulation\_steps: 4 * total\_train\_batch\_size: 16 * total\_eval\_batch\_size: 16 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: cosine * lr\_scheduler\_warmup\_ratio: 0.1 * num\_epochs: 1 ### Training results ### Framework versions * PEFT 0.7.1 * Transformers 4.39.0.dev0 * Pytorch 2.1.2+cu121 * Datasets 2.14.6 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 5e-07\n* train\\_batch\\_size: 2\n* eval\\_batch\\_size: 8\n* seed: 42\n* distributed\\_type: multi-GPU\n* num\\_devices: 2\n* gradient\\_accumulation\\_steps: 4\n* total\\_train\\_batch\\_size: 16\n* total\\_eval\\_batch\\_size: 16\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: cosine\n* lr\\_scheduler\\_warmup\\_ratio: 0.1\n* num\\_epochs: 1", "### Training results", "### Framework versions\n\n\n* PEFT 0.7.1\n* Transformers 4.39.0.dev0\n* Pytorch 2.1.2+cu121\n* Datasets 2.14.6\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #trl #dpo #generated_from_trainer #base_model-openbmb/Eurus-7b-sft #license-apache-2.0 #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 5e-07\n* train\\_batch\\_size: 2\n* eval\\_batch\\_size: 8\n* seed: 42\n* distributed\\_type: multi-GPU\n* num\\_devices: 2\n* gradient\\_accumulation\\_steps: 4\n* total\\_train\\_batch\\_size: 16\n* total\\_eval\\_batch\\_size: 16\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: cosine\n* lr\\_scheduler\\_warmup\\_ratio: 0.1\n* num\\_epochs: 1", "### Training results", "### Framework versions\n\n\n* PEFT 0.7.1\n* Transformers 4.39.0.dev0\n* Pytorch 2.1.2+cu121\n* Datasets 2.14.6\n* Tokenizers 0.15.2" ]
[ 49, 176, 5, 55 ]
[ "TAGS\n#peft #safetensors #trl #dpo #generated_from_trainer #base_model-openbmb/Eurus-7b-sft #license-apache-2.0 #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 5e-07\n* train\\_batch\\_size: 2\n* eval\\_batch\\_size: 8\n* seed: 42\n* distributed\\_type: multi-GPU\n* num\\_devices: 2\n* gradient\\_accumulation\\_steps: 4\n* total\\_train\\_batch\\_size: 16\n* total\\_eval\\_batch\\_size: 16\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: cosine\n* lr\\_scheduler\\_warmup\\_ratio: 0.1\n* num\\_epochs: 1### Training results### Framework versions\n\n\n* PEFT 0.7.1\n* Transformers 4.39.0.dev0\n* Pytorch 2.1.2+cu121\n* Datasets 2.14.6\n* Tokenizers 0.15.2" ]
null
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
hyojin99/whisper-medium
null
[ "transformers", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
2024-04-30T18:13:12+00:00
[ "1910.09700" ]
[]
TAGS #transformers #arxiv-1910.09700 #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #arxiv-1910.09700 #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 22, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #arxiv-1910.09700 #endpoints_compatible #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # robust_llm_pythia-70m_mz-135_WordLength_n-its-10-seed-3 This model is a fine-tuned version of [EleutherAI/pythia-70m](https://huggingface.co/EleutherAI/pythia-70m) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 64 - seed: 3 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.2.1 - Datasets 2.18.0 - Tokenizers 0.15.2
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "base_model": "EleutherAI/pythia-70m", "model-index": [{"name": "robust_llm_pythia-70m_mz-135_WordLength_n-its-10-seed-3", "results": []}]}
AlignmentResearch/robust_llm_pythia-70m_mz-135_WordLength_n-its-10-seed-3
null
[ "transformers", "tensorboard", "safetensors", "gpt_neox", "text-classification", "generated_from_trainer", "base_model:EleutherAI/pythia-70m", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T18:14:31+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-70m #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# robust_llm_pythia-70m_mz-135_WordLength_n-its-10-seed-3 This model is a fine-tuned version of EleutherAI/pythia-70m on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 64 - seed: 3 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.2.1 - Datasets 2.18.0 - Tokenizers 0.15.2
[ "# robust_llm_pythia-70m_mz-135_WordLength_n-its-10-seed-3\n\nThis model is a fine-tuned version of EleutherAI/pythia-70m on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 3\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-70m #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# robust_llm_pythia-70m_mz-135_WordLength_n-its-10-seed-3\n\nThis model is a fine-tuned version of EleutherAI/pythia-70m on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 3\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ 70, 58, 7, 9, 9, 4, 93, 5, 40 ]
[ "TAGS\n#transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-70m #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# robust_llm_pythia-70m_mz-135_WordLength_n-its-10-seed-3\n\nThis model is a fine-tuned version of EleutherAI/pythia-70m on an unknown dataset.## Model description\n\nMore information needed## Intended uses & limitations\n\nMore information needed## Training and evaluation data\n\nMore information needed## Training procedure### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 3\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1### Training results### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # robust_llm_pythia-70m_mz-135_WordLength_n-its-10-seed-2 This model is a fine-tuned version of [EleutherAI/pythia-70m](https://huggingface.co/EleutherAI/pythia-70m) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 64 - seed: 2 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.2.1 - Datasets 2.18.0 - Tokenizers 0.15.2
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "base_model": "EleutherAI/pythia-70m", "model-index": [{"name": "robust_llm_pythia-70m_mz-135_WordLength_n-its-10-seed-2", "results": []}]}
AlignmentResearch/robust_llm_pythia-70m_mz-135_WordLength_n-its-10-seed-2
null
[ "transformers", "tensorboard", "safetensors", "gpt_neox", "text-classification", "generated_from_trainer", "base_model:EleutherAI/pythia-70m", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T18:14:34+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-70m #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# robust_llm_pythia-70m_mz-135_WordLength_n-its-10-seed-2 This model is a fine-tuned version of EleutherAI/pythia-70m on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 64 - seed: 2 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.2.1 - Datasets 2.18.0 - Tokenizers 0.15.2
[ "# robust_llm_pythia-70m_mz-135_WordLength_n-its-10-seed-2\n\nThis model is a fine-tuned version of EleutherAI/pythia-70m on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 2\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-70m #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# robust_llm_pythia-70m_mz-135_WordLength_n-its-10-seed-2\n\nThis model is a fine-tuned version of EleutherAI/pythia-70m on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 2\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ 70, 58, 7, 9, 9, 4, 93, 5, 40 ]
[ "TAGS\n#transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-70m #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# robust_llm_pythia-70m_mz-135_WordLength_n-its-10-seed-2\n\nThis model is a fine-tuned version of EleutherAI/pythia-70m on an unknown dataset.## Model description\n\nMore information needed## Intended uses & limitations\n\nMore information needed## Training and evaluation data\n\nMore information needed## Training procedure### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 2\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1### Training results### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
token-classification
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
nlpproject/NER_distilBERT
null
[ "transformers", "safetensors", "distilbert", "token-classification", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-30T18:15:47+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #distilbert #token-classification #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #distilbert #token-classification #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 39, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #distilbert #token-classification #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # adapter_ThaiSC_LLM_Scamper This model is a fine-tuned version of [openthaigpt/openthaigpt-1.0.0-7b-chat](https://huggingface.co/openthaigpt/openthaigpt-1.0.0-7b-chat) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 8e-05 - train_batch_size: 8 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.03 - num_epochs: 3 ### Training results ### Framework versions - PEFT 0.8.2 - Transformers 4.38.0 - Pytorch 2.2.1+cu121 - Datasets 2.17.0 - Tokenizers 0.15.2
{"license": "apache-2.0", "library_name": "peft", "tags": ["trl", "sft", "generated_from_trainer"], "base_model": "openthaigpt/openthaigpt-1.0.0-7b-chat", "model-index": [{"name": "adapter_ThaiSC_LLM_Scamper", "results": []}]}
SuperkingbasSKB/adapter_ThaiSC_LLM_Scamper
null
[ "peft", "tensorboard", "safetensors", "trl", "sft", "generated_from_trainer", "base_model:openthaigpt/openthaigpt-1.0.0-7b-chat", "license:apache-2.0", "region:us" ]
null
2024-04-30T18:16:35+00:00
[]
[]
TAGS #peft #tensorboard #safetensors #trl #sft #generated_from_trainer #base_model-openthaigpt/openthaigpt-1.0.0-7b-chat #license-apache-2.0 #region-us
# adapter_ThaiSC_LLM_Scamper This model is a fine-tuned version of openthaigpt/openthaigpt-1.0.0-7b-chat on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 8e-05 - train_batch_size: 8 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.03 - num_epochs: 3 ### Training results ### Framework versions - PEFT 0.8.2 - Transformers 4.38.0 - Pytorch 2.2.1+cu121 - Datasets 2.17.0 - Tokenizers 0.15.2
[ "# adapter_ThaiSC_LLM_Scamper\n\nThis model is a fine-tuned version of openthaigpt/openthaigpt-1.0.0-7b-chat on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 8e-05\n- train_batch_size: 8\n- eval_batch_size: 4\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- lr_scheduler_warmup_ratio: 0.03\n- num_epochs: 3", "### Training results", "### Framework versions\n\n- PEFT 0.8.2\n- Transformers 4.38.0\n- Pytorch 2.2.1+cu121\n- Datasets 2.17.0\n- Tokenizers 0.15.2" ]
[ "TAGS\n#peft #tensorboard #safetensors #trl #sft #generated_from_trainer #base_model-openthaigpt/openthaigpt-1.0.0-7b-chat #license-apache-2.0 #region-us \n", "# adapter_ThaiSC_LLM_Scamper\n\nThis model is a fine-tuned version of openthaigpt/openthaigpt-1.0.0-7b-chat on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 8e-05\n- train_batch_size: 8\n- eval_batch_size: 4\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- lr_scheduler_warmup_ratio: 0.03\n- num_epochs: 3", "### Training results", "### Framework versions\n\n- PEFT 0.8.2\n- Transformers 4.38.0\n- Pytorch 2.2.1+cu121\n- Datasets 2.17.0\n- Tokenizers 0.15.2" ]
[ 60, 48, 7, 9, 9, 4, 108, 5, 52 ]
[ "TAGS\n#peft #tensorboard #safetensors #trl #sft #generated_from_trainer #base_model-openthaigpt/openthaigpt-1.0.0-7b-chat #license-apache-2.0 #region-us \n# adapter_ThaiSC_LLM_Scamper\n\nThis model is a fine-tuned version of openthaigpt/openthaigpt-1.0.0-7b-chat on an unknown dataset.## Model description\n\nMore information needed## Intended uses & limitations\n\nMore information needed## Training and evaluation data\n\nMore information needed## Training procedure### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 8e-05\n- train_batch_size: 8\n- eval_batch_size: 4\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- lr_scheduler_warmup_ratio: 0.03\n- num_epochs: 3### Training results### Framework versions\n\n- PEFT 0.8.2\n- Transformers 4.38.0\n- Pytorch 2.2.1+cu121\n- Datasets 2.17.0\n- Tokenizers 0.15.2" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # eurus-7b-cost-UI-UC-5e-7 This model is a fine-tuned version of [openbmb/Eurus-7b-sft](https://huggingface.co/openbmb/Eurus-7b-sft) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.7012 - Rewards/chosen: -0.8455 - Rewards/rejected: -0.9219 - Rewards/accuracies: 0.5507 - Rewards/margins: 0.0764 - Rewards/margins Max: 0.9500 - Rewards/margins Min: -0.7111 - Rewards/margins Std: 0.5453 - Logps/rejected: -358.1681 - Logps/chosen: -363.9157 - Logits/rejected: -2.1126 - Logits/chosen: -2.2173 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-07 - train_batch_size: 2 - eval_batch_size: 8 - seed: 42 - distributed_type: multi-GPU - num_devices: 2 - gradient_accumulation_steps: 4 - total_train_batch_size: 16 - total_eval_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Rewards/margins Max | Rewards/margins Min | Rewards/margins Std | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen | |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:-------------------:|:-------------------:|:-------------------:|:--------------:|:------------:|:---------------:|:-------------:| | 0.296 | 1.0 | 2616 | 0.7012 | -0.8455 | -0.9219 | 0.5507 | 0.0764 | 0.9500 | -0.7111 | 0.5453 | -358.1681 | -363.9157 | -2.1126 | -2.2173 | ### Framework versions - PEFT 0.7.1 - Transformers 4.39.0.dev0 - Pytorch 2.1.2+cu121 - Datasets 2.14.6 - Tokenizers 0.15.2
{"license": "apache-2.0", "library_name": "peft", "tags": ["trl", "dpo", "generated_from_trainer"], "base_model": "openbmb/Eurus-7b-sft", "model-index": [{"name": "eurus-7b-cost-UI-UC-5e-7", "results": []}]}
just1nseo/eurus-7b-cost-UI-UC-5e-7
null
[ "peft", "safetensors", "mistral", "trl", "dpo", "generated_from_trainer", "base_model:openbmb/Eurus-7b-sft", "license:apache-2.0", "4-bit", "region:us" ]
null
2024-04-30T18:18:30+00:00
[]
[]
TAGS #peft #safetensors #mistral #trl #dpo #generated_from_trainer #base_model-openbmb/Eurus-7b-sft #license-apache-2.0 #4-bit #region-us
eurus-7b-cost-UI-UC-5e-7 ======================== This model is a fine-tuned version of openbmb/Eurus-7b-sft on the None dataset. It achieves the following results on the evaluation set: * Loss: 0.7012 * Rewards/chosen: -0.8455 * Rewards/rejected: -0.9219 * Rewards/accuracies: 0.5507 * Rewards/margins: 0.0764 * Rewards/margins Max: 0.9500 * Rewards/margins Min: -0.7111 * Rewards/margins Std: 0.5453 * Logps/rejected: -358.1681 * Logps/chosen: -363.9157 * Logits/rejected: -2.1126 * Logits/chosen: -2.2173 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 5e-07 * train\_batch\_size: 2 * eval\_batch\_size: 8 * seed: 42 * distributed\_type: multi-GPU * num\_devices: 2 * gradient\_accumulation\_steps: 4 * total\_train\_batch\_size: 16 * total\_eval\_batch\_size: 16 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: cosine * lr\_scheduler\_warmup\_ratio: 0.1 * num\_epochs: 1 ### Training results ### Framework versions * PEFT 0.7.1 * Transformers 4.39.0.dev0 * Pytorch 2.1.2+cu121 * Datasets 2.14.6 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 5e-07\n* train\\_batch\\_size: 2\n* eval\\_batch\\_size: 8\n* seed: 42\n* distributed\\_type: multi-GPU\n* num\\_devices: 2\n* gradient\\_accumulation\\_steps: 4\n* total\\_train\\_batch\\_size: 16\n* total\\_eval\\_batch\\_size: 16\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: cosine\n* lr\\_scheduler\\_warmup\\_ratio: 0.1\n* num\\_epochs: 1", "### Training results", "### Framework versions\n\n\n* PEFT 0.7.1\n* Transformers 4.39.0.dev0\n* Pytorch 2.1.2+cu121\n* Datasets 2.14.6\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #mistral #trl #dpo #generated_from_trainer #base_model-openbmb/Eurus-7b-sft #license-apache-2.0 #4-bit #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 5e-07\n* train\\_batch\\_size: 2\n* eval\\_batch\\_size: 8\n* seed: 42\n* distributed\\_type: multi-GPU\n* num\\_devices: 2\n* gradient\\_accumulation\\_steps: 4\n* total\\_train\\_batch\\_size: 16\n* total\\_eval\\_batch\\_size: 16\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: cosine\n* lr\\_scheduler\\_warmup\\_ratio: 0.1\n* num\\_epochs: 1", "### Training results", "### Framework versions\n\n\n* PEFT 0.7.1\n* Transformers 4.39.0.dev0\n* Pytorch 2.1.2+cu121\n* Datasets 2.14.6\n* Tokenizers 0.15.2" ]
[ 56, 176, 5, 55 ]
[ "TAGS\n#peft #safetensors #mistral #trl #dpo #generated_from_trainer #base_model-openbmb/Eurus-7b-sft #license-apache-2.0 #4-bit #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 5e-07\n* train\\_batch\\_size: 2\n* eval\\_batch\\_size: 8\n* seed: 42\n* distributed\\_type: multi-GPU\n* num\\_devices: 2\n* gradient\\_accumulation\\_steps: 4\n* total\\_train\\_batch\\_size: 16\n* total\\_eval\\_batch\\_size: 16\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: cosine\n* lr\\_scheduler\\_warmup\\_ratio: 0.1\n* num\\_epochs: 1### Training results### Framework versions\n\n\n* PEFT 0.7.1\n* Transformers 4.39.0.dev0\n* Pytorch 2.1.2+cu121\n* Datasets 2.14.6\n* Tokenizers 0.15.2" ]
null
transformers
# Uploaded model - **Developed by:** zeeshanali01 - **License:** apache-2.0 - **Finetuned from model :** unsloth/Phi-3-mini-4k-instruct-bnb-4bit This mistral model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
{"language": ["en"], "license": "apache-2.0", "tags": ["text-generation-inference", "transformers", "unsloth", "mistral", "trl"], "base_model": "unsloth/Phi-3-mini-4k-instruct-bnb-4bit"}
zeeshanali01/lora_model
null
[ "transformers", "safetensors", "text-generation-inference", "unsloth", "mistral", "trl", "en", "base_model:unsloth/Phi-3-mini-4k-instruct-bnb-4bit", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-04-30T18:20:08+00:00
[]
[ "en" ]
TAGS #transformers #safetensors #text-generation-inference #unsloth #mistral #trl #en #base_model-unsloth/Phi-3-mini-4k-instruct-bnb-4bit #license-apache-2.0 #endpoints_compatible #region-us
# Uploaded model - Developed by: zeeshanali01 - License: apache-2.0 - Finetuned from model : unsloth/Phi-3-mini-4k-instruct-bnb-4bit This mistral model was trained 2x faster with Unsloth and Huggingface's TRL library. <img src="URL width="200"/>
[ "# Uploaded model\n\n- Developed by: zeeshanali01\n- License: apache-2.0\n- Finetuned from model : unsloth/Phi-3-mini-4k-instruct-bnb-4bit\n\nThis mistral model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ "TAGS\n#transformers #safetensors #text-generation-inference #unsloth #mistral #trl #en #base_model-unsloth/Phi-3-mini-4k-instruct-bnb-4bit #license-apache-2.0 #endpoints_compatible #region-us \n", "# Uploaded model\n\n- Developed by: zeeshanali01\n- License: apache-2.0\n- Finetuned from model : unsloth/Phi-3-mini-4k-instruct-bnb-4bit\n\nThis mistral model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ 68, 85 ]
[ "TAGS\n#transformers #safetensors #text-generation-inference #unsloth #mistral #trl #en #base_model-unsloth/Phi-3-mini-4k-instruct-bnb-4bit #license-apache-2.0 #endpoints_compatible #region-us \n# Uploaded model\n\n- Developed by: zeeshanali01\n- License: apache-2.0\n- Finetuned from model : unsloth/Phi-3-mini-4k-instruct-bnb-4bit\n\nThis mistral model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
feature-extraction
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
claudios/unixcoder-base-nine
null
[ "transformers", "safetensors", "roberta", "feature-extraction", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
2024-04-30T18:20:26+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #roberta #feature-extraction #arxiv-1910.09700 #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #roberta #feature-extraction #arxiv-1910.09700 #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 32, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #roberta #feature-extraction #arxiv-1910.09700 #endpoints_compatible #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
null
null
# Model: Grain Classification Model

### Description:
This model is designed to classify 4 different types of grains: noodles, rice, couscous, and oatmeal, using the FastAI library with Fastbook and the ResNet-18 convolutional neural network architecture. <br>
The model files can be found in the "Files and Versions" section.

### Training Data
- Limited dataset of 400 labeled images of grains, with 100 images for each category.
- Obtained using the DuckDuckGo Images API
- Resized to 128x128 pixels to reduce storage and computation usage.
- 3 additional training epochs are performed to fine-tune the model for the grain classification task.

### Metrics
Model performance is evaluated using a confusion matrix, which captures accuracy and precision. Each prediction falls into one of 4 categories:
- True Positive: the model correctly predicts the positive class.
- False Positive: the model predicts the positive class when the actual class is negative.
- True Negative: the model correctly predicts the negative class.
- False Negative: the model predicts the negative class when the actual class is positive.

### Results
In the images below, you can see that there are different shades.
- The darker shade represents correct predictions.
- The lighter shade represents incorrect predictions.

<table>
  <tr>
    <td>First training:</td>
    <td>Second training:</td>
  </tr>
  <tr>
    <td><img src="1GrainFirstTraining.png" alt="First training" width="300"/></td>
    <td><img src="2GrainRetrain.png" alt="Second training" width="300"/></td>
  </tr>
</table>

We can see that the model has improved after cleaning the data:
- it predicts more of the images correctly;
- accuracy is higher in the second training than in the first (the number of darker shades is higher);
- there is less confusion in the second training than in the first (the number of lighter shades is lower).
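The recipe described above can be reproduced roughly as follows. This is an illustrative sketch rather than the original notebook; it assumes fastai and fastbook are installed and that downloaded images land in one folder per class:

```python
# Illustrative sketch of the described pipeline: download ~100 images per class,
# resize to 128x128, fine-tune ResNet-18 for 3 epochs, then inspect the confusion matrix.
from fastbook import search_images_ddg
from fastai.vision.all import (
    DataBlock, ImageBlock, CategoryBlock, RandomSplitter, Resize,
    get_image_files, parent_label, vision_learner, resnet18, error_rate,
    download_images, ClassificationInterpretation,
)

classes = ["noodles", "rice", "couscous", "oatmeal"]
for c in classes:
    urls = search_images_ddg(f"{c} food", max_images=100)   # DuckDuckGo Images API
    download_images(f"grains/{c}", urls=urls)

dls = DataBlock(
    blocks=(ImageBlock, CategoryBlock),
    get_items=get_image_files,
    splitter=RandomSplitter(valid_pct=0.2, seed=42),
    get_y=parent_label,
    item_tfms=Resize(128),            # 128x128 pixels, as described above
).dataloaders("grains", bs=32)

learn = vision_learner(dls, resnet18, metrics=error_rate)
learn.fine_tune(3)                    # the 3 additional fine-tuning epochs

interp = ClassificationInterpretation.from_learner(learn)
interp.plot_confusion_matrix()        # evaluation via confusion matrix
```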
{}
jqjacq/Grain-Detection
null
[ "region:us" ]
null
2024-04-30T18:20:49+00:00
[]
[]
TAGS #region-us
Model: Grain Classification Model ================================= ### Description: This model is design to classify 4 different types of grains: noodles, rice, couscous, and oatmeal by using the FastAI library with Fastbook and ResNet-18 convolutional neural network architecture. The model files can be found in the "Files and Versions" section. ### Training Data * Limited dataset of 400 labeled images of grains, with 100 images for each category. * Obtained using DuckDuckGo Images API * Resized to 128x128 pixels to reduce storage and computation usage. * 3 additional training epochs are performed to fine-tune the model for grain classification task. ### Metrics Model performance are evaluated using confusion matrix. Confusion matrix measures the metric of accuracy and precision. The prediction is in 4 categories: * True positive: Model correctly predicted positive class. * False Positive: Model incorrectly predicted when actual is negative class. * True Negative: Model correctly predict negative class. * False Negative: Model incorrectly predict negative class when actual is positive. ### Results In the images below, you can see that there are different shades. * The darker shade represents correct predictions * The lighter shade represents incorrect predictions. We can see that the model has improved after cleaning the data. * it improved in predicting the images correctly. * there is better accuracy in the second training compared to the first training. The number of darker shades is higher. * there is less confusion in the second training compared to the first training. The number of lighter shades is lower.
[ "### Description:\n\n\nThis model is design to classify 4 different types of grains: noodles, rice, couscous, and oatmeal by using the FastAI library with Fastbook and ResNet-18 convolutional neural network architecture.\n \n\nThe model files can be found in the \"Files and Versions\" section.", "### Training Data\n\n\n* Limited dataset of 400 labeled images of grains, with 100 images for each category.\n* Obtained using DuckDuckGo Images API\n* Resized to 128x128 pixels to reduce storage and computation usage.\n* 3 additional training epochs are performed to fine-tune the model for grain classification task.", "### Metrics\n\n\nModel performance are evaluated using confusion matrix. Confusion matrix measures the metric of accuracy and precision. The prediction is in 4 categories:\n\n\n* True positive: Model correctly predicted positive class.\n* False Positive: Model incorrectly predicted when actual is negative class.\n* True Negative: Model correctly predict negative class.\n* False Negative: Model incorrectly predict negative class when actual is positive.", "### Results\n\n\nIn the images below, you can see that there are different shades.\n\n\n* The darker shade represents correct predictions\n* The lighter shade represents incorrect predictions.\n\n\n\nWe can see that the model has improved after cleaning the data.\n\n\n* it improved in predicting the images correctly.\n* there is better accuracy in the second training compared to the first training. The number of darker shades is higher.\n* there is less confusion in the second training compared to the first training. The number of lighter shades is lower." ]
[ "TAGS\n#region-us \n", "### Description:\n\n\nThis model is design to classify 4 different types of grains: noodles, rice, couscous, and oatmeal by using the FastAI library with Fastbook and ResNet-18 convolutional neural network architecture.\n \n\nThe model files can be found in the \"Files and Versions\" section.", "### Training Data\n\n\n* Limited dataset of 400 labeled images of grains, with 100 images for each category.\n* Obtained using DuckDuckGo Images API\n* Resized to 128x128 pixels to reduce storage and computation usage.\n* 3 additional training epochs are performed to fine-tune the model for grain classification task.", "### Metrics\n\n\nModel performance are evaluated using confusion matrix. Confusion matrix measures the metric of accuracy and precision. The prediction is in 4 categories:\n\n\n* True positive: Model correctly predicted positive class.\n* False Positive: Model incorrectly predicted when actual is negative class.\n* True Negative: Model correctly predict negative class.\n* False Negative: Model incorrectly predict negative class when actual is positive.", "### Results\n\n\nIn the images below, you can see that there are different shades.\n\n\n* The darker shade represents correct predictions\n* The lighter shade represents incorrect predictions.\n\n\n\nWe can see that the model has improved after cleaning the data.\n\n\n* it improved in predicting the images correctly.\n* there is better accuracy in the second training compared to the first training. The number of darker shades is higher.\n* there is less confusion in the second training compared to the first training. The number of lighter shades is lower." ]
[ 5, 68, 67, 77, 101 ]
[ "TAGS\n#region-us \n### Description:\n\n\nThis model is design to classify 4 different types of grains: noodles, rice, couscous, and oatmeal by using the FastAI library with Fastbook and ResNet-18 convolutional neural network architecture.\n \n\nThe model files can be found in the \"Files and Versions\" section.### Training Data\n\n\n* Limited dataset of 400 labeled images of grains, with 100 images for each category.\n* Obtained using DuckDuckGo Images API\n* Resized to 128x128 pixels to reduce storage and computation usage.\n* 3 additional training epochs are performed to fine-tune the model for grain classification task.### Metrics\n\n\nModel performance are evaluated using confusion matrix. Confusion matrix measures the metric of accuracy and precision. The prediction is in 4 categories:\n\n\n* True positive: Model correctly predicted positive class.\n* False Positive: Model incorrectly predicted when actual is negative class.\n* True Negative: Model correctly predict negative class.\n* False Negative: Model incorrectly predict negative class when actual is positive.### Results\n\n\nIn the images below, you can see that there are different shades.\n\n\n* The darker shade represents correct predictions\n* The lighter shade represents incorrect predictions.\n\n\n\nWe can see that the model has improved after cleaning the data.\n\n\n* it improved in predicting the images correctly.\n* there is better accuracy in the second training compared to the first training. The number of darker shades is higher.\n* there is less confusion in the second training compared to the first training. The number of lighter shades is lower." ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # robust_llm_pythia-31m_mz-135_WordLength_n-its-10-seed-3 This model is a fine-tuned version of [EleutherAI/pythia-31m](https://huggingface.co/EleutherAI/pythia-31m) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 64 - seed: 3 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.2.1 - Datasets 2.18.0 - Tokenizers 0.15.2
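For readers who want to see what the listed configuration corresponds to in code, a hedged reconstruction with the Trainer API is sketched below. The dataset, preprocessing, and number of labels are not stated in the card, so `train_ds`, `eval_ds`, and `num_labels=2` are placeholders:

```python
# Hypothetical reconstruction of the listed hyperparameters as a Trainer setup.
from transformers import (
    AutoModelForSequenceClassification, AutoTokenizer, Trainer, TrainingArguments,
)

model_id = "EleutherAI/pythia-31m"
tokenizer = AutoTokenizer.from_pretrained(model_id)
tokenizer.pad_token = tokenizer.eos_token          # pythia tokenizers have no pad token by default
model = AutoModelForSequenceClassification.from_pretrained(model_id, num_labels=2)  # num_labels assumed
model.config.pad_token_id = tokenizer.pad_token_id

args = TrainingArguments(
    output_dir="pythia-31m-wordlength",
    learning_rate=1e-5,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=64,
    num_train_epochs=1,
    lr_scheduler_type="linear",
    seed=3,
)

# trainer = Trainer(model=model, args=args, train_dataset=train_ds, eval_dataset=eval_ds)
# trainer.train()
```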
{"tags": ["generated_from_trainer"], "base_model": "EleutherAI/pythia-31m", "model-index": [{"name": "robust_llm_pythia-31m_mz-135_WordLength_n-its-10-seed-3", "results": []}]}
AlignmentResearch/robust_llm_pythia-31m_mz-135_WordLength_n-its-10-seed-3
null
[ "transformers", "tensorboard", "safetensors", "gpt_neox", "text-classification", "generated_from_trainer", "base_model:EleutherAI/pythia-31m", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T18:21:09+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-31m #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# robust_llm_pythia-31m_mz-135_WordLength_n-its-10-seed-3 This model is a fine-tuned version of EleutherAI/pythia-31m on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 64 - seed: 3 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.2.1 - Datasets 2.18.0 - Tokenizers 0.15.2
[ "# robust_llm_pythia-31m_mz-135_WordLength_n-its-10-seed-3\n\nThis model is a fine-tuned version of EleutherAI/pythia-31m on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 3\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-31m #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# robust_llm_pythia-31m_mz-135_WordLength_n-its-10-seed-3\n\nThis model is a fine-tuned version of EleutherAI/pythia-31m on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 3\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ 62, 58, 7, 9, 9, 4, 93, 5, 40 ]
[ "TAGS\n#transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-31m #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# robust_llm_pythia-31m_mz-135_WordLength_n-its-10-seed-3\n\nThis model is a fine-tuned version of EleutherAI/pythia-31m on an unknown dataset.## Model description\n\nMore information needed## Intended uses & limitations\n\nMore information needed## Training and evaluation data\n\nMore information needed## Training procedure### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 3\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1### Training results### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # robust_llm_pythia-31m_mz-135_WordLength_n-its-10-seed-4 This model is a fine-tuned version of [EleutherAI/pythia-31m](https://huggingface.co/EleutherAI/pythia-31m) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 64 - seed: 4 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.2.1 - Datasets 2.18.0 - Tokenizers 0.15.2
{"tags": ["generated_from_trainer"], "base_model": "EleutherAI/pythia-31m", "model-index": [{"name": "robust_llm_pythia-31m_mz-135_WordLength_n-its-10-seed-4", "results": []}]}
AlignmentResearch/robust_llm_pythia-31m_mz-135_WordLength_n-its-10-seed-4
null
[ "transformers", "tensorboard", "safetensors", "gpt_neox", "text-classification", "generated_from_trainer", "base_model:EleutherAI/pythia-31m", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T18:21:58+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-31m #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# robust_llm_pythia-31m_mz-135_WordLength_n-its-10-seed-4 This model is a fine-tuned version of EleutherAI/pythia-31m on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 64 - seed: 4 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.2.1 - Datasets 2.18.0 - Tokenizers 0.15.2
[ "# robust_llm_pythia-31m_mz-135_WordLength_n-its-10-seed-4\n\nThis model is a fine-tuned version of EleutherAI/pythia-31m on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 4\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-31m #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# robust_llm_pythia-31m_mz-135_WordLength_n-its-10-seed-4\n\nThis model is a fine-tuned version of EleutherAI/pythia-31m on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 4\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ 62, 58, 7, 9, 9, 4, 93, 5, 40 ]
[ "TAGS\n#transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-31m #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# robust_llm_pythia-31m_mz-135_WordLength_n-its-10-seed-4\n\nThis model is a fine-tuned version of EleutherAI/pythia-31m on an unknown dataset.## Model description\n\nMore information needed## Intended uses & limitations\n\nMore information needed## Training and evaluation data\n\nMore information needed## Training procedure### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 4\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1### Training results### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
null
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
pvyas96/mistral_7b_med
null
[ "transformers", "safetensors", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
2024-04-30T18:22:50+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #arxiv-1910.09700 #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #arxiv-1910.09700 #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 26, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #arxiv-1910.09700 #endpoints_compatible #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
text-generation
transformers
This model is a proof of concept in which a regular LLM is fine-tuned to anonymise data.

Using the system prompt:
````
Anonymize :
````
together with a user prompt, the LLM will try to replace sensitive data with the character "^".

# Targets for the anonymisation
The model should be able to recognise:
* Addresses
* Emails
* Phone numbers
* Religious events
* Gender
* IBAN numbers
* Social security numbers
* BBAN numbers
* SWIFT numbers
* Small companies
* Credit cards
* Passports
* Names

# Example

#### Input
My name is Mike Smith I live in Texas in the Street Rosenberg, Texas(TX), 77471. You can contact me over at [email protected] or via phone +13548783461.

#### Expected output
My name is ^^^^ ^^^^^ I live in Texas in the Street ^^^^^^^^^, ^^^^^^^^^, ^^^^^. You can contact me over at ^^^^@^^^^^.com or via phone +^^^^^^^^^^^.
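A minimal usage sketch, assuming the model's chat template accepts a system role (if it does not, the "Anonymize :" instruction can simply be prepended to the user message):

```python
# Sketch: run the anonymiser with the "Anonymize :" system prompt described above.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "BalGu/mistral-7b-anonimiser"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

messages = [
    {"role": "system", "content": "Anonymize :"},
    {"role": "user", "content": (
        "My name is Mike Smith I live in Texas in the Street Rosenberg, "
        "Texas(TX), 77471. You can contact me over at [email protected] "
        "or via phone +13548783461."
    )},
]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
output_ids = model.generate(input_ids, max_new_tokens=128)
print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))
```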
{}
BalGu/mistral-7b-anonimiser
null
[ "transformers", "safetensors", "mistral", "text-generation", "conversational", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T18:23:55+00:00
[]
[]
TAGS #transformers #safetensors #mistral #text-generation #conversational #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
This model is a proof of concept where regular LLM are finetuned to anonimise data. Using a systemprompt: ' and a user prompt the LLM will try to replace sensitive data by the character "^" # Targets for the anonimisation The model should be able to recognise: * Addresses * emails * phone numbers * Religious events * Gendre * IBAN numbers * Social security numbers * Phone numbers * Bban numbers * swift numbers * a small size of company * credit cards * passports * name # Example #### Input My name is Mike Smith I live in Texas in the Street Rosenberg, Texas(TX), 77471. You can contact me over at mike@URL or via phone +13548783461. #### Expected output My name is ^^^^ ^^^^^ I live in Texas in the Street ^^^^^^^^^, ^^^^^^^^^, ^^^^^. You can contact me over at ^^^^@^^^^^.com or via phone +^^^^^^^^^^^.
[ "# Targets for the anonimisation\nThe model should be able to recognise:\n* Addresses\n* emails\n* phone numbers\n* Religious events\n* Gendre\n* IBAN numbers\n* Social security numbers\n* Phone numbers\n* Bban numbers\n* swift numbers\n* a small size of company\n* credit cards\n* passports\n* name", "# Example", "#### Input\nMy name is Mike Smith I live in Texas in the Street Rosenberg, Texas(TX), 77471. You can contact me over at mike@URL or via phone +13548783461.", "#### Expected output\nMy name is ^^^^ ^^^^^ I live in Texas in the Street ^^^^^^^^^, ^^^^^^^^^, ^^^^^. You can contact me over at ^^^^@^^^^^.com or via phone +^^^^^^^^^^^." ]
[ "TAGS\n#transformers #safetensors #mistral #text-generation #conversational #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Targets for the anonimisation\nThe model should be able to recognise:\n* Addresses\n* emails\n* phone numbers\n* Religious events\n* Gendre\n* IBAN numbers\n* Social security numbers\n* Phone numbers\n* Bban numbers\n* swift numbers\n* a small size of company\n* credit cards\n* passports\n* name", "# Example", "#### Input\nMy name is Mike Smith I live in Texas in the Street Rosenberg, Texas(TX), 77471. You can contact me over at mike@URL or via phone +13548783461.", "#### Expected output\nMy name is ^^^^ ^^^^^ I live in Texas in the Street ^^^^^^^^^, ^^^^^^^^^, ^^^^^. You can contact me over at ^^^^@^^^^^.com or via phone +^^^^^^^^^^^." ]
[ 37, 60, 2, 50, 85 ]
[ "TAGS\n#transformers #safetensors #mistral #text-generation #conversational #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# Targets for the anonimisation\nThe model should be able to recognise:\n* Addresses\n* emails\n* phone numbers\n* Religious events\n* Gendre\n* IBAN numbers\n* Social security numbers\n* Phone numbers\n* Bban numbers\n* swift numbers\n* a small size of company\n* credit cards\n* passports\n* name# Example#### Input\nMy name is Mike Smith I live in Texas in the Street Rosenberg, Texas(TX), 77471. You can contact me over at mike@URL or via phone +13548783461.#### Expected output\nMy name is ^^^^ ^^^^^ I live in Texas in the Street ^^^^^^^^^, ^^^^^^^^^, ^^^^^. You can contact me over at ^^^^@^^^^^.com or via phone +^^^^^^^^^^^." ]
null
transformers
# Uploaded model - **Developed by:** traderpedroso - **License:** apache-2.0 - **Finetuned from model :** artificialguybr/llama3-8b-alpacadata-ptbr This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
{"language": ["en"], "license": "apache-2.0", "tags": ["text-generation-inference", "transformers", "unsloth", "llama", "trl"], "base_model": "artificialguybr/llama3-8b-alpacadata-ptbr"}
traderpedroso/llama3-8b-lora
null
[ "transformers", "safetensors", "text-generation-inference", "unsloth", "llama", "trl", "en", "base_model:artificialguybr/llama3-8b-alpacadata-ptbr", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-04-30T18:24:51+00:00
[]
[ "en" ]
TAGS #transformers #safetensors #text-generation-inference #unsloth #llama #trl #en #base_model-artificialguybr/llama3-8b-alpacadata-ptbr #license-apache-2.0 #endpoints_compatible #region-us
# Uploaded model - Developed by: traderpedroso - License: apache-2.0 - Finetuned from model : artificialguybr/llama3-8b-alpacadata-ptbr This llama model was trained 2x faster with Unsloth and Huggingface's TRL library. <img src="URL width="200"/>
[ "# Uploaded model\n\n- Developed by: traderpedroso\n- License: apache-2.0\n- Finetuned from model : artificialguybr/llama3-8b-alpacadata-ptbr\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ "TAGS\n#transformers #safetensors #text-generation-inference #unsloth #llama #trl #en #base_model-artificialguybr/llama3-8b-alpacadata-ptbr #license-apache-2.0 #endpoints_compatible #region-us \n", "# Uploaded model\n\n- Developed by: traderpedroso\n- License: apache-2.0\n- Finetuned from model : artificialguybr/llama3-8b-alpacadata-ptbr\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ 66, 83 ]
[ "TAGS\n#transformers #safetensors #text-generation-inference #unsloth #llama #trl #en #base_model-artificialguybr/llama3-8b-alpacadata-ptbr #license-apache-2.0 #endpoints_compatible #region-us \n# Uploaded model\n\n- Developed by: traderpedroso\n- License: apache-2.0\n- Finetuned from model : artificialguybr/llama3-8b-alpacadata-ptbr\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
multiple-choice
transformers
<!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # Amrit05/swagdata_bert_finetuned This model is a fine-tuned version of [google-bert/bert-base-uncased](https://huggingface.co/google-bert/bert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.3487 - Validation Loss: 0.5780 - Train Accuracy: 0.7975 - Epoch: 1 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': True, 'is_legacy_optimizer': False, 'learning_rate': {'module': 'keras.optimizers.schedules', 'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 5e-05, 'decay_steps': 9192, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}, 'registered_name': None}, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Train Accuracy | Epoch | |:----------:|:---------------:|:--------------:|:-----:| | 0.8216 | 0.5667 | 0.7833 | 0 | | 0.3487 | 0.5780 | 0.7975 | 1 | ### Framework versions - Transformers 4.39.3 - TensorFlow 2.15.0 - Datasets 2.18.0 - Tokenizers 0.15.2
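The optimizer dictionary reported above maps onto the following Keras construction. This is a reconstruction from the listed values, not the original training script, and the model class and loss handling are assumptions:

```python
# Sketch: rebuild the reported Adam + PolynomialDecay schedule in TensorFlow/Keras.
import tensorflow as tf
from transformers import TFAutoModelForMultipleChoice

model = TFAutoModelForMultipleChoice.from_pretrained("google-bert/bert-base-uncased")

lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
    initial_learning_rate=5e-5,   # initial_learning_rate from the card
    decay_steps=9192,             # decay_steps from the card
    end_learning_rate=0.0,
    power=1.0,
)
optimizer = tf.keras.optimizers.Adam(
    learning_rate=lr_schedule, beta_1=0.9, beta_2=0.999, epsilon=1e-8
)
model.compile(optimizer=optimizer, metrics=["accuracy"])  # transformers computes the loss internally
```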
{"license": "apache-2.0", "tags": ["generated_from_keras_callback"], "base_model": "google-bert/bert-base-uncased", "model-index": [{"name": "Amrit05/swagdata_bert_finetuned", "results": []}]}
Amrit05/swagdata_bert_finetuned
null
[ "transformers", "tf", "bert", "multiple-choice", "generated_from_keras_callback", "base_model:google-bert/bert-base-uncased", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-04-30T18:25:08+00:00
[]
[]
TAGS #transformers #tf #bert #multiple-choice #generated_from_keras_callback #base_model-google-bert/bert-base-uncased #license-apache-2.0 #endpoints_compatible #region-us
Amrit05/swagdata\_bert\_finetuned ================================= This model is a fine-tuned version of google-bert/bert-base-uncased on an unknown dataset. It achieves the following results on the evaluation set: * Train Loss: 0.3487 * Validation Loss: 0.5780 * Train Accuracy: 0.7975 * Epoch: 1 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * optimizer: {'name': 'Adam', 'weight\_decay': None, 'clipnorm': None, 'global\_clipnorm': None, 'clipvalue': None, 'use\_ema': False, 'ema\_momentum': 0.99, 'ema\_overwrite\_frequency': None, 'jit\_compile': True, 'is\_legacy\_optimizer': False, 'learning\_rate': {'module': 'keras.optimizers.schedules', 'class\_name': 'PolynomialDecay', 'config': {'initial\_learning\_rate': 5e-05, 'decay\_steps': 9192, 'end\_learning\_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}, 'registered\_name': None}, 'beta\_1': 0.9, 'beta\_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False} * training\_precision: float32 ### Training results ### Framework versions * Transformers 4.39.3 * TensorFlow 2.15.0 * Datasets 2.18.0 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* optimizer: {'name': 'Adam', 'weight\\_decay': None, 'clipnorm': None, 'global\\_clipnorm': None, 'clipvalue': None, 'use\\_ema': False, 'ema\\_momentum': 0.99, 'ema\\_overwrite\\_frequency': None, 'jit\\_compile': True, 'is\\_legacy\\_optimizer': False, 'learning\\_rate': {'module': 'keras.optimizers.schedules', 'class\\_name': 'PolynomialDecay', 'config': {'initial\\_learning\\_rate': 5e-05, 'decay\\_steps': 9192, 'end\\_learning\\_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}, 'registered\\_name': None}, 'beta\\_1': 0.9, 'beta\\_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False}\n* training\\_precision: float32", "### Training results", "### Framework versions\n\n\n* Transformers 4.39.3\n* TensorFlow 2.15.0\n* Datasets 2.18.0\n* Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #tf #bert #multiple-choice #generated_from_keras_callback #base_model-google-bert/bert-base-uncased #license-apache-2.0 #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* optimizer: {'name': 'Adam', 'weight\\_decay': None, 'clipnorm': None, 'global\\_clipnorm': None, 'clipvalue': None, 'use\\_ema': False, 'ema\\_momentum': 0.99, 'ema\\_overwrite\\_frequency': None, 'jit\\_compile': True, 'is\\_legacy\\_optimizer': False, 'learning\\_rate': {'module': 'keras.optimizers.schedules', 'class\\_name': 'PolynomialDecay', 'config': {'initial\\_learning\\_rate': 5e-05, 'decay\\_steps': 9192, 'end\\_learning\\_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}, 'registered\\_name': None}, 'beta\\_1': 0.9, 'beta\\_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False}\n* training\\_precision: float32", "### Training results", "### Framework versions\n\n\n* Transformers 4.39.3\n* TensorFlow 2.15.0\n* Datasets 2.18.0\n* Tokenizers 0.15.2" ]
[ 54, 292, 5, 38 ]
[ "TAGS\n#transformers #tf #bert #multiple-choice #generated_from_keras_callback #base_model-google-bert/bert-base-uncased #license-apache-2.0 #endpoints_compatible #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* optimizer: {'name': 'Adam', 'weight\\_decay': None, 'clipnorm': None, 'global\\_clipnorm': None, 'clipvalue': None, 'use\\_ema': False, 'ema\\_momentum': 0.99, 'ema\\_overwrite\\_frequency': None, 'jit\\_compile': True, 'is\\_legacy\\_optimizer': False, 'learning\\_rate': {'module': 'keras.optimizers.schedules', 'class\\_name': 'PolynomialDecay', 'config': {'initial\\_learning\\_rate': 5e-05, 'decay\\_steps': 9192, 'end\\_learning\\_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}, 'registered\\_name': None}, 'beta\\_1': 0.9, 'beta\\_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False}\n* training\\_precision: float32### Training results### Framework versions\n\n\n* Transformers 4.39.3\n* TensorFlow 2.15.0\n* Datasets 2.18.0\n* Tokenizers 0.15.2" ]
text-generation
transformers
# Uploaded model - **Developed by:** traderpedroso - **License:** apache-2.0 - **Finetuned from model :** artificialguybr/llama3-8b-alpacadata-ptbr This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
{"language": ["en"], "license": "apache-2.0", "tags": ["text-generation-inference", "transformers", "unsloth", "llama", "trl", "sft"], "base_model": "artificialguybr/llama3-8b-alpacadata-ptbr"}
traderpedroso/llama3
null
[ "transformers", "safetensors", "llama", "text-generation", "text-generation-inference", "unsloth", "trl", "sft", "conversational", "en", "base_model:artificialguybr/llama3-8b-alpacadata-ptbr", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-30T18:25:09+00:00
[]
[ "en" ]
TAGS #transformers #safetensors #llama #text-generation #text-generation-inference #unsloth #trl #sft #conversational #en #base_model-artificialguybr/llama3-8b-alpacadata-ptbr #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us
# Uploaded model - Developed by: traderpedroso - License: apache-2.0 - Finetuned from model : artificialguybr/llama3-8b-alpacadata-ptbr This llama model was trained 2x faster with Unsloth and Huggingface's TRL library. <img src="URL width="200"/>
[ "# Uploaded model\n\n- Developed by: traderpedroso\n- License: apache-2.0\n- Finetuned from model : artificialguybr/llama3-8b-alpacadata-ptbr\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #text-generation-inference #unsloth #trl #sft #conversational #en #base_model-artificialguybr/llama3-8b-alpacadata-ptbr #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n", "# Uploaded model\n\n- Developed by: traderpedroso\n- License: apache-2.0\n- Finetuned from model : artificialguybr/llama3-8b-alpacadata-ptbr\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ 81, 83 ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #text-generation-inference #unsloth #trl #sft #conversational #en #base_model-artificialguybr/llama3-8b-alpacadata-ptbr #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n# Uploaded model\n\n- Developed by: traderpedroso\n- License: apache-2.0\n- Finetuned from model : artificialguybr/llama3-8b-alpacadata-ptbr\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/emillykkejensen/LLM-instruct/runs/do7hs1j9) # Phi-3-mini-4k-instruct-dansk This model is a fine-tuned version of [microsoft/Phi-3-mini-4k-instruct](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct) on the [kobprof/skolegpt-instruct](https://huggingface.co/datasets/kobprof/skolegpt-instruct) dataset. It achieves the following results on the evaluation set: - Loss: 1.5493 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - distributed_type: multi-GPU - num_devices: 4 - total_train_batch_size: 16 - total_eval_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.2 - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.41.0.dev0 - Pytorch 2.2.0 - Datasets 2.19.0 - Tokenizers 0.19.1
{"license": "mit", "tags": ["trl", "sft", "generated_from_trainer", "danish"], "datasets": ["kobprof/skolegpt-instruct"], "base_model": "microsoft/Phi-3-mini-4k-instruct", "model-index": [{"name": "Phi-3-mini-4k-instruct-dansk", "results": []}]}
emillykkejensen/Phi-3-mini-4k-instruct-dansk
null
[ "transformers", "safetensors", "phi3", "text-generation", "trl", "sft", "generated_from_trainer", "danish", "conversational", "custom_code", "dataset:kobprof/skolegpt-instruct", "base_model:microsoft/Phi-3-mini-4k-instruct", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-30T18:25:14+00:00
[]
[]
TAGS #transformers #safetensors #phi3 #text-generation #trl #sft #generated_from_trainer #danish #conversational #custom_code #dataset-kobprof/skolegpt-instruct #base_model-microsoft/Phi-3-mini-4k-instruct #license-mit #autotrain_compatible #endpoints_compatible #region-us
<img src="URL alt="Visualize in Weights & Biases" width="200" height="32"/> # Phi-3-mini-4k-instruct-dansk This model is a fine-tuned version of microsoft/Phi-3-mini-4k-instruct on the kobprof/skolegpt-instruct dataset. It achieves the following results on the evaluation set: - Loss: 1.5493 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - distributed_type: multi-GPU - num_devices: 4 - total_train_batch_size: 16 - total_eval_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.2 - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.41.0.dev0 - Pytorch 2.2.0 - Datasets 2.19.0 - Tokenizers 0.19.1
[ "# Phi-3-mini-4k-instruct-dansk\n\nThis model is a fine-tuned version of microsoft/Phi-3-mini-4k-instruct on the kobprof/skolegpt-instruct dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 1.5493", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 4\n- eval_batch_size: 4\n- seed: 42\n- distributed_type: multi-GPU\n- num_devices: 4\n- total_train_batch_size: 16\n- total_eval_batch_size: 16\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: cosine\n- lr_scheduler_warmup_ratio: 0.2\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.41.0.dev0\n- Pytorch 2.2.0\n- Datasets 2.19.0\n- Tokenizers 0.19.1" ]
[ "TAGS\n#transformers #safetensors #phi3 #text-generation #trl #sft #generated_from_trainer #danish #conversational #custom_code #dataset-kobprof/skolegpt-instruct #base_model-microsoft/Phi-3-mini-4k-instruct #license-mit #autotrain_compatible #endpoints_compatible #region-us \n", "# Phi-3-mini-4k-instruct-dansk\n\nThis model is a fine-tuned version of microsoft/Phi-3-mini-4k-instruct on the kobprof/skolegpt-instruct dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 1.5493", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 4\n- eval_batch_size: 4\n- seed: 42\n- distributed_type: multi-GPU\n- num_devices: 4\n- total_train_batch_size: 16\n- total_eval_batch_size: 16\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: cosine\n- lr_scheduler_warmup_ratio: 0.2\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.41.0.dev0\n- Pytorch 2.2.0\n- Datasets 2.19.0\n- Tokenizers 0.19.1" ]
[ 87, 73, 7, 9, 9, 4, 147, 5, 43 ]
[ "TAGS\n#transformers #safetensors #phi3 #text-generation #trl #sft #generated_from_trainer #danish #conversational #custom_code #dataset-kobprof/skolegpt-instruct #base_model-microsoft/Phi-3-mini-4k-instruct #license-mit #autotrain_compatible #endpoints_compatible #region-us \n# Phi-3-mini-4k-instruct-dansk\n\nThis model is a fine-tuned version of microsoft/Phi-3-mini-4k-instruct on the kobprof/skolegpt-instruct dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 1.5493## Model description\n\nMore information needed## Intended uses & limitations\n\nMore information needed## Training and evaluation data\n\nMore information needed## Training procedure### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 4\n- eval_batch_size: 4\n- seed: 42\n- distributed_type: multi-GPU\n- num_devices: 4\n- total_train_batch_size: 16\n- total_eval_batch_size: 16\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: cosine\n- lr_scheduler_warmup_ratio: 0.2\n- num_epochs: 1### Training results### Framework versions\n\n- Transformers 4.41.0.dev0\n- Pytorch 2.2.0\n- Datasets 2.19.0\n- Tokenizers 0.19.1" ]
feature-extraction
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
claudios/unixcoder-base
null
[ "transformers", "safetensors", "roberta", "feature-extraction", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
2024-04-30T18:28:11+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #roberta #feature-extraction #arxiv-1910.09700 #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #roberta #feature-extraction #arxiv-1910.09700 #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 32, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #roberta #feature-extraction #arxiv-1910.09700 #endpoints_compatible #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
automatic-speech-recognition
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # whisper This model is a fine-tuned version of [openai/whisper-tiny.en](https://huggingface.co/openai/whisper-tiny.en) on the tiny dataset. It achieves the following results on the evaluation set: - Loss: 3.9871 - Wer: 83.6108 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 64 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:------:|:----:|:---------------:|:-------:| | 3.965 | 0.1408 | 10 | 3.9871 | 83.6108 | ### Framework versions - Transformers 4.40.1 - Pytorch 2.2.1+cu121 - Datasets 2.19.1.dev0 - Tokenizers 0.19.1
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "metrics": ["wer"], "base_model": "openai/whisper-tiny.en", "model-index": [{"name": "whisper", "results": []}]}
khaingsmon/whisper
null
[ "transformers", "tensorboard", "safetensors", "whisper", "automatic-speech-recognition", "generated_from_trainer", "base_model:openai/whisper-tiny.en", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-04-30T18:28:18+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #whisper #automatic-speech-recognition #generated_from_trainer #base_model-openai/whisper-tiny.en #license-apache-2.0 #endpoints_compatible #region-us
whisper ======= This model is a fine-tuned version of openai/URL on the tiny dataset. It achieves the following results on the evaluation set: * Loss: 3.9871 * Wer: 83.6108 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 1e-05 * train\_batch\_size: 64 * eval\_batch\_size: 8 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * lr\_scheduler\_warmup\_steps: 500 * training\_steps: 10 ### Training results ### Framework versions * Transformers 4.40.1 * Pytorch 2.2.1+cu121 * Datasets 2.19.1.dev0 * Tokenizers 0.19.1
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 1e-05\n* train\\_batch\\_size: 64\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* lr\\_scheduler\\_warmup\\_steps: 500\n* training\\_steps: 10", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.1\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.1.dev0\n* Tokenizers 0.19.1" ]
[ "TAGS\n#transformers #tensorboard #safetensors #whisper #automatic-speech-recognition #generated_from_trainer #base_model-openai/whisper-tiny.en #license-apache-2.0 #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 1e-05\n* train\\_batch\\_size: 64\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* lr\\_scheduler\\_warmup\\_steps: 500\n* training\\_steps: 10", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.1\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.1.dev0\n* Tokenizers 0.19.1" ]
[ 54, 115, 5, 47 ]
[ "TAGS\n#transformers #tensorboard #safetensors #whisper #automatic-speech-recognition #generated_from_trainer #base_model-openai/whisper-tiny.en #license-apache-2.0 #endpoints_compatible #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 1e-05\n* train\\_batch\\_size: 64\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* lr\\_scheduler\\_warmup\\_steps: 500\n* training\\_steps: 10### Training results### Framework versions\n\n\n* Transformers 4.40.1\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.1.dev0\n* Tokenizers 0.19.1" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuning-sentiment-model-3000-samples This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.2903 - Accuracy: 0.88 - F1: 0.8808 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results ### Framework versions - Transformers 4.30.0 - Pytorch 2.2.1+cu121 - Datasets 2.19.0 - Tokenizers 0.13.3
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "metrics": ["accuracy", "f1"], "model-index": [{"name": "finetuning-sentiment-model-3000-samples", "results": []}]}
abrario/finetuning-sentiment-model-3000-samples
null
[ "transformers", "pytorch", "tensorboard", "distilbert", "text-classification", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-30T18:29:50+00:00
[]
[]
TAGS #transformers #pytorch #tensorboard #distilbert #text-classification #generated_from_trainer #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us
# finetuning-sentiment-model-3000-samples This model is a fine-tuned version of distilbert-base-uncased on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.2903 - Accuracy: 0.88 - F1: 0.8808 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results ### Framework versions - Transformers 4.30.0 - Pytorch 2.2.1+cu121 - Datasets 2.19.0 - Tokenizers 0.13.3
[ "# finetuning-sentiment-model-3000-samples\n\nThis model is a fine-tuned version of distilbert-base-uncased on an unknown dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 0.2903\n- Accuracy: 0.88\n- F1: 0.8808", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 16\n- eval_batch_size: 16\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 2", "### Training results", "### Framework versions\n\n- Transformers 4.30.0\n- Pytorch 2.2.1+cu121\n- Datasets 2.19.0\n- Tokenizers 0.13.3" ]
[ "TAGS\n#transformers #pytorch #tensorboard #distilbert #text-classification #generated_from_trainer #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n", "# finetuning-sentiment-model-3000-samples\n\nThis model is a fine-tuned version of distilbert-base-uncased on an unknown dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 0.2903\n- Accuracy: 0.88\n- F1: 0.8808", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 16\n- eval_batch_size: 16\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 2", "### Training results", "### Framework versions\n\n- Transformers 4.30.0\n- Pytorch 2.2.1+cu121\n- Datasets 2.19.0\n- Tokenizers 0.13.3" ]
[ 47, 66, 7, 9, 9, 4, 93, 5, 44 ]
[ "TAGS\n#transformers #pytorch #tensorboard #distilbert #text-classification #generated_from_trainer #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n# finetuning-sentiment-model-3000-samples\n\nThis model is a fine-tuned version of distilbert-base-uncased on an unknown dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 0.2903\n- Accuracy: 0.88\n- F1: 0.8808## Model description\n\nMore information needed## Intended uses & limitations\n\nMore information needed## Training and evaluation data\n\nMore information needed## Training procedure### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 16\n- eval_batch_size: 16\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 2### Training results### Framework versions\n\n- Transformers 4.30.0\n- Pytorch 2.2.1+cu121\n- Datasets 2.19.0\n- Tokenizers 0.13.3" ]
null
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # segformer-b0-scene-parse-150 This model is a fine-tuned version of [nvidia/mit-b0](https://huggingface.co/nvidia/mit-b0) on the scene_parse_150 dataset. It achieves the following results on the evaluation set: - Loss: 1.8839 - Mean Iou: 0.0900 - Mean Accuracy: 0.1366 - Overall Accuracy: 0.3644 - Accuracy Wall: nan - Accuracy Building: nan - Accuracy Sky: nan - Accuracy Floor: 0.8407 - Accuracy Tree: nan - Accuracy Ceiling: 0.5637 - Accuracy Road: nan - Accuracy Bed : nan - Accuracy Windowpane: 0.4489 - Accuracy Grass: nan - Accuracy Cabinet: 0.0771 - Accuracy Sidewalk: nan - Accuracy Person: nan - Accuracy Earth: nan - Accuracy Door: 0.3647 - Accuracy Table: 0.0 - Accuracy Mountain: nan - Accuracy Plant: 0.0 - Accuracy Curtain: 0.0604 - Accuracy Chair: 0.0 - Accuracy Car: nan - Accuracy Water: nan - Accuracy Painting: 0.0 - Accuracy Sofa: nan - Accuracy Shelf: nan - Accuracy House: nan - Accuracy Sea: nan - Accuracy Mirror: 0.2133 - Accuracy Rug: 0.0 - Accuracy Field: nan - Accuracy Armchair: nan - Accuracy Seat: nan - Accuracy Fence: nan - Accuracy Desk: nan - Accuracy Rock: nan - Accuracy Wardrobe: nan - Accuracy Lamp: 0.0 - Accuracy Bathtub: 0.3360 - Accuracy Railing: nan - Accuracy Cushion: nan - Accuracy Base: nan - Accuracy Box: 0.0 - Accuracy Column: nan - Accuracy Signboard: nan - Accuracy Chest of drawers: nan - Accuracy Counter: nan - Accuracy Sand: nan - Accuracy Sink: 0.2925 - Accuracy Skyscraper: nan - Accuracy Fireplace: nan - Accuracy Refrigerator: nan - Accuracy Grandstand: nan - Accuracy Path: nan - Accuracy Stairs: nan - Accuracy Runway: nan - Accuracy Case: nan - Accuracy Pool table: nan - Accuracy Pillow: nan - Accuracy Screen door: nan - Accuracy Stairway: nan - Accuracy River: nan - Accuracy Bridge: nan - Accuracy Bookcase: nan - Accuracy Blind: nan - Accuracy Coffee table: nan - Accuracy Toilet: 0.3142 - Accuracy Flower: 0.0 - Accuracy Book: nan - Accuracy Hill: nan - Accuracy Bench: nan - Accuracy Countertop: 0.0 - Accuracy Stove: nan - Accuracy Palm: nan - Accuracy Kitchen island: nan - Accuracy Computer: nan - Accuracy Swivel chair: nan - Accuracy Boat: nan - Accuracy Bar: nan - Accuracy Arcade machine: nan - Accuracy Hovel: nan - Accuracy Bus: nan - Accuracy Towel: 0.1774 - Accuracy Light: 0.0 - Accuracy Truck: nan - Accuracy Tower: nan - Accuracy Chandelier: nan - Accuracy Awning: nan - Accuracy Streetlight: nan - Accuracy Booth: nan - Accuracy Television receiver: nan - Accuracy Airplane: nan - Accuracy Dirt track: nan - Accuracy Apparel: nan - Accuracy Pole: nan - Accuracy Land: nan - Accuracy Bannister: nan - Accuracy Escalator: nan - Accuracy Ottoman: nan - Accuracy Bottle: nan - Accuracy Buffet: nan - Accuracy Poster: nan - Accuracy Stage: nan - Accuracy Van: nan - Accuracy Ship: nan - Accuracy Fountain: nan - Accuracy Conveyer belt: nan - Accuracy Canopy: nan - Accuracy Washer: nan - Accuracy Plaything: nan - Accuracy Swimming pool: nan - Accuracy Stool: nan - Accuracy Barrel: nan - Accuracy Basket: 0.0 - Accuracy Waterfall: nan - Accuracy Tent: nan - Accuracy Bag: 0.0 - Accuracy Minibike: nan - Accuracy Cradle: nan - Accuracy Oven: nan - Accuracy Ball: nan - Accuracy Food: nan - Accuracy Step: nan - Accuracy Tank: nan - Accuracy Trade name: nan - Accuracy Microwave: nan - Accuracy Pot: nan - Accuracy Animal: nan - Accuracy Bicycle: nan - Accuracy Lake: nan - Accuracy 
Dishwasher: nan - Accuracy Screen: nan - Accuracy Blanket: nan - Accuracy Sculpture: nan - Accuracy Hood: nan - Accuracy Sconce: 0.0 - Accuracy Vase: 0.0 - Accuracy Traffic light: nan - Accuracy Tray: nan - Accuracy Ashcan: nan - Accuracy Fan: nan - Accuracy Pier: nan - Accuracy Crt screen: nan - Accuracy Plate: nan - Accuracy Monitor: nan - Accuracy Bulletin board: nan - Accuracy Shower: 0.0 - Accuracy Radiator: nan - Accuracy Glass: 0.0 - Accuracy Clock: nan - Accuracy Flag: nan - Iou Wall: 0.0 - Iou Building: nan - Iou Sky: nan - Iou Floor: 0.7245 - Iou Tree: nan - Iou Ceiling: 0.5304 - Iou Road: nan - Iou Bed : nan - Iou Windowpane: 0.3857 - Iou Grass: nan - Iou Cabinet: 0.0317 - Iou Sidewalk: nan - Iou Person: 0.0 - Iou Earth: nan - Iou Door: 0.2767 - Iou Table: 0.0 - Iou Mountain: nan - Iou Plant: 0.0 - Iou Curtain: 0.0558 - Iou Chair: 0.0 - Iou Car: nan - Iou Water: nan - Iou Painting: 0.0 - Iou Sofa: nan - Iou Shelf: nan - Iou House: nan - Iou Sea: nan - Iou Mirror: 0.0824 - Iou Rug: 0.0 - Iou Field: nan - Iou Armchair: nan - Iou Seat: nan - Iou Fence: nan - Iou Desk: nan - Iou Rock: nan - Iou Wardrobe: nan - Iou Lamp: 0.0 - Iou Bathtub: 0.2056 - Iou Railing: nan - Iou Cushion: nan - Iou Base: nan - Iou Box: 0.0 - Iou Column: nan - Iou Signboard: nan - Iou Chest of drawers: nan - Iou Counter: nan - Iou Sand: nan - Iou Sink: 0.1332 - Iou Skyscraper: nan - Iou Fireplace: nan - Iou Refrigerator: nan - Iou Grandstand: nan - Iou Path: nan - Iou Stairs: nan - Iou Runway: nan - Iou Case: nan - Iou Pool table: nan - Iou Pillow: nan - Iou Screen door: 0.0 - Iou Stairway: nan - Iou River: nan - Iou Bridge: nan - Iou Bookcase: nan - Iou Blind: nan - Iou Coffee table: nan - Iou Toilet: 0.1932 - Iou Flower: 0.0 - Iou Book: nan - Iou Hill: nan - Iou Bench: nan - Iou Countertop: 0.0 - Iou Stove: nan - Iou Palm: nan - Iou Kitchen island: nan - Iou Computer: nan - Iou Swivel chair: nan - Iou Boat: nan - Iou Bar: nan - Iou Arcade machine: nan - Iou Hovel: nan - Iou Bus: nan - Iou Towel: 0.0818 - Iou Light: 0.0 - Iou Truck: nan - Iou Tower: nan - Iou Chandelier: nan - Iou Awning: nan - Iou Streetlight: nan - Iou Booth: nan - Iou Television receiver: nan - Iou Airplane: nan - Iou Dirt track: nan - Iou Apparel: nan - Iou Pole: nan - Iou Land: nan - Iou Bannister: nan - Iou Escalator: nan - Iou Ottoman: nan - Iou Bottle: nan - Iou Buffet: nan - Iou Poster: nan - Iou Stage: nan - Iou Van: nan - Iou Ship: nan - Iou Fountain: nan - Iou Conveyer belt: nan - Iou Canopy: nan - Iou Washer: nan - Iou Plaything: nan - Iou Swimming pool: nan - Iou Stool: nan - Iou Barrel: nan - Iou Basket: 0.0 - Iou Waterfall: nan - Iou Tent: nan - Iou Bag: 0.0 - Iou Minibike: nan - Iou Cradle: nan - Iou Oven: nan - Iou Ball: nan - Iou Food: nan - Iou Step: nan - Iou Tank: nan - Iou Trade name: nan - Iou Microwave: nan - Iou Pot: nan - Iou Animal: nan - Iou Bicycle: nan - Iou Lake: nan - Iou Dishwasher: nan - Iou Screen: nan - Iou Blanket: nan - Iou Sculpture: nan - Iou Hood: nan - Iou Sconce: 0.0 - Iou Vase: 0.0 - Iou Traffic light: nan - Iou Tray: nan - Iou Ashcan: nan - Iou Fan: nan - Iou Pier: nan - Iou Crt screen: nan - Iou Plate: nan - Iou Monitor: nan - Iou Bulletin board: nan - Iou Shower: 0.0 - Iou Radiator: nan - Iou Glass: 0.0 - Iou Clock: nan - Iou Flag: nan ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: 
- learning_rate: 6e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 ### Training results | Training Loss | Epoch | Step | Validation Loss | Mean Iou | Mean Accuracy | Overall Accuracy | Accuracy Wall | Accuracy Building | Accuracy Sky | Accuracy Floor | Accuracy Tree | Accuracy Ceiling | Accuracy Road | Accuracy Bed | Accuracy Windowpane | Accuracy Grass | Accuracy Cabinet | Accuracy Sidewalk | Accuracy Person | Accuracy Earth | Accuracy Door | Accuracy Table | Accuracy Mountain | Accuracy Plant | Accuracy Curtain | Accuracy Chair | Accuracy Car | Accuracy Water | Accuracy Painting | Accuracy Sofa | Accuracy Shelf | Accuracy House | Accuracy Sea | Accuracy Mirror | Accuracy Rug | Accuracy Field | Accuracy Armchair | Accuracy Seat | Accuracy Fence | Accuracy Desk | Accuracy Rock | Accuracy Wardrobe | Accuracy Lamp | Accuracy Bathtub | Accuracy Railing | Accuracy Cushion | Accuracy Base | Accuracy Box | Accuracy Column | Accuracy Signboard | Accuracy Chest of drawers | Accuracy Counter | Accuracy Sand | Accuracy Sink | Accuracy Skyscraper | Accuracy Fireplace | Accuracy Refrigerator | Accuracy Grandstand | Accuracy Path | Accuracy Stairs | Accuracy Runway | Accuracy Case | Accuracy Pool table | Accuracy Pillow | Accuracy Screen door | Accuracy Stairway | Accuracy River | Accuracy Bridge | Accuracy Bookcase | Accuracy Blind | Accuracy Coffee table | Accuracy Toilet | Accuracy Flower | Accuracy Book | Accuracy Hill | Accuracy Bench | Accuracy Countertop | Accuracy Stove | Accuracy Palm | Accuracy Kitchen island | Accuracy Computer | Accuracy Swivel chair | Accuracy Boat | Accuracy Bar | Accuracy Arcade machine | Accuracy Hovel | Accuracy Bus | Accuracy Towel | Accuracy Light | Accuracy Truck | Accuracy Tower | Accuracy Chandelier | Accuracy Awning | Accuracy Streetlight | Accuracy Booth | Accuracy Television receiver | Accuracy Airplane | Accuracy Dirt track | Accuracy Apparel | Accuracy Pole | Accuracy Land | Accuracy Bannister | Accuracy Escalator | Accuracy Ottoman | Accuracy Bottle | Accuracy Buffet | Accuracy Poster | Accuracy Stage | Accuracy Van | Accuracy Ship | Accuracy Fountain | Accuracy Conveyer belt | Accuracy Canopy | Accuracy Washer | Accuracy Plaything | Accuracy Swimming pool | Accuracy Stool | Accuracy Barrel | Accuracy Basket | Accuracy Waterfall | Accuracy Tent | Accuracy Bag | Accuracy Minibike | Accuracy Cradle | Accuracy Oven | Accuracy Ball | Accuracy Food | Accuracy Step | Accuracy Tank | Accuracy Trade name | Accuracy Microwave | Accuracy Pot | Accuracy Animal | Accuracy Bicycle | Accuracy Lake | Accuracy Dishwasher | Accuracy Screen | Accuracy Blanket | Accuracy Sculpture | Accuracy Hood | Accuracy Sconce | Accuracy Vase | Accuracy Traffic light | Accuracy Tray | Accuracy Ashcan | Accuracy Fan | Accuracy Pier | Accuracy Crt screen | Accuracy Plate | Accuracy Monitor | Accuracy Bulletin board | Accuracy Shower | Accuracy Radiator | Accuracy Glass | Accuracy Clock | Accuracy Flag | Iou Wall | Iou Building | Iou Sky | Iou Floor | Iou Tree | Iou Ceiling | Iou Road | Iou Bed | Iou Windowpane | Iou Grass | Iou Cabinet | Iou Sidewalk | Iou Person | Iou Earth | Iou Door | Iou Table | Iou Mountain | Iou Plant | Iou Curtain | Iou Chair | Iou Car | Iou Water | Iou Painting | Iou Sofa | Iou Shelf | Iou House | Iou Sea | Iou Mirror | Iou Rug | Iou Field | Iou Armchair | Iou Seat | Iou Fence | Iou Desk | Iou Rock | Iou Wardrobe | Iou Lamp | Iou Bathtub | Iou Railing | 
Iou Cushion | Iou Base | Iou Box | Iou Column | Iou Signboard | Iou Chest of drawers | Iou Counter | Iou Sand | Iou Sink | Iou Skyscraper | Iou Fireplace | Iou Refrigerator | Iou Grandstand | Iou Path | Iou Stairs | Iou Runway | Iou Case | Iou Pool table | Iou Pillow | Iou Screen door | Iou Stairway | Iou River | Iou Bridge | Iou Bookcase | Iou Blind | Iou Coffee table | Iou Toilet | Iou Flower | Iou Book | Iou Hill | Iou Bench | Iou Countertop | Iou Stove | Iou Palm | Iou Kitchen island | Iou Computer | Iou Swivel chair | Iou Boat | Iou Bar | Iou Arcade machine | Iou Hovel | Iou Bus | Iou Towel | Iou Light | Iou Truck | Iou Tower | Iou Chandelier | Iou Awning | Iou Streetlight | Iou Booth | Iou Television receiver | Iou Airplane | Iou Dirt track | Iou Apparel | Iou Pole | Iou Land | Iou Bannister | Iou Escalator | Iou Ottoman | Iou Bottle | Iou Buffet | Iou Poster | Iou Stage | Iou Van | Iou Ship | Iou Fountain | Iou Conveyer belt | Iou Canopy | Iou Washer | Iou Plaything | Iou Swimming pool | Iou Stool | Iou Barrel | Iou Basket | Iou Waterfall | Iou Tent | Iou Bag | Iou Minibike | Iou Cradle | Iou Oven | Iou Ball | Iou Food | Iou Step | Iou Tank | Iou Trade name | Iou Microwave | Iou Pot | Iou Animal | Iou Bicycle | Iou Lake | Iou Dishwasher | Iou Screen | Iou Blanket | Iou Sculpture | Iou Hood | Iou Sconce | Iou Vase | Iou Traffic light | Iou Tray | Iou Ashcan | Iou Fan | Iou Pier | Iou Crt screen | Iou Plate | Iou Monitor | Iou Bulletin board | Iou Shower | Iou Radiator | Iou Glass | Iou Clock | Iou Flag | |:-------------:|:-----:|:----:|:---------------:|:--------:|:-------------:|:----------------:|:-------------:|:-----------------:|:------------:|:--------------:|:-------------:|:----------------:|:-------------:|:-------------:|:-------------------:|:--------------:|:----------------:|:-----------------:|:---------------:|:--------------:|:-------------:|:--------------:|:-----------------:|:--------------:|:----------------:|:--------------:|:------------:|:--------------:|:-----------------:|:-------------:|:--------------:|:--------------:|:------------:|:---------------:|:------------:|:--------------:|:-----------------:|:-------------:|:--------------:|:-------------:|:-------------:|:-----------------:|:-------------:|:----------------:|:----------------:|:----------------:|:-------------:|:------------:|:---------------:|:------------------:|:-------------------------:|:----------------:|:-------------:|:-------------:|:-------------------:|:------------------:|:---------------------:|:-------------------:|:-------------:|:---------------:|:---------------:|:-------------:|:-------------------:|:---------------:|:--------------------:|:-----------------:|:--------------:|:---------------:|:-----------------:|:--------------:|:---------------------:|:---------------:|:---------------:|:-------------:|:-------------:|:--------------:|:-------------------:|:--------------:|:-------------:|:-----------------------:|:-----------------:|:---------------------:|:-------------:|:------------:|:-----------------------:|:--------------:|:------------:|:--------------:|:--------------:|:--------------:|:--------------:|:-------------------:|:---------------:|:--------------------:|:--------------:|:----------------------------:|:-----------------:|:-------------------:|:----------------:|:-------------:|:-------------:|:------------------:|:------------------:|:----------------:|:---------------:|:---------------:|:---------------:|:--------------:|:------------:|:-------------:|:----
-------------:|:----------------------:|:---------------:|:---------------:|:------------------:|:----------------------:|:--------------:|:---------------:|:---------------:|:------------------:|:-------------:|:------------:|:-----------------:|:---------------:|:-------------:|:-------------:|:-------------:|:-------------:|:-------------:|:-------------------:|:------------------:|:------------:|:---------------:|:----------------:|:-------------:|:-------------------:|:---------------:|:----------------:|:------------------:|:-------------:|:---------------:|:-------------:|:----------------------:|:-------------:|:---------------:|:------------:|:-------------:|:-------------------:|:--------------:|:----------------:|:-----------------------:|:---------------:|:-----------------:|:--------------:|:--------------:|:-------------:|:--------:|:------------:|:-------:|:---------:|:--------:|:-----------:|:--------:|:--------:|:--------------:|:---------:|:-----------:|:------------:|:----------:|:---------:|:--------:|:---------:|:------------:|:---------:|:-----------:|:---------:|:-------:|:---------:|:------------:|:--------:|:---------:|:---------:|:-------:|:----------:|:-------:|:---------:|:------------:|:--------:|:---------:|:--------:|:--------:|:------------:|:--------:|:-----------:|:-----------:|:-----------:|:--------:|:-------:|:----------:|:-------------:|:--------------------:|:-----------:|:--------:|:--------:|:--------------:|:-------------:|:----------------:|:--------------:|:--------:|:----------:|:----------:|:--------:|:--------------:|:----------:|:---------------:|:------------:|:---------:|:----------:|:------------:|:---------:|:----------------:|:----------:|:----------:|:--------:|:--------:|:---------:|:--------------:|:---------:|:--------:|:------------------:|:------------:|:----------------:|:--------:|:-------:|:------------------:|:---------:|:-------:|:---------:|:---------:|:---------:|:---------:|:--------------:|:----------:|:---------------:|:---------:|:-----------------------:|:------------:|:--------------:|:-----------:|:--------:|:--------:|:-------------:|:-------------:|:-----------:|:----------:|:----------:|:----------:|:---------:|:-------:|:--------:|:------------:|:-----------------:|:----------:|:----------:|:-------------:|:-----------------:|:---------:|:----------:|:----------:|:-------------:|:--------:|:-------:|:------------:|:----------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------------:|:-------------:|:-------:|:----------:|:-----------:|:--------:|:--------------:|:----------:|:-----------:|:-------------:|:--------:|:----------:|:--------:|:-----------------:|:--------:|:----------:|:-------:|:--------:|:--------------:|:---------:|:-----------:|:------------------:|:----------:|:------------:|:---------:|:---------:|:--------:| | 3.7307 | 1.0 | 20 | 3.6709 | 0.0762 | 0.1185 | 0.3892 | nan | nan | nan | 0.9337 | nan | 0.8394 | nan | nan | 0.2598 | nan | 0.1684 | nan | nan | nan | 0.5156 | 0.0 | nan | 0.0 | 0.0490 | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | 0.1231 | 0.0 | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.0879 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | 0.1270 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0450 | 0.0 | nan | nan | nan | 0.0166 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0349 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 
nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | 0.0 | nan | nan | 0.0 | nan | nan | 0.6672 | nan | 0.7637 | nan | nan | 0.2318 | nan | 0.0408 | nan | nan | 0.0 | 0.3370 | 0.0 | nan | 0.0 | 0.0436 | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | 0.0587 | 0.0 | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.0737 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | 0.0573 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | 0.0440 | 0.0 | nan | 0.0 | nan | 0.0162 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0293 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | 0.0 | nan | nan | | 3.4408 | 2.0 | 40 | 3.5943 | 0.0679 | 0.1166 | 0.3602 | nan | nan | nan | 0.8855 | nan | 0.9011 | nan | nan | 0.1773 | nan | 0.2616 | nan | nan | nan | 0.4185 | 0.0 | nan | 0.0 | 0.0319 | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | 0.1156 | 0.0 | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.0534 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | 0.2110 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0829 | 0.0 | nan | nan | nan | 0.0071 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0018 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | 0.0 | nan | nan | 0.0 | nan | 0.0 | 0.6857 | nan | 0.7235 | nan | nan | 0.1594 | nan | 0.0316 | nan | nan | 0.0 | 0.2811 | 0.0 | nan | 0.0 | 0.0308 | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | 0.0441 | 0.0 | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.0447 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | 0.0852 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | 0.0776 | 0.0 | nan | nan | nan | 0.0071 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0016 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | 0.0 | nan | nan | | 3.0273 | 3.0 | 60 | 3.2070 | 0.0781 | 0.1243 | 0.3756 | nan | nan | nan | 0.8704 | nan | 0.7804 | nan | nan | 0.2864 | nan | 0.2281 | nan | nan | nan | 0.4901 | 0.0 | nan | 0.0 | 0.0356 | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | 0.1337 | 0.0 | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.1160 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | 0.3318 | nan 
Most per-class accuracy and IoU values were `nan` or 0.0 on the validation set throughout training; the summary metrics per evaluation step are:

| Training Loss | Epoch | Step | Validation Loss | Mean Iou | Mean Accuracy | Overall Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:-------------:|:----------------:|
| 3.0684        | 4.0   | 80   | 2.9131          | 0.0714   | 0.1060        | 0.3374           |
| 2.6015        | 5.0   | 100  | 2.9167          | 0.0798   | 0.1238        | 0.3824           |
| 2.5206        | 6.0   | 120  | 2.4916          | 0.0678   | 0.0976        | 0.2930           |
| 2.4743        | 7.0   | 140  | 2.5774          | 0.0715   | 0.1082        | 0.3384           |
| 2.1776        | 8.0   | 160  | 2.7845          | 0.0814   | 0.1254        | 0.3777           |
| 2.0472        | 9.0   | 180  | 2.6400          | 0.0833   | 0.1278        | 0.3932           |
| 2.3246        | 10.0  | 200  | 2.5884          | 0.0768   | 0.1182        | 0.3592           |
| 2.4483        | 11.0  | 220  | 2.4969          | 0.0797   | 0.1181        | 0.3600           |
| 2.1924        | 12.0  | 240  | 2.3775          | 0.0803   | 0.1191        | 0.3624           |
| 1.7922        | 13.0  | 260  | 2.6384          | 0.0772   | 0.1202        | 0.3536           |
| 1.9423        | 14.0  | 280  | 2.2627          | 0.0690   | 0.1022        | 0.3079           |
| 1.6361        | 15.0  | 300  | 2.3295          | 0.0764   | 0.1206        | 0.3597           |
| 2.1016        | 16.0  | 320  | 2.1744          | 0.0823   | 0.1192        | 0.3604           |
| 1.733         | 17.0  | 340  | 2.2739          | 0.0927   | 0.1402        | 0.4066           |
| 1.1858        | 18.0  | 360  | 2.1393          | 0.0740   | 0.1145        | 0.3248           |
| 1.1789        | 19.0  | 380  | 2.0453          | 0.0784   | 0.1183        | 0.3378           |
| 2.1029        | 20.0  | 400  | 2.0401          | 0.0808   | 0.1198        | 0.3342           |
| 0.9199        | 21.0  | 420  | 2.0343          | 0.0867   | 0.1289        | 0.3561           |
| 1.3104        | 22.0  | 440  | 2.1901          | 0.0951   | 0.1491        | 0.4017           |
| 1.2897        | 23.0  | 460  | 2.0365          | 0.0801   | 0.1127        | 0.3254           |
| 1.203         | 24.0  | 480  | 2.0482          | 0.0893   | 0.1401        | 0.3735           |
| 0.9328        | 25.0  | 500  | 1.9838          | 0.0926   | 0.1380        | 0.3787           |
| 1.2277        | 26.0  | 520  | 1.9951          | 0.0783   | 0.1172        | 0.3428           |
| 1.5592        | 27.0  | 540  | 1.9680          | 0.0946   | 0.1407        | 0.3773           |
| 1.0019        | 28.0  | 560  | 1.9639          | 0.0908   | 0.1334        | 0.3647           |
| 1.6683        | 29.0  | 580  | 1.9628          | 0.0870   | 0.1361        | 0.3595           |
| 1.0931        | 30.0  | 600  | 1.9805          | 0.0834   | 0.1201        | 0.3378           |
| 1.7162        | 31.0  | 620  | 1.9101          | 0.0782   | 0.1200        | 0.3269           |
| 0.7559        | 32.0  | 640  | 1.9667          | 0.0807   | 0.1221        | 0.3382           |
| 0.9704        | 33.0  | 660  | 1.9224          | 0.0942   | 0.1408        | 0.3701           |
| 0.7972        | 34.0  | 680  | 1.9500          | 0.0896   | 0.1396        | 0.3767           |
| 1.1935        | 35.0  | 700  | 1.9230          | 0.0835   | 0.1237        | 0.3451           |
| 0.6306        | 36.0  | 720  | 1.9467          | 0.0823   | 0.1229        | 0.3397           |
| 0.8992        | 37.0  | 740  | 1.8487          | 0.0907   | 0.1386        | 0.3742           |
| 0.8158        | 38.0  | 760  | 1.9149          | 0.0883   | 0.1332        | 0.3662           |
nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.1541 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | 0.0 | nan | nan | 0.0 | nan | nan | 0.7200 | nan | 0.5510 | nan | nan | 0.3442 | nan | 0.0385 | nan | 0.0 | nan | 0.2853 | 0.0 | nan | 0.0 | 0.0687 | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | 0.0686 | 0.0 | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.2015 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | 0.1123 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | 0.1792 | 0.0 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0807 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | 0.0 | nan | nan | | 0.7208 | 39.0 | 780 | 1.8926 | 0.0865 | 0.1324 | 0.3476 | nan | nan | nan | 0.8363 | nan | 0.4742 | nan | nan | 0.4646 | nan | 0.0691 | nan | nan | nan | 0.3187 | 0.0 | nan | 0.0 | 0.0807 | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | 0.2120 | 0.0 | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.3116 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | 0.3915 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.2574 | 0.0 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.1590 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | 0.0 | nan | nan | 0.0 | nan | nan | 0.7189 | nan | 0.4473 | nan | nan | 0.3985 | nan | 0.0288 | nan | 0.0 | nan | 0.2638 | 0.0 | nan | 0.0 | 0.0738 | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | 0.0889 | 0.0 | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.1828 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | 0.1593 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | 0.1521 | 0.0 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0801 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | 0.0 | nan | nan | | 0.4526 | 40.0 | 800 | 1.8917 | 0.0861 | 0.1293 | 0.3495 | nan | nan | nan | 0.8316 | nan | 0.5129 | nan | nan | 0.4115 | nan | 0.0770 | nan | nan | nan | 0.3463 | 0.0 | nan | 0.0 | 0.0731 | 
0.0 | nan | nan | 0.0 | nan | nan | nan | nan | 0.2139 | 0.0 | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.3009 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | 0.2952 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.2768 | 0.0 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.1523 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | 0.0 | nan | nan | 0.0 | nan | nan | 0.7220 | nan | 0.4838 | nan | nan | 0.3564 | nan | 0.0299 | nan | 0.0 | nan | 0.2737 | 0.0 | nan | 0.0 | 0.0663 | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | 0.0774 | 0.0 | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.1923 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | 0.1332 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | 0.1703 | 0.0 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0776 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | 0.0 | nan | nan | | 0.9559 | 41.0 | 820 | 1.9104 | 0.0943 | 0.1433 | 0.3756 | nan | nan | nan | 0.8392 | nan | 0.7125 | nan | nan | 0.5193 | nan | 0.0690 | nan | nan | nan | 0.3553 | 0.0 | nan | 0.0 | 0.0323 | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | 0.2138 | 0.0 | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.3192 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | 0.3271 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.3195 | 0.0 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.1611 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | 0.0 | nan | nan | 0.0 | nan | nan | 0.7211 | nan | 0.6652 | nan | nan | 0.4339 | nan | 0.0285 | nan | 0.0 | nan | 0.2707 | 0.0 | nan | 0.0 | 0.0299 | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | 0.0813 | 0.0 | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.2025 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | 0.1477 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | 0.1693 | 0.0 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0796 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 
nan | nan | 0.0 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | 0.0 | nan | nan | | 0.5018 | 42.0 | 840 | 1.8932 | 0.0911 | 0.1378 | 0.3681 | nan | nan | nan | 0.8567 | nan | 0.5492 | nan | nan | 0.5049 | nan | 0.0660 | nan | nan | nan | 0.3575 | 0.0 | nan | 0.0 | 0.0726 | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | 0.2151 | 0.0 | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.3230 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | 0.2782 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.3271 | 0.0 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.1694 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | 0.0 | nan | nan | 0.0 | nan | nan | 0.7282 | nan | 0.5161 | nan | nan | 0.4305 | nan | 0.0302 | nan | 0.0 | nan | 0.2769 | 0.0 | nan | 0.0 | 0.0672 | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | 0.0795 | 0.0 | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.2015 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | 0.1328 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | 0.1898 | 0.0 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0817 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | 0.0 | nan | nan | | 0.947 | 43.0 | 860 | 1.8887 | 0.0912 | 0.1382 | 0.3714 | nan | nan | nan | 0.8465 | nan | 0.6056 | nan | nan | 0.4546 | nan | 0.0483 | nan | nan | nan | 0.3662 | 0.0 | nan | 0.0 | 0.0851 | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | 0.2132 | 0.0 | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.3330 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | 0.2809 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.3135 | 0.0 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.1848 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | 0.0 | nan | nan | 0.0 | nan | nan | 0.7239 | nan | 0.5688 | nan | nan | 0.3905 | nan | 0.0220 | nan | 0.0 | nan | 0.2813 | 0.0 | nan | 0.0 | 0.0773 | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | 0.0800 | 0.0 | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.1950 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | 0.1272 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | 0.1874 | 0.0 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0826 | 0.0 
| nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | 0.0 | nan | nan | | 0.7007 | 44.0 | 880 | 1.9000 | 0.0875 | 0.1308 | 0.3529 | nan | nan | nan | 0.8304 | nan | 0.6049 | nan | nan | 0.4529 | nan | 0.0449 | nan | nan | nan | 0.3287 | 0.0 | nan | 0.0 | 0.0427 | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | 0.2159 | 0.0 | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.3147 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | 0.2486 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.2906 | 0.0 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.1560 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | 0.0 | nan | nan | 0.0 | nan | nan | 0.7220 | nan | 0.5691 | nan | nan | 0.3859 | nan | 0.0198 | nan | 0.0 | nan | 0.2559 | 0.0 | nan | 0.0 | 0.0402 | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | 0.0767 | 0.0 | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.1898 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | 0.1140 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | 0.1753 | 0.0 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0765 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | 0.0 | nan | nan | | 0.785 | 45.0 | 900 | 1.8730 | 0.0914 | 0.1371 | 0.3646 | nan | nan | nan | 0.8517 | nan | 0.5682 | nan | nan | 0.4879 | nan | 0.0613 | nan | nan | nan | 0.3267 | 0.0 | nan | 0.0 | 0.1077 | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | 0.2109 | 0.0 | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.3246 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | 0.2757 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.3243 | 0.0 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.1616 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | 0.0 | nan | nan | 0.0 | nan | nan | 0.7265 | nan | 0.5348 | nan | nan | 0.4103 | nan | 0.0269 | nan | 0.0 | nan | 0.2638 | 0.0 | nan | 0.0 | 0.0980 | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | 0.0828 | 0.0 | nan | nan | nan | nan | nan | nan | 
nan | 0.0 | 0.1984 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | 0.1301 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | 0.1903 | 0.0 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0798 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | 0.0 | nan | nan | | 0.5925 | 46.0 | 920 | 1.8882 | 0.0887 | 0.1340 | 0.3635 | nan | nan | nan | 0.8662 | nan | 0.5111 | nan | nan | 0.4748 | nan | 0.0518 | nan | nan | nan | 0.3426 | 0.0 | nan | 0.0 | 0.0990 | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | 0.2182 | 0.0 | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.3307 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | 0.2379 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.3069 | 0.0 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.1792 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | 0.0 | nan | nan | 0.0 | nan | nan | 0.7269 | nan | 0.4821 | nan | nan | 0.3928 | nan | 0.0236 | nan | 0.0 | nan | 0.2737 | 0.0 | nan | 0.0 | 0.0904 | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | 0.0766 | 0.0 | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.2065 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | 0.1130 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | 0.1915 | 0.0 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0841 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | 0.0 | nan | nan | | 0.7732 | 47.0 | 940 | 1.8782 | 0.0874 | 0.1339 | 0.3639 | nan | nan | nan | 0.8603 | nan | 0.5207 | nan | nan | 0.4583 | nan | 0.0531 | nan | nan | nan | 0.3575 | 0.0 | nan | 0.0 | 0.0779 | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | 0.2191 | 0.0 | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.3398 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | 0.2444 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.3134 | 0.0 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.1698 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.0 | nan | nan | nan | 
nan | nan | nan | nan | nan | nan | 0.0 | nan | 0.0 | nan | nan | 0.0 | nan | nan | 0.7239 | nan | 0.4904 | nan | nan | 0.3751 | nan | 0.0237 | nan | 0.0 | nan | 0.2797 | 0.0 | nan | 0.0 | 0.0719 | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | 0.0733 | 0.0 | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.2097 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | 0.1109 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | 0.1839 | 0.0 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0782 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | 0.0 | nan | nan | | 0.8092 | 48.0 | 960 | 1.8683 | 0.0879 | 0.1348 | 0.3597 | nan | nan | nan | 0.8459 | nan | 0.5058 | nan | nan | 0.4731 | nan | 0.0656 | nan | nan | nan | 0.3583 | 0.0 | nan | 0.0 | 0.0554 | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | 0.2133 | 0.0 | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.3335 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | 0.3061 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.3165 | 0.0 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.1650 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | 0.0 | nan | nan | 0.0 | nan | nan | 0.7226 | nan | 0.4771 | nan | nan | 0.3972 | nan | 0.0292 | nan | 0.0 | nan | 0.2743 | 0.0 | nan | 0.0 | 0.0517 | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | 0.0812 | 0.0 | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.2042 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | 0.1374 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | 0.1835 | 0.0 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0776 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | 0.0 | nan | nan | | 0.583 | 49.0 | 980 | 1.8752 | 0.0902 | 0.1381 | 0.3690 | nan | nan | nan | 0.8486 | nan | 0.5583 | nan | nan | 0.4531 | nan | 0.0761 | nan | nan | nan | 0.3727 | 0.0 | nan | 0.0 | 0.0749 | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | 0.2149 | 0.0 | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.3347 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | 0.3022 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.3180 | 0.0 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.1743 | 0.0 | nan | nan | nan | nan | nan | nan | nan 
| nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | 0.0 | nan | nan | 0.0 | nan | nan | 0.7232 | nan | 0.5253 | nan | nan | 0.3853 | nan | 0.0317 | nan | 0.0 | nan | 0.2828 | 0.0 | nan | 0.0 | 0.0688 | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | 0.0794 | 0.0 | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.2090 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | 0.1334 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | 0.1867 | 0.0 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0796 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | 0.0 | nan | nan | | 0.8064 | 50.0 | 1000 | 1.8839 | 0.0900 | 0.1366 | 0.3644 | nan | nan | nan | 0.8407 | nan | 0.5637 | nan | nan | 0.4489 | nan | 0.0771 | nan | nan | nan | 0.3647 | 0.0 | nan | 0.0 | 0.0604 | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | 0.2133 | 0.0 | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.3360 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | 0.2925 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.3142 | 0.0 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.1774 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | 0.0 | nan | nan | 0.0 | nan | nan | 0.7245 | nan | 0.5304 | nan | nan | 0.3857 | nan | 0.0317 | nan | 0.0 | nan | 0.2767 | 0.0 | nan | 0.0 | 0.0558 | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | 0.0824 | 0.0 | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.2056 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | 0.1332 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | 0.1932 | 0.0 | nan | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0818 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | nan | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | 0.0 | nan | nan | nan | nan | nan | nan | nan | nan | nan | 0.0 | nan | 0.0 | nan | nan | ### Framework versions - Transformers 4.40.1 - Pytorch 2.3.0+cu118 - Datasets 2.19.0 - Tokenizers 0.19.1
{"license": "other", "tags": ["generated_from_trainer"], "datasets": ["scene_parse_150"], "base_model": "nvidia/mit-b0", "model-index": [{"name": "segformer-b0-scene-parse-150", "results": []}]}
DjoDja/segformer-b0-scene-parse-150
null
[ "transformers", "safetensors", "segformer", "generated_from_trainer", "dataset:scene_parse_150", "base_model:nvidia/mit-b0", "license:other", "endpoints_compatible", "region:us" ]
null
2024-04-30T18:30:03+00:00
[]
[]
TAGS #transformers #safetensors #segformer #generated_from_trainer #dataset-scene_parse_150 #base_model-nvidia/mit-b0 #license-other #endpoints_compatible #region-us
segformer-b0-scene-parse-150 ============================ This model is a fine-tuned version of nvidia/mit-b0 on the scene\_parse\_150 dataset. It achieves the following results on the evaluation set: * Loss: 1.8839 * Mean Iou: 0.0900 * Mean Accuracy: 0.1366 * Overall Accuracy: 0.3644 * Accuracy Wall: nan * Accuracy Building: nan * Accuracy Sky: nan * Accuracy Floor: 0.8407 * Accuracy Tree: nan * Accuracy Ceiling: 0.5637 * Accuracy Road: nan * Accuracy Bed : nan * Accuracy Windowpane: 0.4489 * Accuracy Grass: nan * Accuracy Cabinet: 0.0771 * Accuracy Sidewalk: nan * Accuracy Person: nan * Accuracy Earth: nan * Accuracy Door: 0.3647 * Accuracy Table: 0.0 * Accuracy Mountain: nan * Accuracy Plant: 0.0 * Accuracy Curtain: 0.0604 * Accuracy Chair: 0.0 * Accuracy Car: nan * Accuracy Water: nan * Accuracy Painting: 0.0 * Accuracy Sofa: nan * Accuracy Shelf: nan * Accuracy House: nan * Accuracy Sea: nan * Accuracy Mirror: 0.2133 * Accuracy Rug: 0.0 * Accuracy Field: nan * Accuracy Armchair: nan * Accuracy Seat: nan * Accuracy Fence: nan * Accuracy Desk: nan * Accuracy Rock: nan * Accuracy Wardrobe: nan * Accuracy Lamp: 0.0 * Accuracy Bathtub: 0.3360 * Accuracy Railing: nan * Accuracy Cushion: nan * Accuracy Base: nan * Accuracy Box: 0.0 * Accuracy Column: nan * Accuracy Signboard: nan * Accuracy Chest of drawers: nan * Accuracy Counter: nan * Accuracy Sand: nan * Accuracy Sink: 0.2925 * Accuracy Skyscraper: nan * Accuracy Fireplace: nan * Accuracy Refrigerator: nan * Accuracy Grandstand: nan * Accuracy Path: nan * Accuracy Stairs: nan * Accuracy Runway: nan * Accuracy Case: nan * Accuracy Pool table: nan * Accuracy Pillow: nan * Accuracy Screen door: nan * Accuracy Stairway: nan * Accuracy River: nan * Accuracy Bridge: nan * Accuracy Bookcase: nan * Accuracy Blind: nan * Accuracy Coffee table: nan * Accuracy Toilet: 0.3142 * Accuracy Flower: 0.0 * Accuracy Book: nan * Accuracy Hill: nan * Accuracy Bench: nan * Accuracy Countertop: 0.0 * Accuracy Stove: nan * Accuracy Palm: nan * Accuracy Kitchen island: nan * Accuracy Computer: nan * Accuracy Swivel chair: nan * Accuracy Boat: nan * Accuracy Bar: nan * Accuracy Arcade machine: nan * Accuracy Hovel: nan * Accuracy Bus: nan * Accuracy Towel: 0.1774 * Accuracy Light: 0.0 * Accuracy Truck: nan * Accuracy Tower: nan * Accuracy Chandelier: nan * Accuracy Awning: nan * Accuracy Streetlight: nan * Accuracy Booth: nan * Accuracy Television receiver: nan * Accuracy Airplane: nan * Accuracy Dirt track: nan * Accuracy Apparel: nan * Accuracy Pole: nan * Accuracy Land: nan * Accuracy Bannister: nan * Accuracy Escalator: nan * Accuracy Ottoman: nan * Accuracy Bottle: nan * Accuracy Buffet: nan * Accuracy Poster: nan * Accuracy Stage: nan * Accuracy Van: nan * Accuracy Ship: nan * Accuracy Fountain: nan * Accuracy Conveyer belt: nan * Accuracy Canopy: nan * Accuracy Washer: nan * Accuracy Plaything: nan * Accuracy Swimming pool: nan * Accuracy Stool: nan * Accuracy Barrel: nan * Accuracy Basket: 0.0 * Accuracy Waterfall: nan * Accuracy Tent: nan * Accuracy Bag: 0.0 * Accuracy Minibike: nan * Accuracy Cradle: nan * Accuracy Oven: nan * Accuracy Ball: nan * Accuracy Food: nan * Accuracy Step: nan * Accuracy Tank: nan * Accuracy Trade name: nan * Accuracy Microwave: nan * Accuracy Pot: nan * Accuracy Animal: nan * Accuracy Bicycle: nan * Accuracy Lake: nan * Accuracy Dishwasher: nan * Accuracy Screen: nan * Accuracy Blanket: nan * Accuracy Sculpture: nan * Accuracy Hood: nan * Accuracy Sconce: 0.0 * Accuracy Vase: 0.0 * Accuracy Traffic light: nan * Accuracy Tray: 
nan * Accuracy Ashcan: nan * Accuracy Fan: nan * Accuracy Pier: nan * Accuracy Crt screen: nan * Accuracy Plate: nan * Accuracy Monitor: nan * Accuracy Bulletin board: nan * Accuracy Shower: 0.0 * Accuracy Radiator: nan * Accuracy Glass: 0.0 * Accuracy Clock: nan * Accuracy Flag: nan * Iou Wall: 0.0 * Iou Building: nan * Iou Sky: nan * Iou Floor: 0.7245 * Iou Tree: nan * Iou Ceiling: 0.5304 * Iou Road: nan * Iou Bed : nan * Iou Windowpane: 0.3857 * Iou Grass: nan * Iou Cabinet: 0.0317 * Iou Sidewalk: nan * Iou Person: 0.0 * Iou Earth: nan * Iou Door: 0.2767 * Iou Table: 0.0 * Iou Mountain: nan * Iou Plant: 0.0 * Iou Curtain: 0.0558 * Iou Chair: 0.0 * Iou Car: nan * Iou Water: nan * Iou Painting: 0.0 * Iou Sofa: nan * Iou Shelf: nan * Iou House: nan * Iou Sea: nan * Iou Mirror: 0.0824 * Iou Rug: 0.0 * Iou Field: nan * Iou Armchair: nan * Iou Seat: nan * Iou Fence: nan * Iou Desk: nan * Iou Rock: nan * Iou Wardrobe: nan * Iou Lamp: 0.0 * Iou Bathtub: 0.2056 * Iou Railing: nan * Iou Cushion: nan * Iou Base: nan * Iou Box: 0.0 * Iou Column: nan * Iou Signboard: nan * Iou Chest of drawers: nan * Iou Counter: nan * Iou Sand: nan * Iou Sink: 0.1332 * Iou Skyscraper: nan * Iou Fireplace: nan * Iou Refrigerator: nan * Iou Grandstand: nan * Iou Path: nan * Iou Stairs: nan * Iou Runway: nan * Iou Case: nan * Iou Pool table: nan * Iou Pillow: nan * Iou Screen door: 0.0 * Iou Stairway: nan * Iou River: nan * Iou Bridge: nan * Iou Bookcase: nan * Iou Blind: nan * Iou Coffee table: nan * Iou Toilet: 0.1932 * Iou Flower: 0.0 * Iou Book: nan * Iou Hill: nan * Iou Bench: nan * Iou Countertop: 0.0 * Iou Stove: nan * Iou Palm: nan * Iou Kitchen island: nan * Iou Computer: nan * Iou Swivel chair: nan * Iou Boat: nan * Iou Bar: nan * Iou Arcade machine: nan * Iou Hovel: nan * Iou Bus: nan * Iou Towel: 0.0818 * Iou Light: 0.0 * Iou Truck: nan * Iou Tower: nan * Iou Chandelier: nan * Iou Awning: nan * Iou Streetlight: nan * Iou Booth: nan * Iou Television receiver: nan * Iou Airplane: nan * Iou Dirt track: nan * Iou Apparel: nan * Iou Pole: nan * Iou Land: nan * Iou Bannister: nan * Iou Escalator: nan * Iou Ottoman: nan * Iou Bottle: nan * Iou Buffet: nan * Iou Poster: nan * Iou Stage: nan * Iou Van: nan * Iou Ship: nan * Iou Fountain: nan * Iou Conveyer belt: nan * Iou Canopy: nan * Iou Washer: nan * Iou Plaything: nan * Iou Swimming pool: nan * Iou Stool: nan * Iou Barrel: nan * Iou Basket: 0.0 * Iou Waterfall: nan * Iou Tent: nan * Iou Bag: 0.0 * Iou Minibike: nan * Iou Cradle: nan * Iou Oven: nan * Iou Ball: nan * Iou Food: nan * Iou Step: nan * Iou Tank: nan * Iou Trade name: nan * Iou Microwave: nan * Iou Pot: nan * Iou Animal: nan * Iou Bicycle: nan * Iou Lake: nan * Iou Dishwasher: nan * Iou Screen: nan * Iou Blanket: nan * Iou Sculpture: nan * Iou Hood: nan * Iou Sconce: 0.0 * Iou Vase: 0.0 * Iou Traffic light: nan * Iou Tray: nan * Iou Ashcan: nan * Iou Fan: nan * Iou Pier: nan * Iou Crt screen: nan * Iou Plate: nan * Iou Monitor: nan * Iou Bulletin board: nan * Iou Shower: 0.0 * Iou Radiator: nan * Iou Glass: 0.0 * Iou Clock: nan * Iou Flag: nan Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 6e-05 * train\_batch\_size: 2 * eval\_batch\_size: 2 * seed: 42 * optimizer: Adam with 
betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * num\_epochs: 50 ### Training results ### Framework versions * Transformers 4.40.1 * Pytorch 2.3.0+cu118 * Datasets 2.19.0 * Tokenizers 0.19.1
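For reference, the sketch below shows how a run with the hyperparameters above could be wired up with the `transformers` Trainer. It is a hedged illustration, not the script that produced this checkpoint: the dataset column names (`image`, `annotation`), the 150-class label count, and the preprocessing choices are assumptions based on the standard `scene_parse_150` layout.

```python
# Minimal fine-tuning sketch (assumed setup, not the author's original script).
from datasets import load_dataset
from transformers import (
    SegformerImageProcessor,
    SegformerForSemanticSegmentation,
    TrainingArguments,
    Trainer,
)

# Small slice only for illustration; the real run used the full training split.
dataset = load_dataset("scene_parse_150", split="train[:100]")
processor = SegformerImageProcessor(do_reduce_labels=True)

# nvidia/mit-b0 is the encoder; the decode head is newly initialized for 150 classes.
model = SegformerForSemanticSegmentation.from_pretrained("nvidia/mit-b0", num_labels=150)

def transform(batch):
    # Convert images and segmentation maps into pixel_values / labels tensors.
    images = [img.convert("RGB") for img in batch["image"]]
    return processor(images, batch["annotation"], return_tensors="pt")

dataset.set_transform(transform)

args = TrainingArguments(
    output_dir="segformer-b0-scene-parse-150",
    learning_rate=6e-5,
    per_device_train_batch_size=2,
    per_device_eval_batch_size=2,
    num_train_epochs=50,
    seed=42,
    remove_unused_columns=False,  # keep raw columns so the lazy transform can run
)

trainer = Trainer(model=model, args=args, train_dataset=dataset)
trainer.train()
```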
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 6e-05\n* train\\_batch\\_size: 2\n* eval\\_batch\\_size: 2\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 50", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.1\n* Pytorch 2.3.0+cu118\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
[ "TAGS\n#transformers #safetensors #segformer #generated_from_trainer #dataset-scene_parse_150 #base_model-nvidia/mit-b0 #license-other #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 6e-05\n* train\\_batch\\_size: 2\n* eval\\_batch\\_size: 2\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 50", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.1\n* Pytorch 2.3.0+cu118\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
[ 54, 101, 5, 44 ]
[ "TAGS\n#transformers #safetensors #segformer #generated_from_trainer #dataset-scene_parse_150 #base_model-nvidia/mit-b0 #license-other #endpoints_compatible #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 6e-05\n* train\\_batch\\_size: 2\n* eval\\_batch\\_size: 2\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 50### Training results### Framework versions\n\n\n* Transformers 4.40.1\n* Pytorch 2.3.0+cu118\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
text-generation
transformers
# JSL-MedPhi2-2.7B [<img src="https://repository-images.githubusercontent.com/104670986/2e728700-ace4-11ea-9cfc-f3e060b25ddf">](http://www.johnsnowlabs.com) This model is developed by [John Snow Labs](https://www.johnsnowlabs.com/). This model is available under a [CC-BY-NC-ND](https://creativecommons.org/licenses/by-nc-nd/4.0/deed.en) license and must also conform to this [Acceptable Use Policy](https://huggingface.co/johnsnowlabs). If you need to license this model for commercial use, please contact us at [email protected]. ## 💻 Usage ```python !pip install -qU transformers accelerate from transformers import AutoTokenizer import transformers import torch model = "johnsnowlabs/JSL-MedPhi2-2.7B" messages = [{"role": "user", "content": "What is a large language model?"}] tokenizer = AutoTokenizer.from_pretrained(model) prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) pipeline = transformers.pipeline( "text-generation", model=model, torch_dtype=torch.float16, device_map="auto", ) outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95) print(outputs[0]["generated_text"]) ``` ## 🏆 Evaluation | Tasks |Version|Filter|n-shot| Metric |Value | |Stderr| |-------------------------------|-------|------|-----:|--------|-----:|---|-----:| |stem |N/A |none | 0|acc_norm|0.3904|± |0.0066| | | |none | 0|acc |0.4402|± |0.0058| | - medmcqa |Yaml |none | 0|acc |0.3899|± |0.0075| | | |none | 0|acc_norm|0.3899|± |0.0075| | - medqa_4options |Yaml |none | 0|acc |0.3920|± |0.0137| | | |none | 0|acc_norm|0.3920|± |0.0137| | - anatomy (mmlu) | 0|none | 0|acc |0.4815|± |0.0432| | - clinical_knowledge (mmlu) | 0|none | 0|acc |0.6340|± |0.0296| | - college_biology (mmlu) | 0|none | 0|acc |0.6181|± |0.0406| | - college_medicine (mmlu) | 0|none | 0|acc |0.5665|± |0.0378| | - medical_genetics (mmlu) | 0|none | 0|acc |0.6300|± |0.0485| | - professional_medicine (mmlu)| 0|none | 0|acc |0.4522|± |0.0302| | - pubmedqa | 1|none | 0|acc |0.7300|± |0.0199| |Groups|Version|Filter|n-shot| Metric |Value | |Stderr| |------|-------|------|-----:|--------|-----:|---|-----:| |stem |N/A |none | 0|acc_norm|0.3904|± |0.0066| | | |none | 0|acc |0.4402|± |0.0058|
{"license": "cc-by-nc-nd-4.0", "tags": ["phi-2", "sft", "medical"], "base_model": ["microsoft/phi2"]}
johnsnowlabs/JSL-MedPhi2-2.7B
null
[ "transformers", "safetensors", "phi", "text-generation", "phi-2", "sft", "medical", "conversational", "custom_code", "base_model:microsoft/phi2", "license:cc-by-nc-nd-4.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T18:30:37+00:00
[]
[]
TAGS #transformers #safetensors #phi #text-generation #phi-2 #sft #medical #conversational #custom_code #base_model-microsoft/phi2 #license-cc-by-nc-nd-4.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
JSL-MedPhi2-2.7B ================ <img src="URL This model is developed by John Snow Labs. This model is available under a CC-BY-NC-ND license and must also conform to this Acceptable Use Policy. If you need to license this model for commercial use, please contact us at info@URL. Usage ----- Evaluation ----------
[]
[ "TAGS\n#transformers #safetensors #phi #text-generation #phi-2 #sft #medical #conversational #custom_code #base_model-microsoft/phi2 #license-cc-by-nc-nd-4.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n" ]
[ 73 ]
[ "TAGS\n#transformers #safetensors #phi #text-generation #phi-2 #sft #medical #conversational #custom_code #base_model-microsoft/phi2 #license-cc-by-nc-nd-4.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n" ]
null
transformers
# Uploaded model - **Developed by:** traderpedroso - **License:** apache-2.0 - **Finetuned from model :** artificialguybr/llama3-8b-alpacadata-ptbr This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
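A possible way to load this adapter for inference is sketched below. It is an assumed usage pattern rather than instructions from the model author; the `max_seq_length`, 4-bit loading, and the example prompt are placeholders, and the adapter is assumed to resolve its base model automatically.

```python
# Illustrative loading sketch (assumed usage, not from the model author).
from unsloth import FastLanguageModel

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="traderpedroso/llama3-lora",  # LoRA adapter repo
    max_seq_length=2048,                      # assumption, not a published setting
    load_in_4bit=True,                        # assumption, for cheaper inference
)
FastLanguageModel.for_inference(model)  # enable Unsloth's faster inference path

inputs = tokenizer("Explique o que é um modelo de linguagem.", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```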
{"language": ["en"], "license": "apache-2.0", "tags": ["text-generation-inference", "transformers", "unsloth", "llama", "trl"], "base_model": "artificialguybr/llama3-8b-alpacadata-ptbr"}
traderpedroso/llama3-lora
null
[ "transformers", "safetensors", "text-generation-inference", "unsloth", "llama", "trl", "en", "base_model:artificialguybr/llama3-8b-alpacadata-ptbr", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-04-30T18:30:58+00:00
[]
[ "en" ]
TAGS #transformers #safetensors #text-generation-inference #unsloth #llama #trl #en #base_model-artificialguybr/llama3-8b-alpacadata-ptbr #license-apache-2.0 #endpoints_compatible #region-us
# Uploaded model - Developed by: traderpedroso - License: apache-2.0 - Finetuned from model : artificialguybr/llama3-8b-alpacadata-ptbr This llama model was trained 2x faster with Unsloth and Huggingface's TRL library. <img src="URL width="200"/>
[ "# Uploaded model\n\n- Developed by: traderpedroso\n- License: apache-2.0\n- Finetuned from model : artificialguybr/llama3-8b-alpacadata-ptbr\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ "TAGS\n#transformers #safetensors #text-generation-inference #unsloth #llama #trl #en #base_model-artificialguybr/llama3-8b-alpacadata-ptbr #license-apache-2.0 #endpoints_compatible #region-us \n", "# Uploaded model\n\n- Developed by: traderpedroso\n- License: apache-2.0\n- Finetuned from model : artificialguybr/llama3-8b-alpacadata-ptbr\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ 66, 83 ]
[ "TAGS\n#transformers #safetensors #text-generation-inference #unsloth #llama #trl #en #base_model-artificialguybr/llama3-8b-alpacadata-ptbr #license-apache-2.0 #endpoints_compatible #region-us \n# Uploaded model\n\n- Developed by: traderpedroso\n- License: apache-2.0\n- Finetuned from model : artificialguybr/llama3-8b-alpacadata-ptbr\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
null
null
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # idefics2-8b-docvqa-finetuned-museum This model is a fine-tuned version of [HuggingFaceM4/idefics2-8b](https://huggingface.co/HuggingFaceM4/idefics2-8b) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 2 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 8 - total_train_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 50 - num_epochs: 2 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.40.1 - Pytorch 2.3.0+cu118 - Datasets 2.19.0 - Tokenizers 0.19.1
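Since the card does not include usage code, the following is a hedged inference sketch in the style of the base HuggingFaceM4/idefics2-8b documentation. It assumes the repository hosts full model weights (if only an adapter is stored, it would need to be merged or loaded with PEFT first); the image path and question are placeholders.

```python
# Hedged DocVQA-style inference sketch (not part of the original card).
import torch
from PIL import Image
from transformers import AutoProcessor, AutoModelForVision2Seq

model_id = "ZacJQ/idefics2-8b-docvqa-finetuned-museum"
processor = AutoProcessor.from_pretrained(model_id)
model = AutoModelForVision2Seq.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")

image = Image.open("exhibit_label.jpg")  # placeholder document image
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": "What year is printed on this label?"},
        ],
    }
]
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
inputs = processor(text=prompt, images=[image], return_tensors="pt").to(model.device)

generated = model.generate(**inputs, max_new_tokens=64)
print(processor.batch_decode(generated, skip_special_tokens=True)[0])
```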
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "base_model": "HuggingFaceM4/idefics2-8b", "model-index": [{"name": "idefics2-8b-docvqa-finetuned-museum", "results": []}]}
ZacJQ/idefics2-8b-docvqa-finetuned-museum
null
[ "safetensors", "generated_from_trainer", "base_model:HuggingFaceM4/idefics2-8b", "license:apache-2.0", "region:us" ]
null
2024-04-30T18:31:09+00:00
[]
[]
TAGS #safetensors #generated_from_trainer #base_model-HuggingFaceM4/idefics2-8b #license-apache-2.0 #region-us
# idefics2-8b-docvqa-finetuned-museum This model is a fine-tuned version of HuggingFaceM4/idefics2-8b on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 2 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 8 - total_train_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 50 - num_epochs: 2 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.40.1 - Pytorch 2.3.0+cu118 - Datasets 2.19.0 - Tokenizers 0.19.1
[ "# idefics2-8b-docvqa-finetuned-museum\r\n\r\nThis model is a fine-tuned version of HuggingFaceM4/idefics2-8b on an unknown dataset.", "## Model description\r\n\r\nMore information needed", "## Intended uses & limitations\r\n\r\nMore information needed", "## Training and evaluation data\r\n\r\nMore information needed", "## Training procedure", "### Training hyperparameters\r\n\r\nThe following hyperparameters were used during training:\r\n- learning_rate: 0.0001\r\n- train_batch_size: 2\r\n- eval_batch_size: 8\r\n- seed: 42\r\n- gradient_accumulation_steps: 8\r\n- total_train_batch_size: 16\r\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\r\n- lr_scheduler_type: linear\r\n- lr_scheduler_warmup_steps: 50\r\n- num_epochs: 2\r\n- mixed_precision_training: Native AMP", "### Training results", "### Framework versions\r\n\r\n- Transformers 4.40.1\r\n- Pytorch 2.3.0+cu118\r\n- Datasets 2.19.0\r\n- Tokenizers 0.19.1" ]
[ "TAGS\n#safetensors #generated_from_trainer #base_model-HuggingFaceM4/idefics2-8b #license-apache-2.0 #region-us \n", "# idefics2-8b-docvqa-finetuned-museum\r\n\r\nThis model is a fine-tuned version of HuggingFaceM4/idefics2-8b on an unknown dataset.", "## Model description\r\n\r\nMore information needed", "## Intended uses & limitations\r\n\r\nMore information needed", "## Training and evaluation data\r\n\r\nMore information needed", "## Training procedure", "### Training hyperparameters\r\n\r\nThe following hyperparameters were used during training:\r\n- learning_rate: 0.0001\r\n- train_batch_size: 2\r\n- eval_batch_size: 8\r\n- seed: 42\r\n- gradient_accumulation_steps: 8\r\n- total_train_batch_size: 16\r\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\r\n- lr_scheduler_type: linear\r\n- lr_scheduler_warmup_steps: 50\r\n- num_epochs: 2\r\n- mixed_precision_training: Native AMP", "### Training results", "### Framework versions\r\n\r\n- Transformers 4.40.1\r\n- Pytorch 2.3.0+cu118\r\n- Datasets 2.19.0\r\n- Tokenizers 0.19.1" ]
[ 40, 45, 7, 9, 9, 4, 133, 5, 44 ]
[ "TAGS\n#safetensors #generated_from_trainer #base_model-HuggingFaceM4/idefics2-8b #license-apache-2.0 #region-us \n# idefics2-8b-docvqa-finetuned-museum\r\n\r\nThis model is a fine-tuned version of HuggingFaceM4/idefics2-8b on an unknown dataset.## Model description\r\n\r\nMore information needed## Intended uses & limitations\r\n\r\nMore information needed## Training and evaluation data\r\n\r\nMore information needed## Training procedure### Training hyperparameters\r\n\r\nThe following hyperparameters were used during training:\r\n- learning_rate: 0.0001\r\n- train_batch_size: 2\r\n- eval_batch_size: 8\r\n- seed: 42\r\n- gradient_accumulation_steps: 8\r\n- total_train_batch_size: 16\r\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\r\n- lr_scheduler_type: linear\r\n- lr_scheduler_warmup_steps: 50\r\n- num_epochs: 2\r\n- mixed_precision_training: Native AMP### Training results### Framework versions\r\n\r\n- Transformers 4.40.1\r\n- Pytorch 2.3.0+cu118\r\n- Datasets 2.19.0\r\n- Tokenizers 0.19.1" ]
feature-extraction
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
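Because the "How to Get Started" section above is still a placeholder, here is a minimal sketch for pulling encoder features from this T5-architecture checkpoint, matching its feature-extraction tag. The tokenizer type and the mean-pooling step are assumptions, not documented behaviour of this repository.

```python
# Hedged feature-extraction sketch for a T5-style code/text encoder.
import torch
from transformers import AutoTokenizer, T5EncoderModel

model_id = "claudios/cotext-1-cc"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = T5EncoderModel.from_pretrained(model_id)

code = "def add(a, b): return a + b"
inputs = tokenizer(code, return_tensors="pt")

with torch.no_grad():
    hidden = model(**inputs).last_hidden_state  # (1, seq_len, d_model)
embedding = hidden.mean(dim=1)                  # simple mean-pooled sentence embedding
print(embedding.shape)
```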
{"library_name": "transformers", "tags": []}
claudios/cotext-1-cc
null
[ "transformers", "safetensors", "t5", "feature-extraction", "arxiv:1910.09700", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T18:32:21+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #t5 #feature-extraction #arxiv-1910.09700 #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #t5 #feature-extraction #arxiv-1910.09700 #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 39, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #t5 #feature-extraction #arxiv-1910.09700 #endpoints_compatible #text-generation-inference #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
text-generation
transformers
# Uploaded model - **Developed by:** DattaBS - **License:** apache-2.0 - **Finetuned from model :** mistralai/Mistral-7B-Instruct-v0.2 This mistral model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
{"language": ["en"], "license": "apache-2.0", "tags": ["text-generation-inference", "transformers", "unsloth", "mistral", "trl"], "base_model": "mistralai/Mistral-7B-Instruct-v0.2"}
DattaBS/mistral_gsm8k
null
[ "transformers", "safetensors", "mistral", "text-generation", "text-generation-inference", "unsloth", "trl", "conversational", "en", "base_model:mistralai/Mistral-7B-Instruct-v0.2", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-30T18:32:31+00:00
[]
[ "en" ]
TAGS #transformers #safetensors #mistral #text-generation #text-generation-inference #unsloth #trl #conversational #en #base_model-mistralai/Mistral-7B-Instruct-v0.2 #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us
# Uploaded model - Developed by: DattaBS - License: apache-2.0 - Finetuned from model : mistralai/Mistral-7B-Instruct-v0.2 This mistral model was trained 2x faster with Unsloth and Huggingface's TRL library. <img src="URL width="200"/>
[ "# Uploaded model\n\n- Developed by: DattaBS\n- License: apache-2.0\n- Finetuned from model : mistralai/Mistral-7B-Instruct-v0.2\n\nThis mistral model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ "TAGS\n#transformers #safetensors #mistral #text-generation #text-generation-inference #unsloth #trl #conversational #en #base_model-mistralai/Mistral-7B-Instruct-v0.2 #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n", "# Uploaded model\n\n- Developed by: DattaBS\n- License: apache-2.0\n- Finetuned from model : mistralai/Mistral-7B-Instruct-v0.2\n\nThis mistral model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ 76, 80 ]
[ "TAGS\n#transformers #safetensors #mistral #text-generation #text-generation-inference #unsloth #trl #conversational #en #base_model-mistralai/Mistral-7B-Instruct-v0.2 #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n# Uploaded model\n\n- Developed by: DattaBS\n- License: apache-2.0\n- Finetuned from model : mistralai/Mistral-7B-Instruct-v0.2\n\nThis mistral model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
text2text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
SilvioLima/absa
null
[ "transformers", "safetensors", "bart", "text2text-generation", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "region:us", "has_space" ]
null
2024-04-30T18:33:28+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #bart #text2text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us #has_space
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #bart #text2text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us #has_space \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 43, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #bart #text2text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us #has_space \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
nem012/gemma2b-1e-5r4
null
[ "transformers", "safetensors", "gemma", "text-generation", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T18:33:41+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #gemma #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #gemma #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 43, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #gemma #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # robust_llm_pythia-14m_mz-135_WordLength_n-its-10-seed-1 This model is a fine-tuned version of [EleutherAI/pythia-14m](https://huggingface.co/EleutherAI/pythia-14m) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 64 - seed: 1 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.2.1 - Datasets 2.18.0 - Tokenizers 0.15.2
{"tags": ["generated_from_trainer"], "base_model": "EleutherAI/pythia-14m", "model-index": [{"name": "robust_llm_pythia-14m_mz-135_WordLength_n-its-10-seed-1", "results": []}]}
AlignmentResearch/robust_llm_pythia-14m_mz-135_WordLength_n-its-10-seed-1
null
[ "transformers", "tensorboard", "safetensors", "gpt_neox", "text-classification", "generated_from_trainer", "base_model:EleutherAI/pythia-14m", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T18:34:24+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-14m #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# robust_llm_pythia-14m_mz-135_WordLength_n-its-10-seed-1 This model is a fine-tuned version of EleutherAI/pythia-14m on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 64 - seed: 1 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.2.1 - Datasets 2.18.0 - Tokenizers 0.15.2
[ "# robust_llm_pythia-14m_mz-135_WordLength_n-its-10-seed-1\n\nThis model is a fine-tuned version of EleutherAI/pythia-14m on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 1\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-14m #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# robust_llm_pythia-14m_mz-135_WordLength_n-its-10-seed-1\n\nThis model is a fine-tuned version of EleutherAI/pythia-14m on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 1\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ 62, 58, 7, 9, 9, 4, 93, 5, 40 ]
[ "TAGS\n#transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-14m #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# robust_llm_pythia-14m_mz-135_WordLength_n-its-10-seed-1\n\nThis model is a fine-tuned version of EleutherAI/pythia-14m on an unknown dataset.## Model description\n\nMore information needed## Intended uses & limitations\n\nMore information needed## Training and evaluation data\n\nMore information needed## Training procedure### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 1\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1### Training results### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # robust_llm_pythia-14m_mz-135_WordLength_n-its-10-seed-3 This model is a fine-tuned version of [EleutherAI/pythia-14m](https://huggingface.co/EleutherAI/pythia-14m) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 64 - seed: 3 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.2.1 - Datasets 2.18.0 - Tokenizers 0.15.2
{"tags": ["generated_from_trainer"], "base_model": "EleutherAI/pythia-14m", "model-index": [{"name": "robust_llm_pythia-14m_mz-135_WordLength_n-its-10-seed-3", "results": []}]}
AlignmentResearch/robust_llm_pythia-14m_mz-135_WordLength_n-its-10-seed-3
null
[ "transformers", "tensorboard", "safetensors", "gpt_neox", "text-classification", "generated_from_trainer", "base_model:EleutherAI/pythia-14m", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T18:35:49+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-14m #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# robust_llm_pythia-14m_mz-135_WordLength_n-its-10-seed-3 This model is a fine-tuned version of EleutherAI/pythia-14m on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 64 - seed: 3 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.2.1 - Datasets 2.18.0 - Tokenizers 0.15.2
[ "# robust_llm_pythia-14m_mz-135_WordLength_n-its-10-seed-3\n\nThis model is a fine-tuned version of EleutherAI/pythia-14m on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 3\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-14m #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# robust_llm_pythia-14m_mz-135_WordLength_n-its-10-seed-3\n\nThis model is a fine-tuned version of EleutherAI/pythia-14m on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 3\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ 62, 58, 7, 9, 9, 4, 93, 5, 40 ]
[ "TAGS\n#transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-14m #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# robust_llm_pythia-14m_mz-135_WordLength_n-its-10-seed-3\n\nThis model is a fine-tuned version of EleutherAI/pythia-14m on an unknown dataset.## Model description\n\nMore information needed## Intended uses & limitations\n\nMore information needed## Training and evaluation data\n\nMore information needed## Training procedure### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 3\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1### Training results### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # robust_llm_pythia-14m_mz-135_WordLength_n-its-10-seed-2 This model is a fine-tuned version of [EleutherAI/pythia-14m](https://huggingface.co/EleutherAI/pythia-14m) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 64 - seed: 2 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.2.1 - Datasets 2.18.0 - Tokenizers 0.15.2
{"tags": ["generated_from_trainer"], "base_model": "EleutherAI/pythia-14m", "model-index": [{"name": "robust_llm_pythia-14m_mz-135_WordLength_n-its-10-seed-2", "results": []}]}
AlignmentResearch/robust_llm_pythia-14m_mz-135_WordLength_n-its-10-seed-2
null
[ "transformers", "tensorboard", "safetensors", "gpt_neox", "text-classification", "generated_from_trainer", "base_model:EleutherAI/pythia-14m", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T18:35:54+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-14m #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# robust_llm_pythia-14m_mz-135_WordLength_n-its-10-seed-2 This model is a fine-tuned version of EleutherAI/pythia-14m on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 64 - seed: 2 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.2.1 - Datasets 2.18.0 - Tokenizers 0.15.2
[ "# robust_llm_pythia-14m_mz-135_WordLength_n-its-10-seed-2\n\nThis model is a fine-tuned version of EleutherAI/pythia-14m on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 2\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-14m #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# robust_llm_pythia-14m_mz-135_WordLength_n-its-10-seed-2\n\nThis model is a fine-tuned version of EleutherAI/pythia-14m on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 2\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ 62, 58, 7, 9, 9, 4, 93, 5, 40 ]
[ "TAGS\n#transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-14m #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# robust_llm_pythia-14m_mz-135_WordLength_n-its-10-seed-2\n\nThis model is a fine-tuned version of EleutherAI/pythia-14m on an unknown dataset.## Model description\n\nMore information needed## Intended uses & limitations\n\nMore information needed## Training and evaluation data\n\nMore information needed## Training procedure### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 2\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1### Training results### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
null
transformers
# Uploaded model - **Developed by:** traderpedroso - **License:** apache-2.0 - **Finetuned from model :** artificialguybr/llama3-8b-alpacadata-ptbr This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
{"language": ["en"], "license": "apache-2.0", "tags": ["text-generation-inference", "transformers", "unsloth", "llama", "gguf"], "base_model": "artificialguybr/llama3-8b-alpacadata-ptbr"}
traderpedroso/llama3-GGUF
null
[ "transformers", "gguf", "llama", "text-generation-inference", "unsloth", "en", "base_model:artificialguybr/llama3-8b-alpacadata-ptbr", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-04-30T18:36:04+00:00
[]
[ "en" ]
TAGS #transformers #gguf #llama #text-generation-inference #unsloth #en #base_model-artificialguybr/llama3-8b-alpacadata-ptbr #license-apache-2.0 #endpoints_compatible #region-us
# Uploaded model - Developed by: traderpedroso - License: apache-2.0 - Finetuned from model : artificialguybr/llama3-8b-alpacadata-ptbr This llama model was trained 2x faster with Unsloth and Huggingface's TRL library. <img src="URL width="200"/>
[ "# Uploaded model\n\n- Developed by: traderpedroso\n- License: apache-2.0\n- Finetuned from model : artificialguybr/llama3-8b-alpacadata-ptbr\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ "TAGS\n#transformers #gguf #llama #text-generation-inference #unsloth #en #base_model-artificialguybr/llama3-8b-alpacadata-ptbr #license-apache-2.0 #endpoints_compatible #region-us \n", "# Uploaded model\n\n- Developed by: traderpedroso\n- License: apache-2.0\n- Finetuned from model : artificialguybr/llama3-8b-alpacadata-ptbr\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ 63, 83 ]
[ "TAGS\n#transformers #gguf #llama #text-generation-inference #unsloth #en #base_model-artificialguybr/llama3-8b-alpacadata-ptbr #license-apache-2.0 #endpoints_compatible #region-us \n# Uploaded model\n\n- Developed by: traderpedroso\n- License: apache-2.0\n- Finetuned from model : artificialguybr/llama3-8b-alpacadata-ptbr\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # robust_llm_pythia-14m_mz-135_WordLength_n-its-10-seed-0 This model is a fine-tuned version of [EleutherAI/pythia-14m](https://huggingface.co/EleutherAI/pythia-14m) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 64 - seed: 0 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.2.1 - Datasets 2.18.0 - Tokenizers 0.15.2
{"tags": ["generated_from_trainer"], "base_model": "EleutherAI/pythia-14m", "model-index": [{"name": "robust_llm_pythia-14m_mz-135_WordLength_n-its-10-seed-0", "results": []}]}
AlignmentResearch/robust_llm_pythia-14m_mz-135_WordLength_n-its-10-seed-0
null
[ "transformers", "tensorboard", "safetensors", "gpt_neox", "text-classification", "generated_from_trainer", "base_model:EleutherAI/pythia-14m", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T18:36:07+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-14m #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# robust_llm_pythia-14m_mz-135_WordLength_n-its-10-seed-0 This model is a fine-tuned version of EleutherAI/pythia-14m on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 64 - seed: 0 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.2.1 - Datasets 2.18.0 - Tokenizers 0.15.2
[ "# robust_llm_pythia-14m_mz-135_WordLength_n-its-10-seed-0\n\nThis model is a fine-tuned version of EleutherAI/pythia-14m on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 0\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-14m #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# robust_llm_pythia-14m_mz-135_WordLength_n-its-10-seed-0\n\nThis model is a fine-tuned version of EleutherAI/pythia-14m on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 0\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ 62, 58, 7, 9, 9, 4, 93, 5, 40 ]
[ "TAGS\n#transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-14m #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# robust_llm_pythia-14m_mz-135_WordLength_n-its-10-seed-0\n\nThis model is a fine-tuned version of EleutherAI/pythia-14m on an unknown dataset.## Model description\n\nMore information needed## Intended uses & limitations\n\nMore information needed## Training and evaluation data\n\nMore information needed## Training procedure### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 0\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1### Training results### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # robust_llm_pythia-70m_mz-135_WordLength_n-its-10-seed-0 This model is a fine-tuned version of [EleutherAI/pythia-70m](https://huggingface.co/EleutherAI/pythia-70m) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 64 - seed: 0 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.2.1 - Datasets 2.18.0 - Tokenizers 0.15.2
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "base_model": "EleutherAI/pythia-70m", "model-index": [{"name": "robust_llm_pythia-70m_mz-135_WordLength_n-its-10-seed-0", "results": []}]}
AlignmentResearch/robust_llm_pythia-70m_mz-135_WordLength_n-its-10-seed-0
null
[ "transformers", "tensorboard", "safetensors", "gpt_neox", "text-classification", "generated_from_trainer", "base_model:EleutherAI/pythia-70m", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T18:36:17+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-70m #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# robust_llm_pythia-70m_mz-135_WordLength_n-its-10-seed-0 This model is a fine-tuned version of EleutherAI/pythia-70m on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 64 - seed: 0 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.2.1 - Datasets 2.18.0 - Tokenizers 0.15.2
[ "# robust_llm_pythia-70m_mz-135_WordLength_n-its-10-seed-0\n\nThis model is a fine-tuned version of EleutherAI/pythia-70m on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 0\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-70m #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# robust_llm_pythia-70m_mz-135_WordLength_n-its-10-seed-0\n\nThis model is a fine-tuned version of EleutherAI/pythia-70m on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 0\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ 70, 58, 7, 9, 9, 4, 93, 5, 40 ]
[ "TAGS\n#transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-70m #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# robust_llm_pythia-70m_mz-135_WordLength_n-its-10-seed-0\n\nThis model is a fine-tuned version of EleutherAI/pythia-70m on an unknown dataset.## Model description\n\nMore information needed## Intended uses & limitations\n\nMore information needed## Training and evaluation data\n\nMore information needed## Training procedure### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 0\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1### Training results### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # robust_llm_pythia-70m_mz-135_WordLength_n-its-10-seed-1 This model is a fine-tuned version of [EleutherAI/pythia-70m](https://huggingface.co/EleutherAI/pythia-70m) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 64 - seed: 1 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.2.1 - Datasets 2.18.0 - Tokenizers 0.15.2
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "base_model": "EleutherAI/pythia-70m", "model-index": [{"name": "robust_llm_pythia-70m_mz-135_WordLength_n-its-10-seed-1", "results": []}]}
AlignmentResearch/robust_llm_pythia-70m_mz-135_WordLength_n-its-10-seed-1
null
[ "transformers", "tensorboard", "safetensors", "gpt_neox", "text-classification", "generated_from_trainer", "base_model:EleutherAI/pythia-70m", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T18:37:27+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-70m #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# robust_llm_pythia-70m_mz-135_WordLength_n-its-10-seed-1 This model is a fine-tuned version of EleutherAI/pythia-70m on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 64 - seed: 1 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.2.1 - Datasets 2.18.0 - Tokenizers 0.15.2
[ "# robust_llm_pythia-70m_mz-135_WordLength_n-its-10-seed-1\n\nThis model is a fine-tuned version of EleutherAI/pythia-70m on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 1\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-70m #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# robust_llm_pythia-70m_mz-135_WordLength_n-its-10-seed-1\n\nThis model is a fine-tuned version of EleutherAI/pythia-70m on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 1\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ 70, 58, 7, 9, 9, 4, 93, 5, 40 ]
[ "TAGS\n#transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-70m #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# robust_llm_pythia-70m_mz-135_WordLength_n-its-10-seed-1\n\nThis model is a fine-tuned version of EleutherAI/pythia-70m on an unknown dataset.## Model description\n\nMore information needed## Intended uses & limitations\n\nMore information needed## Training and evaluation data\n\nMore information needed## Training procedure### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 1\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1### Training results### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
feature-extraction
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
claudios/cotext-2-cc
null
[ "transformers", "safetensors", "t5", "feature-extraction", "arxiv:1910.09700", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T18:38:30+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #t5 #feature-extraction #arxiv-1910.09700 #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #t5 #feature-extraction #arxiv-1910.09700 #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 39, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #t5 #feature-extraction #arxiv-1910.09700 #endpoints_compatible #text-generation-inference #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
text-generation
transformers
# Model Card for Model ID

<!-- Provide a quick summary of what the model is/does. -->

## Model Details

This model is a Llama-2-7b-chat-hf model trained on ErAConD data, which is a dataset for grammar corrections for written dialogue conversations. This model is presented in Using Adaptive Empathetic Responses for Teaching English. Please refer to the paper and the corresponding repository for training details.
{"library_name": "transformers", "tags": []}
sylviali/eracond_llama_2_grammar
null
[ "transformers", "safetensors", "llama", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "4-bit", "region:us" ]
null
2024-04-30T18:39:27+00:00
[]
[]
TAGS #transformers #safetensors #llama #text-generation #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us
# Model Card for Model ID

## Model Details

This model is a Llama-2-7b-chat-hf model trained on ErAConD data, which is a dataset for grammar corrections for written dialogue conversations. This model is presented in Using Adaptive Empathetic Responses for Teaching English. Please refer to the paper and the corresponding repository for training details.
[ "# Model Card for Model ID", "## Model Details\n\nThis model is a Llama-2-7b-chat-hf model trained on ErAConD data, which is a dataset for grammar corrections for written dialogue conversations. This model is presented in Using Adaptive Empathetic Responses for Teaching English. Please refer to the paper and the corresponding repository for training detaills." ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us \n", "# Model Card for Model ID", "## Model Details\n\nThis model is a Llama-2-7b-chat-hf model trained on ErAConD data, which is a dataset for grammar corrections for written dialogue conversations. This model is presented in Using Adaptive Empathetic Responses for Teaching English. Please refer to the paper and the corresponding repository for training detaills." ]
[ 38, 6, 70 ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us \n# Model Card for Model ID## Model Details\n\nThis model is a Llama-2-7b-chat-hf model trained on ErAConD data, which is a dataset for grammar corrections for written dialogue conversations. This model is presented in Using Adaptive Empathetic Responses for Teaching English. Please refer to the paper and the corresponding repository for training detaills." ]
text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # zephyr-7b-gemma-sft-african-ultraalpaca This model is a fine-tuned version of [google/gemma-7b](https://huggingface.co/google/gemma-7b) ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - distributed_type: multi-GPU - num_devices: 8 - gradient_accumulation_steps: 2 - total_train_batch_size: 16 - total_eval_batch_size: 8 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 1.0034 | 1.0 | 23628 | 1.0630 | | 0.6403 | 2.0 | 47257 | 0.8788 | | 0.2976 | 3.0 | 70884 | 0.8875 | ### Framework versions - Transformers 4.39.0.dev0 - Pytorch 2.2.1+cu121 - Datasets 2.14.6 - Tokenizers 0.15.2
{"license": "gemma", "tags": ["alignment-handbook", "trl", "sft", "generated_from_trainer", "trl", "sft", "generated_from_trainer"], "datasets": ["masakhane/african-ultrachat", "untrachat_en", "sd"], "base_model": "google/gemma-7b", "model-index": [{"name": "zephyr-7b-gemma-sft-african-ultraalpaca", "results": []}]}
masakhane/African-ultrachat-alpaca
null
[ "transformers", "safetensors", "gemma", "text-generation", "alignment-handbook", "trl", "sft", "generated_from_trainer", "conversational", "dataset:masakhane/african-ultrachat", "dataset:untrachat_en", "dataset:sd", "base_model:google/gemma-7b", "license:gemma", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T18:39:36+00:00
[]
[]
TAGS #transformers #safetensors #gemma #text-generation #alignment-handbook #trl #sft #generated_from_trainer #conversational #dataset-masakhane/african-ultrachat #dataset-untrachat_en #dataset-sd #base_model-google/gemma-7b #license-gemma #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
zephyr-7b-gemma-sft-african-ultraalpaca ======================================= This model is a fine-tuned version of google/gemma-7b Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 1e-05 * train\_batch\_size: 1 * eval\_batch\_size: 1 * seed: 42 * distributed\_type: multi-GPU * num\_devices: 8 * gradient\_accumulation\_steps: 2 * total\_train\_batch\_size: 16 * total\_eval\_batch\_size: 8 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: cosine * lr\_scheduler\_warmup\_ratio: 0.1 * num\_epochs: 3 ### Training results ### Framework versions * Transformers 4.39.0.dev0 * Pytorch 2.2.1+cu121 * Datasets 2.14.6 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 1e-05\n* train\\_batch\\_size: 1\n* eval\\_batch\\_size: 1\n* seed: 42\n* distributed\\_type: multi-GPU\n* num\\_devices: 8\n* gradient\\_accumulation\\_steps: 2\n* total\\_train\\_batch\\_size: 16\n* total\\_eval\\_batch\\_size: 8\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: cosine\n* lr\\_scheduler\\_warmup\\_ratio: 0.1\n* num\\_epochs: 3", "### Training results", "### Framework versions\n\n\n* Transformers 4.39.0.dev0\n* Pytorch 2.2.1+cu121\n* Datasets 2.14.6\n* Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #safetensors #gemma #text-generation #alignment-handbook #trl #sft #generated_from_trainer #conversational #dataset-masakhane/african-ultrachat #dataset-untrachat_en #dataset-sd #base_model-google/gemma-7b #license-gemma #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 1e-05\n* train\\_batch\\_size: 1\n* eval\\_batch\\_size: 1\n* seed: 42\n* distributed\\_type: multi-GPU\n* num\\_devices: 8\n* gradient\\_accumulation\\_steps: 2\n* total\\_train\\_batch\\_size: 16\n* total\\_eval\\_batch\\_size: 8\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: cosine\n* lr\\_scheduler\\_warmup\\_ratio: 0.1\n* num\\_epochs: 3", "### Training results", "### Framework versions\n\n\n* Transformers 4.39.0.dev0\n* Pytorch 2.2.1+cu121\n* Datasets 2.14.6\n* Tokenizers 0.15.2" ]
[ 95, 176, 5, 47 ]
[ "TAGS\n#transformers #safetensors #gemma #text-generation #alignment-handbook #trl #sft #generated_from_trainer #conversational #dataset-masakhane/african-ultrachat #dataset-untrachat_en #dataset-sd #base_model-google/gemma-7b #license-gemma #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 1e-05\n* train\\_batch\\_size: 1\n* eval\\_batch\\_size: 1\n* seed: 42\n* distributed\\_type: multi-GPU\n* num\\_devices: 8\n* gradient\\_accumulation\\_steps: 2\n* total\\_train\\_batch\\_size: 16\n* total\\_eval\\_batch\\_size: 8\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: cosine\n* lr\\_scheduler\\_warmup\\_ratio: 0.1\n* num\\_epochs: 3### Training results### Framework versions\n\n\n* Transformers 4.39.0.dev0\n* Pytorch 2.2.1+cu121\n* Datasets 2.14.6\n* Tokenizers 0.15.2" ]
text2text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # flan-t5-base-samsum This model is a fine-tuned version of [google/flan-t5-base](https://huggingface.co/google/flan-t5-base) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.3698 - Rouge1: 47.0746 - Rouge2: 23.2656 - Rougel: 39.5723 - Rougelsum: 43.3245 - Gen Len: 17.2564 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 5 - eval_batch_size: 5 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:| | 1.4194 | 1.0 | 2947 | 1.3698 | 47.0746 | 23.2656 | 39.5723 | 43.3245 | 17.2564 | ### Framework versions - Transformers 4.40.1 - Pytorch 2.2.1+cu121 - Datasets 2.19.0 - Tokenizers 0.19.1
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "metrics": ["rouge"], "base_model": "google/flan-t5-base", "model-index": [{"name": "flan-t5-base-samsum", "results": []}]}
midnightGlow/flan-t5-base-samsum
null
[ "transformers", "tensorboard", "safetensors", "t5", "text2text-generation", "generated_from_trainer", "base_model:google/flan-t5-base", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T18:40:33+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #t5 #text2text-generation #generated_from_trainer #base_model-google/flan-t5-base #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
flan-t5-base-samsum =================== This model is a fine-tuned version of google/flan-t5-base on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 1.3698 * Rouge1: 47.0746 * Rouge2: 23.2656 * Rougel: 39.5723 * Rougelsum: 43.3245 * Gen Len: 17.2564 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 5e-05 * train\_batch\_size: 5 * eval\_batch\_size: 5 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * num\_epochs: 1 ### Training results ### Framework versions * Transformers 4.40.1 * Pytorch 2.2.1+cu121 * Datasets 2.19.0 * Tokenizers 0.19.1
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 5e-05\n* train\\_batch\\_size: 5\n* eval\\_batch\\_size: 5\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 1", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.1\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
[ "TAGS\n#transformers #tensorboard #safetensors #t5 #text2text-generation #generated_from_trainer #base_model-google/flan-t5-base #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 5e-05\n* train\\_batch\\_size: 5\n* eval\\_batch\\_size: 5\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 1", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.1\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
[ 67, 101, 5, 44 ]
[ "TAGS\n#transformers #tensorboard #safetensors #t5 #text2text-generation #generated_from_trainer #base_model-google/flan-t5-base #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 5e-05\n* train\\_batch\\_size: 5\n* eval\\_batch\\_size: 5\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 1### Training results### Framework versions\n\n\n* Transformers 4.40.1\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
jamesohe/casaudit3-4bit-p02-base
null
[ "transformers", "safetensors", "llama", "text-generation", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "4-bit", "region:us" ]
null
2024-04-30T18:40:47+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #llama #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 48, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
reinforcement-learning
null
# **Q-Learning** Agent playing **FrozenLake-v1**
This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**.

## Usage

```python
import gym

# load_from_hub is assumed to be the helper from the course notebook that
# downloads and unpickles the saved Q-table from the Hub.
model = load_from_hub(repo_id="bendupont/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
{"tags": ["FrozenLake-v1-4x4-no_slippery", "q-learning", "reinforcement-learning", "custom-implementation"], "model-index": [{"name": "q-FrozenLake-v1-4x4-noSlippery", "results": [{"task": {"type": "reinforcement-learning", "name": "reinforcement-learning"}, "dataset": {"name": "FrozenLake-v1-4x4-no_slippery", "type": "FrozenLake-v1-4x4-no_slippery"}, "metrics": [{"type": "mean_reward", "value": "1.00 +/- 0.00", "name": "mean_reward", "verified": false}]}]}]}
bendupont/q-FrozenLake-v1-4x4-noSlippery
null
[ "FrozenLake-v1-4x4-no_slippery", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
null
2024-04-30T18:41:19+00:00
[]
[]
TAGS #FrozenLake-v1-4x4-no_slippery #q-learning #reinforcement-learning #custom-implementation #model-index #region-us
# Q-Learning Agent playing FrozenLake-v1
This is a trained model of a Q-Learning agent playing FrozenLake-v1.

## Usage
[ "# Q-Learning Agent playing1 FrozenLake-v1\n This is a trained model of a Q-Learning agent playing FrozenLake-v1 .\n\n ## Usage" ]
[ "TAGS\n#FrozenLake-v1-4x4-no_slippery #q-learning #reinforcement-learning #custom-implementation #model-index #region-us \n", "# Q-Learning Agent playing1 FrozenLake-v1\n This is a trained model of a Q-Learning agent playing FrozenLake-v1 .\n\n ## Usage" ]
[ 35, 33 ]
[ "TAGS\n#FrozenLake-v1-4x4-no_slippery #q-learning #reinforcement-learning #custom-implementation #model-index #region-us \n# Q-Learning Agent playing1 FrozenLake-v1\n This is a trained model of a Q-Learning agent playing FrozenLake-v1 .\n\n ## Usage" ]
text-generation
transformers
![image/png](https://cdn-uploads.huggingface.co/production/uploads/641b435ba5f876fe30c5ae0a/WgAmE3R1j77gr3wLGSRkL.png) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/641b435ba5f876fe30c5ae0a/9C3lSMEWuJaaWnJbJ7Y_g.png) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/641b435ba5f876fe30c5ae0a/mCpSS4JkBpNLZ_SIZte6s.png) ## Metrics ``` TrainOutput( global_step=6732, training_loss=2.959243166963755, metrics={ 'train_runtime': 66894.0816, 'train_samples_per_second': 1.61, 'train_steps_per_second': 0.101, 'total_flos': 0.0, 'train_loss': 2.959243166963755, 'epoch': 2.0056606584239534 } ) ``` ## example output: ```py <bos><start_of_turn>system You are a helpful AI assistant. Its objective is to fulfill exactly the task that the user is asked of it. Use your learned knowledge<end_of_turn> <start_of_turn>user explicame yolo en solo 2 items, dame un ejemplo en python<end_of_turn> <start_of_turn>model El YOLO (YOLO: You Only Look Once) es un modelo de aprendizaje automático que se utiliza para detectar objetos en imágenes o videos sin necesidad de procesar toda la imagen o video de manera exhaustiva. El objetivo del YOLO es identificar los objetos con mayor probabilidad de ser presentes en una imagen que se ha precalificado con múltiples configuraciones del mismo. Para implementar el YOLO en Python, primero se debe instalar el módulo `yolo` si aún no lo hay, utilizando el siguiente comando en la línea de comandos: bash: pip install yolo Luego, se puede escribir el código en Python usando el módulo `yolo`: from yolo import YOLO import cv2 # Carga la configuración del YOLO config = YOLO(weights='yolov3s.pt5', classes=40) # Inicializa la cámara web camera = cv2.VideoCapture(0) # Continúa capturando imágenes hasta que se cierre la sesión while True: ret, frame = camera.read() if not ret: break # Preprocesamiento de la imagen img = cv2.resize(frame[0], (300, 300)) rgb_image = np.array(img).astype('float32') / 255.0 # Ejecuta el YOLO sobre la imagen results = config.predict(np.expand_dims(rgb_image, axis=[0, 1])) # Busca las salidas más relevantes class_id = int(results.argmax()) class_name = config.get_class_name(class_id) # Dibuja el cuadro alrededor del objeto encontrado cv2.rectangle(frame, (class_name, class_name + 5), (class_name + 5, class_name + 5), (0, 255, 0), 2) # Actualiza la salida de la captura de fotos cv2.imshow('Frame %d' % int(len(results)), frame) # Si se presiona 'Esc' se detiene la captura if cv2.waitKey(1) & 0xFF == ord('esc'): break # Cierra la cámara web camera.release() cv2.destroyAllWindows() Este código utiliza el módulo `yolo` para cargar una configuración del YOLO, luego capturar imágenes con la cámara web y detectarlos utilizando el YOLO. Los resultados son dibujados en la imagen resultante y se espera que se presione cualquier tecla 'Esc' para detener la captura.<end_of_turn> CPU times: user 30.7 s, sys: 7.27 ms, total: 30.7 s Wall time: 30.7 s ``` ## colab examples. ``` model_id= "NickyNicky/gemma-1.1-2b-it_oasst_format_chatML_unsloth_V1_orpo_V2" https://colab.research.google.com/drive/16qS7NMSu20LzcwvYCrBGVI7rd9Hr-vpN?usp=sharing ```
{"language": ["en", "es"], "license": "apache-2.0", "datasets": ["NickyNicky/oasst2_orpo_mix_tokenizer_phi_3_v1"]}
NickyNicky/gemma-1.1-2b-it_oasst_format_chatML_unsloth_V1_orpo_V2
null
[ "transformers", "safetensors", "gemma", "text-generation", "conversational", "en", "es", "dataset:NickyNicky/oasst2_orpo_mix_tokenizer_phi_3_v1", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T18:42:36+00:00
[]
[ "en", "es" ]
TAGS #transformers #safetensors #gemma #text-generation #conversational #en #es #dataset-NickyNicky/oasst2_orpo_mix_tokenizer_phi_3_v1 #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
!image/png !image/png !image/png ## Metrics ## example output: ## colab examples.
[ "## Metrics", "## example output:", "## colab examples." ]
[ "TAGS\n#transformers #safetensors #gemma #text-generation #conversational #en #es #dataset-NickyNicky/oasst2_orpo_mix_tokenizer_phi_3_v1 #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "## Metrics", "## example output:", "## colab examples." ]
[ 75, 4, 5, 6 ]
[ "TAGS\n#transformers #safetensors #gemma #text-generation #conversational #en #es #dataset-NickyNicky/oasst2_orpo_mix_tokenizer_phi_3_v1 #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n## Metrics## example output:## colab examples." ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mistral_train_run1 This model is a fine-tuned version of [mistralai/Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 1 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 2 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: constant - lr_scheduler_warmup_ratio: 0.03 - num_epochs: 3 ### Training results ### Framework versions - PEFT 0.7.2.dev0 - Transformers 4.36.2 - Pytorch 2.1.2+cu121 - Datasets 2.16.1 - Tokenizers 0.15.1
{"license": "apache-2.0", "library_name": "peft", "tags": ["trl", "sft", "generated_from_trainer"], "base_model": "mistralai/Mistral-7B-Instruct-v0.1", "model-index": [{"name": "mistral_train_run1", "results": []}]}
isaaclee/witness_count_mistral_train_run1
null
[ "peft", "safetensors", "trl", "sft", "generated_from_trainer", "base_model:mistralai/Mistral-7B-Instruct-v0.1", "license:apache-2.0", "region:us" ]
null
2024-04-30T18:42:49+00:00
[]
[]
TAGS #peft #safetensors #trl #sft #generated_from_trainer #base_model-mistralai/Mistral-7B-Instruct-v0.1 #license-apache-2.0 #region-us
# mistral_train_run1 This model is a fine-tuned version of mistralai/Mistral-7B-Instruct-v0.1 on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 1 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 2 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: constant - lr_scheduler_warmup_ratio: 0.03 - num_epochs: 3 ### Training results ### Framework versions - PEFT 0.7.2.dev0 - Transformers 4.36.2 - Pytorch 2.1.2+cu121 - Datasets 2.16.1 - Tokenizers 0.15.1
[ "# mistral_train_run1\n\nThis model is a fine-tuned version of mistralai/Mistral-7B-Instruct-v0.1 on the None dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 0.0002\n- train_batch_size: 1\n- eval_batch_size: 8\n- seed: 42\n- gradient_accumulation_steps: 2\n- total_train_batch_size: 2\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: constant\n- lr_scheduler_warmup_ratio: 0.03\n- num_epochs: 3", "### Training results", "### Framework versions\n\n- PEFT 0.7.2.dev0\n- Transformers 4.36.2\n- Pytorch 2.1.2+cu121\n- Datasets 2.16.1\n- Tokenizers 0.15.1" ]
[ "TAGS\n#peft #safetensors #trl #sft #generated_from_trainer #base_model-mistralai/Mistral-7B-Instruct-v0.1 #license-apache-2.0 #region-us \n", "# mistral_train_run1\n\nThis model is a fine-tuned version of mistralai/Mistral-7B-Instruct-v0.1 on the None dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 0.0002\n- train_batch_size: 1\n- eval_batch_size: 8\n- seed: 42\n- gradient_accumulation_steps: 2\n- total_train_batch_size: 2\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: constant\n- lr_scheduler_warmup_ratio: 0.03\n- num_epochs: 3", "### Training results", "### Framework versions\n\n- PEFT 0.7.2.dev0\n- Transformers 4.36.2\n- Pytorch 2.1.2+cu121\n- Datasets 2.16.1\n- Tokenizers 0.15.1" ]
[ 54, 40, 7, 9, 9, 4, 126, 5, 55 ]
[ "TAGS\n#peft #safetensors #trl #sft #generated_from_trainer #base_model-mistralai/Mistral-7B-Instruct-v0.1 #license-apache-2.0 #region-us \n# mistral_train_run1\n\nThis model is a fine-tuned version of mistralai/Mistral-7B-Instruct-v0.1 on the None dataset.## Model description\n\nMore information needed## Intended uses & limitations\n\nMore information needed## Training and evaluation data\n\nMore information needed## Training procedure### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 0.0002\n- train_batch_size: 1\n- eval_batch_size: 8\n- seed: 42\n- gradient_accumulation_steps: 2\n- total_train_batch_size: 2\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: constant\n- lr_scheduler_warmup_ratio: 0.03\n- num_epochs: 3### Training results### Framework versions\n\n- PEFT 0.7.2.dev0\n- Transformers 4.36.2\n- Pytorch 2.1.2+cu121\n- Datasets 2.16.1\n- Tokenizers 0.15.1" ]
text-generation
transformers
![image/png](https://cdn-uploads.huggingface.co/production/uploads/64c14f6b02e1f8f67c73bd05/pf4d6FA7DriRtVq5HCkxd.png) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/64c14f6b02e1f8f67c73bd05/VcZWbW_eZkJAZZ5ricL4B.png) # Llama-3-Giraffe-70B Abacus.AI presents our longer-necked variant of Llama 3 70B! This model has an effective context length of approximately 128k. We have currently trained on ~1B tokens. This is an initial release and we are hoping to improve the heatmap below further as we continue training. ![image/png](https://cdn-uploads.huggingface.co/production/uploads/64c14f6b02e1f8f67c73bd05/_NVEuQ2ZT-sBtDBNjgmbt.png) ## Training Methodology The methodology for training uses [PoSE](https://arxiv.org/abs/2309.10400) and dynamic-NTK interpolation. ### NTK-scaling The scale factor for NTK is 4. Note that we also tried theta-scaling but this did not work as well as NTK scaling in our experiments. ### PoSE We utilise Positional Skip-wise Training (PoSE) with the following parameters: - **Number of Chunks**: 5 - **Max position ID**: 32768 ### Data We use on average ~8K long samples from [RedPajama](https://github.com/togethercomputer/RedPajama-Data). ### Hardware We train on 8xH100 GPUs with Deepspeed Zero Stage 3. ## Evaluation Methodology We use the [EasyContext](https://github.com/abacusai/EasyContext/blob/eval_runs/eval_needle.py) implementation of Needle-in-a-Haystack to evaluate Llama-3-Giraffe-70B. We evaluate with the following parameters: - **Min context length**: 2000 - **Max context length**: 128000 - **Context interval**: 4000 - **Depth interval**: 0.1 - **Num samples**: 2 - **Rnd number digits**: 7 - **Haystack dir**: PaulGrahamEssays
{"language": ["en"], "license": "llama3", "tags": ["meta", "llama-3"], "pipeline_tag": "text-generation"}
abacusai/Llama-3-Giraffe-70B
null
[ "transformers", "safetensors", "llama", "text-generation", "meta", "llama-3", "en", "arxiv:2309.10400", "license:llama3", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T18:43:01+00:00
[ "2309.10400" ]
[ "en" ]
TAGS #transformers #safetensors #llama #text-generation #meta #llama-3 #en #arxiv-2309.10400 #license-llama3 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
!image/png !image/png # Llama-3-Giraffe-70B Abacus.AI presents our longer-necked variant of Llama 3 70B! This model has an effective context length of approximately 128k. We have currently trained on ~1B tokens. This is an initial release and we are hoping to improve the heatmap below further as we continue training. !image/png ## Training Methodology The methodology for training uses PoSE and dynamic-NTK interpolation. ### NTK-scaling The scale factor for NTK is 4. Note that we also tried theta-scaling but this did not work as well as NTK scaling in our experiments. ### PoSE We utilise Positional Skip-wise Training (PoSE) with the following parameters: - Number of Chunks: 5 - Max position ID: 32768 ### Data We use on average ~8K long samples from RedPajama. ### Hardware We train on 8xH100 GPUs with Deepspeed Zero Stage 3. ## Evaluation Methodology We use the EasyContext implementation of Needle-in-a-Haystack to evaluate Llama-3-Giraffe-70B. We evaluate with the following parameters: - Min context length: 2000 - Max context length: 128000 - Context interval: 4000 - Depth interval: 0.1 - Num samples: 2 - Rnd number digits: 7 - Haystack dir: PaulGrahamEssays
[ "# Llama-3-Giraffe-70B\n\nAbacus.AI presents our longer-necked variant of Llama 3 70B!\n\nThis model has an effective context length of approximately 128k.\n\nWe have currently trained on ~1B tokens.\nThis is an initial release and we are hoping to improve the heatmap below further as we continue training.\n\n!image/png", "## Training Methodology\n\nThe methodology for training uses PoSE and dynamic-NTK interpolation.", "### NTK-scaling\n\nThe scale factor for NTK is 4. Note that we also tried theta-scaling but this did not work as well as NTK scaling in our experiments.", "### PoSE\n\nWe utilise Positional Skip-wise Training (PoSE) with the following parameters:\n\n- Number of Chunks: 5\n- Max position ID: 32768", "### Data\n\nWe use on average ~8K long samples from RedPajama.", "### Hardware\n\nWe train on 8xH100 GPUs with Deepspeed Zero Stage 3.", "## Evaluation Methodology\n\nWe use the EasyContext implementation of Needle-in-a-Haystack to evaluate Llama-3-Giraffe-70B.\n\nWe evaluate with the following parameters:\n\n- Min context length: 2000\n- Max context length: 128000\n- Context interval: 4000\n- Depth interval: 0.1\n- Num samples: 2\n- Rnd number digits: 7\n- Haystack dir: PaulGrahamEssays" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #meta #llama-3 #en #arxiv-2309.10400 #license-llama3 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Llama-3-Giraffe-70B\n\nAbacus.AI presents our longer-necked variant of Llama 3 70B!\n\nThis model has an effective context length of approximately 128k.\n\nWe have currently trained on ~1B tokens.\nThis is an initial release and we are hoping to improve the heatmap below further as we continue training.\n\n!image/png", "## Training Methodology\n\nThe methodology for training uses PoSE and dynamic-NTK interpolation.", "### NTK-scaling\n\nThe scale factor for NTK is 4. Note that we also tried theta-scaling but this did not work as well as NTK scaling in our experiments.", "### PoSE\n\nWe utilise Positional Skip-wise Training (PoSE) with the following parameters:\n\n- Number of Chunks: 5\n- Max position ID: 32768", "### Data\n\nWe use on average ~8K long samples from RedPajama.", "### Hardware\n\nWe train on 8xH100 GPUs with Deepspeed Zero Stage 3.", "## Evaluation Methodology\n\nWe use the EasyContext implementation of Needle-in-a-Haystack to evaluate Llama-3-Giraffe-70B.\n\nWe evaluate with the following parameters:\n\n- Min context length: 2000\n- Max context length: 128000\n- Context interval: 4000\n- Depth interval: 0.1\n- Num samples: 2\n- Rnd number digits: 7\n- Haystack dir: PaulGrahamEssays" ]
[ 59, 79, 19, 39, 36, 19, 21, 92 ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #meta #llama-3 #en #arxiv-2309.10400 #license-llama3 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# Llama-3-Giraffe-70B\n\nAbacus.AI presents our longer-necked variant of Llama 3 70B!\n\nThis model has an effective context length of approximately 128k.\n\nWe have currently trained on ~1B tokens.\nThis is an initial release and we are hoping to improve the heatmap below further as we continue training.\n\n!image/png## Training Methodology\n\nThe methodology for training uses PoSE and dynamic-NTK interpolation.### NTK-scaling\n\nThe scale factor for NTK is 4. Note that we also tried theta-scaling but this did not work as well as NTK scaling in our experiments.### PoSE\n\nWe utilise Positional Skip-wise Training (PoSE) with the following parameters:\n\n- Number of Chunks: 5\n- Max position ID: 32768### Data\n\nWe use on average ~8K long samples from RedPajama.### Hardware\n\nWe train on 8xH100 GPUs with Deepspeed Zero Stage 3.## Evaluation Methodology\n\nWe use the EasyContext implementation of Needle-in-a-Haystack to evaluate Llama-3-Giraffe-70B.\n\nWe evaluate with the following parameters:\n\n- Min context length: 2000\n- Max context length: 128000\n- Context interval: 4000\n- Depth interval: 0.1\n- Num samples: 2\n- Rnd number digits: 7\n- Haystack dir: PaulGrahamEssays" ]
automatic-speech-recognition
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
hdave/wav2vec2-large-xlsr-53-demo-colab
null
[ "transformers", "tensorboard", "safetensors", "wav2vec2", "automatic-speech-recognition", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
2024-04-30T18:43:42+00:00
[ "1910.09700" ]
[]
TAGS #transformers #tensorboard #safetensors #wav2vec2 #automatic-speech-recognition #arxiv-1910.09700 #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #tensorboard #safetensors #wav2vec2 #automatic-speech-recognition #arxiv-1910.09700 #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 42, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #tensorboard #safetensors #wav2vec2 #automatic-speech-recognition #arxiv-1910.09700 #endpoints_compatible #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
feature-extraction
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
claudios/graphcodebert-base
null
[ "transformers", "safetensors", "roberta", "feature-extraction", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
2024-04-30T18:44:22+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #roberta #feature-extraction #arxiv-1910.09700 #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #roberta #feature-extraction #arxiv-1910.09700 #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 32, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #roberta #feature-extraction #arxiv-1910.09700 #endpoints_compatible #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
feature-extraction
transformers
## Model Description We introduce Dragon-multiturn, a retriever specifically designed for the conversational QA scenario. It can handle conversational query which combine dialogue history with the current query. It is built on top of the [Dragon](https://huggingface.co/facebook/dragon-plus-query-encoder) retriever. The details of Dragon-multiturn can be found in [here](https://arxiv.org/abs/2401.10225). **Please note that this repository is for the query encoder of Dragon-multiturn, and we use a separate model for the context encoder, which can be found [here](https://huggingface.co/nvidia/dragon-multiturn-context-encoder).** ## Other Resources [Llama3-ChatQA-1.5-8B](https://huggingface.co/nvidia/Llama3-ChatQA-1.5-8B) &ensp; [Llama3-ChatQA-1.5-70B](https://huggingface.co/nvidia/Llama3-ChatQA-1.5-70B) &ensp; [Evaluation Data](https://huggingface.co/datasets/nvidia/ConvRAG-Bench) &ensp; [Training Data](https://huggingface.co/datasets/nvidia/ChatQA-Training-Data) ## Benchmark Results <style type="text/css"> .tg {border:none;border-collapse:collapse;border-spacing:0;} .tg td{border-style:solid;border-width:1px;font-family:Arial, sans-serif;font-size:14px;overflow:hidden; padding:10px 5px;word-break:normal;} .tg th{border-style:solid;border-width:1px;font-family:Arial, sans-serif;font-size:14px;font-weight:normal; overflow:hidden;padding:10px 5px;word-break:normal;} .tg .tg-c3ow{border-color:inherit;text-align:center;vertical-align:center} .tg .tg-0pky{border-color:inherit;text-align:left;vertical-align:center} </style> <table class="tg"> <thead> <tr> <th class="tg-0pky" rowspan="2"></th> <th class="tg-c3ow" colspan="2">Average</th> <th class="tg-c3ow" colspan="2">Doc2Dial</th> <th class="tg-c3ow" colspan="2">QuAC</th> <th class="tg-c3ow" colspan="2">QReCC</th> <th class="tg-c3ow" colspan="2">TopiOCQA</th> <th class="tg-c3ow" colspan="2">INSCIT</th> </tr> <tr> <th class="tg-c3ow">top-1</th> <th class="tg-c3ow">top-5</th> <th class="tg-c3ow">top-1</th> <th class="tg-c3ow">top-5</th> <th class="tg-c3ow">top-1</th> <th class="tg-c3ow">top-5</th> <th class="tg-c3ow">top-1</th> <th class="tg-c3ow">top-5</th> <th class="tg-c3ow">top-5*</th> <th class="tg-c3ow">top-20*</th> <th class="tg-c3ow">top-5*</th> <th class="tg-c3ow">top-20*</th> </tr> </thead> <tbody> <tr> <td class="tg-0pky">Dragon</td> <td class="tg-c3ow">46.3</td> <td class="tg-c3ow">73.1</td> <td class="tg-c3ow">43.3</td> <td class="tg-c3ow">75.6</td> <td class="tg-c3ow">56.8</td> <td class="tg-c3ow">82.9</td> <td class="tg-c3ow">46.2</td> <td class="tg-c3ow">82.0</td> <td class="tg-c3ow">57.7</td> <td class="tg-c3ow">78.8</td> <td class="tg-c3ow">27.5</td> <td class="tg-c3ow">46.2</td> </tr> <tr> <td class="tg-0pky">Dragon-multiturn</td> <td class="tg-c3ow">53.0</td> <td class="tg-c3ow">81.2</td> <td class="tg-c3ow">48.6</td> <td class="tg-c3ow">83.5</td> <td class="tg-c3ow">54.8</td> <td class="tg-c3ow">83.2</td> <td class="tg-c3ow">49.6</td> <td class="tg-c3ow">86.7</td> <td class="tg-c3ow">64.5</td> <td class="tg-c3ow">85.2</td> <td class="tg-c3ow">47.4</td> <td class="tg-c3ow">67.1</td> </tr> </tbody> </table> Retrieval results across five multi-turn QA datasets (Doc2Dial, QuAC, QReCC, TopiOCQA, INSCIT) with the average top-1 and top-5 recall scores. *Since the average context length in TopiOCQA and INSCIT is smaller than in other datasets, we report top-5 and top-20 to roughly match the context lengths of top-1 and top-5, respectively, in those datasets. 
## How to use ```python import torch from transformers import AutoTokenizer, AutoModel tokenizer = AutoTokenizer.from_pretrained('nvidia/dragon-multiturn-query-encoder') query_encoder = AutoModel.from_pretrained('nvidia/dragon-multiturn-query-encoder') context_encoder = AutoModel.from_pretrained('nvidia/dragon-multiturn-context-encoder') query = [ {"role": "user", "content": "I need help planning my Social Security benefits for my survivors."}, {"role": "agent", "content": "Are you currently planning for your future?"}, {"role": "user", "content": "Yes, I am."} ] contexts = [ "Benefits Planner: Survivors | Planning For Your Survivors \nAs you plan for the future , you'll want to think about what your family would need if you should die now. Social Security can help your family if you have earned enough Social Security credits through your work. You can earn up to four credits each year. In 2019 , for example , you earn one credit for each $1,360 of wages or self - employment income. When you have earned $5,440 , you have earned your four credits for the year. The number of credits needed to provide benefits for your survivors depends on your age when you die. No one needs more than 40 credits 10 years of work to be eligible for any Social Security benefit. But , the younger a person is , the fewer credits they must have for family members to receive survivors benefits. Benefits can be paid to your children and your spouse who is caring for the children even if you don't have the required number of credits. They can get benefits if you have credit for one and one - half years of work 6 credits in the three years just before your death. For Your Widow Or Widower \nThere are about five million widows and widowers receiving monthly Social Security benefits based on their deceased spouse's earnings record.", "Benefits Planner: Retirement \nOther Things to Consider \nWhat Is The Best Age To Start Your Benefits? The answer is that there is no one \" best age \" for everyone and, ultimately, it is your choice. You should make an informed decision about when to apply for benefits based on your individual and family circumstances. Your monthly benefit amount can differ substantially based on the age when you start receiving benefits. If you decide to start benefits : before your full retirement age , your benefit will be smaller but you will receive it for a longer period of time. at your full retirement age or later , you will receive a larger monthly benefit for a shorter period of time. The amount you receive when you first get benefits sets the base for the amount you will receive for the rest of your life. You may want to consider the following when you make that decision : If you plan to continue working , there are limits on how much you can earn each year between age 62 and full retirement age and still get all your benefits. Depending on the amount of your benefit and your earnings for the year , you may have to give up some of your benefits." 
] ## convert query into a format as follows: ## user: {user}\nagent: {agent}\nuser: {user} formatted_query = '\n'.join([turn['role'] + ": " + turn['content'] for turn in query]).strip() ## get query and context embeddings query_input = tokenizer(formatted_query, return_tensors='pt') ctx_input = tokenizer(contexts, padding=True, truncation=True, max_length=512, return_tensors='pt') query_emb = query_encoder(**query_input).last_hidden_state[:, 0, :] ctx_emb = context_encoder(**ctx_input).last_hidden_state[:, 0, :] ## Compute similarity scores using dot product similarities = query_emb.matmul(ctx_emb.transpose(0, 1)) # (1, num_ctx) ## rank the similarity (from highest to lowest) ranked_results = torch.argsort(similarities, dim=-1, descending=True) # (1, num_ctx) ``` ## License Dragon-multiturn is built on top of [Dragon](https://arxiv.org/abs/2302.07452). We refer users to the original license of the Dragon model. ## Correspondence to Zihan Liu ([email protected]), Wei Ping ([email protected]) ## Citation <pre> @article{liu2024chatqa, title={ChatQA: Building GPT-4 Level Conversational QA Models}, author={Liu, Zihan and Ping, Wei and Roy, Rajarshi and Xu, Peng and Lee, Chankyu and Shoeybi, Mohammad and Catanzaro, Bryan}, journal={arXiv preprint arXiv:2401.10225}, year={2024}} </pre>
{"language": ["en"], "license": ["other"], "tag": ["dragon", "retriever", "conversation", "multi-turn", "conversational query"]}
nvidia/dragon-multiturn-query-encoder
null
[ "transformers", "pytorch", "bert", "feature-extraction", "en", "arxiv:2401.10225", "arxiv:2302.07452", "license:other", "endpoints_compatible", "region:us" ]
null
2024-04-30T18:44:35+00:00
[ "2401.10225", "2302.07452" ]
[ "en" ]
TAGS #transformers #pytorch #bert #feature-extraction #en #arxiv-2401.10225 #arxiv-2302.07452 #license-other #endpoints_compatible #region-us
## Model Description We introduce Dragon-multiturn, a retriever specifically designed for the conversational QA scenario. It can handle conversational query which combine dialogue history with the current query. It is built on top of the Dragon retriever. The details of Dragon-multiturn can be found in here. Please note that this repository is for the query encoder of Dragon-multiturn, and we use a separate model for the context encoder, which can be found here. ## Other Resources Llama3-ChatQA-1.5-8B &ensp; Llama3-ChatQA-1.5-70B &ensp; Evaluation Data &ensp; Training Data ## Benchmark Results <style type="text/css"> .tg {border:none;border-collapse:collapse;border-spacing:0;} .tg td{border-style:solid;border-width:1px;font-family:Arial, sans-serif;font-size:14px;overflow:hidden; padding:10px 5px;word-break:normal;} .tg th{border-style:solid;border-width:1px;font-family:Arial, sans-serif;font-size:14px;font-weight:normal; overflow:hidden;padding:10px 5px;word-break:normal;} .tg .tg-c3ow{border-color:inherit;text-align:center;vertical-align:center} .tg .tg-0pky{border-color:inherit;text-align:left;vertical-align:center} </style> <table class="tg"> <thead> <tr> <th class="tg-0pky" rowspan="2"></th> <th class="tg-c3ow" colspan="2">Average</th> <th class="tg-c3ow" colspan="2">Doc2Dial</th> <th class="tg-c3ow" colspan="2">QuAC</th> <th class="tg-c3ow" colspan="2">QReCC</th> <th class="tg-c3ow" colspan="2">TopiOCQA</th> <th class="tg-c3ow" colspan="2">INSCIT</th> </tr> <tr> <th class="tg-c3ow">top-1</th> <th class="tg-c3ow">top-5</th> <th class="tg-c3ow">top-1</th> <th class="tg-c3ow">top-5</th> <th class="tg-c3ow">top-1</th> <th class="tg-c3ow">top-5</th> <th class="tg-c3ow">top-1</th> <th class="tg-c3ow">top-5</th> <th class="tg-c3ow">top-5*</th> <th class="tg-c3ow">top-20*</th> <th class="tg-c3ow">top-5*</th> <th class="tg-c3ow">top-20*</th> </tr> </thead> <tbody> <tr> <td class="tg-0pky">Dragon</td> <td class="tg-c3ow">46.3</td> <td class="tg-c3ow">73.1</td> <td class="tg-c3ow">43.3</td> <td class="tg-c3ow">75.6</td> <td class="tg-c3ow">56.8</td> <td class="tg-c3ow">82.9</td> <td class="tg-c3ow">46.2</td> <td class="tg-c3ow">82.0</td> <td class="tg-c3ow">57.7</td> <td class="tg-c3ow">78.8</td> <td class="tg-c3ow">27.5</td> <td class="tg-c3ow">46.2</td> </tr> <tr> <td class="tg-0pky">Dragon-multiturn</td> <td class="tg-c3ow">53.0</td> <td class="tg-c3ow">81.2</td> <td class="tg-c3ow">48.6</td> <td class="tg-c3ow">83.5</td> <td class="tg-c3ow">54.8</td> <td class="tg-c3ow">83.2</td> <td class="tg-c3ow">49.6</td> <td class="tg-c3ow">86.7</td> <td class="tg-c3ow">64.5</td> <td class="tg-c3ow">85.2</td> <td class="tg-c3ow">47.4</td> <td class="tg-c3ow">67.1</td> </tr> </tbody> </table> Retrieval results across five multi-turn QA datasets (Doc2Dial, QuAC, QReCC, TopiOCQA, INSCIT) with the average top-1 and top-5 recall scores. *Since the average context length in TopiOCQA and INSCIT is smaller than in other datasets, we report top-5 and top-20 to roughly match the context lengths of top-1 and top-5, respectively, in those datasets. ## How to use ## License Dragon-multiturn is built on top of Dragon. We refer users to the original license of the Dragon model. 
## Correspondence to Zihan Liu (zihanl@URL), Wei Ping (wping@URL) <pre> @article{liu2024chatqa, title={ChatQA: Building GPT-4 Level Conversational QA Models}, author={Liu, Zihan and Ping, Wei and Roy, Rajarshi and Xu, Peng and Lee, Chankyu and Shoeybi, Mohammad and Catanzaro, Bryan}, journal={arXiv preprint arXiv:2401.10225}, year={2024}} </pre>
[ "## Model Description\nWe introduce Dragon-multiturn, a retriever specifically designed for the conversational QA scenario. It can handle conversational query which combine dialogue history with the current query. It is built on top of the Dragon retriever. The details of Dragon-multiturn can be found in here. Please note that this repository is for the query encoder of Dragon-multiturn, and we use a separate model for the context encoder, which can be found here.", "## Other Resources\nLlama3-ChatQA-1.5-8B &ensp; Llama3-ChatQA-1.5-70B &ensp; Evaluation Data &ensp; Training Data", "## Benchmark Results\n<style type=\"text/css\">\n.tg {border:none;border-collapse:collapse;border-spacing:0;}\n.tg td{border-style:solid;border-width:1px;font-family:Arial, sans-serif;font-size:14px;overflow:hidden;\n padding:10px 5px;word-break:normal;}\n.tg th{border-style:solid;border-width:1px;font-family:Arial, sans-serif;font-size:14px;font-weight:normal;\n overflow:hidden;padding:10px 5px;word-break:normal;}\n.tg .tg-c3ow{border-color:inherit;text-align:center;vertical-align:center}\n.tg .tg-0pky{border-color:inherit;text-align:left;vertical-align:center}\n</style>\n<table class=\"tg\">\n<thead>\n <tr>\n <th class=\"tg-0pky\" rowspan=\"2\"></th>\n <th class=\"tg-c3ow\" colspan=\"2\">Average</th>\n <th class=\"tg-c3ow\" colspan=\"2\">Doc2Dial</th>\n <th class=\"tg-c3ow\" colspan=\"2\">QuAC</th>\n <th class=\"tg-c3ow\" colspan=\"2\">QReCC</th>\n <th class=\"tg-c3ow\" colspan=\"2\">TopiOCQA</th>\n <th class=\"tg-c3ow\" colspan=\"2\">INSCIT</th>\n </tr>\n <tr>\n <th class=\"tg-c3ow\">top-1</th>\n <th class=\"tg-c3ow\">top-5</th>\n <th class=\"tg-c3ow\">top-1</th>\n <th class=\"tg-c3ow\">top-5</th>\n <th class=\"tg-c3ow\">top-1</th>\n <th class=\"tg-c3ow\">top-5</th>\n <th class=\"tg-c3ow\">top-1</th>\n <th class=\"tg-c3ow\">top-5</th>\n <th class=\"tg-c3ow\">top-5*</th>\n <th class=\"tg-c3ow\">top-20*</th>\n <th class=\"tg-c3ow\">top-5*</th>\n <th class=\"tg-c3ow\">top-20*</th>\n </tr>\n</thead>\n<tbody>\n <tr>\n <td class=\"tg-0pky\">Dragon</td>\n <td class=\"tg-c3ow\">46.3</td>\n <td class=\"tg-c3ow\">73.1</td>\n <td class=\"tg-c3ow\">43.3</td>\n <td class=\"tg-c3ow\">75.6</td>\n <td class=\"tg-c3ow\">56.8</td>\n <td class=\"tg-c3ow\">82.9</td>\n <td class=\"tg-c3ow\">46.2</td>\n <td class=\"tg-c3ow\">82.0</td>\n <td class=\"tg-c3ow\">57.7</td>\n <td class=\"tg-c3ow\">78.8</td>\n <td class=\"tg-c3ow\">27.5</td>\n <td class=\"tg-c3ow\">46.2</td>\n </tr>\n <tr>\n <td class=\"tg-0pky\">Dragon-multiturn</td>\n <td class=\"tg-c3ow\">53.0</td>\n <td class=\"tg-c3ow\">81.2</td>\n <td class=\"tg-c3ow\">48.6</td>\n <td class=\"tg-c3ow\">83.5</td>\n <td class=\"tg-c3ow\">54.8</td>\n <td class=\"tg-c3ow\">83.2</td>\n <td class=\"tg-c3ow\">49.6</td>\n <td class=\"tg-c3ow\">86.7</td>\n <td class=\"tg-c3ow\">64.5</td>\n <td class=\"tg-c3ow\">85.2</td>\n <td class=\"tg-c3ow\">47.4</td>\n <td class=\"tg-c3ow\">67.1</td>\n </tr>\n</tbody>\n</table>\nRetrieval results across five multi-turn QA datasets (Doc2Dial, QuAC, QReCC, TopiOCQA, INSCIT) with the average top-1 and top-5 recall scores. *Since the average context length in TopiOCQA and INSCIT is smaller than in other datasets, we report top-5 and top-20 to roughly match the context lengths of top-1 and top-5, respectively, in those datasets.", "## How to use", "## License\nDragon-multiturn is built on top of Dragon. 
We refer users to the original license of the Dragon model.", "## Correspondence to\nZihan Liu (zihanl@URL), Wei Ping (wping@URL)\n\n\n<pre>\n@article{liu2024chatqa,\n title={ChatQA: Building GPT-4 Level Conversational QA Models},\n author={Liu, Zihan and Ping, Wei and Roy, Rajarshi and Xu, Peng and Lee, Chankyu and Shoeybi, Mohammad and Catanzaro, Bryan},\n journal={arXiv preprint arXiv:2401.10225},\n year={2024}}\n</pre>" ]
[ "TAGS\n#transformers #pytorch #bert #feature-extraction #en #arxiv-2401.10225 #arxiv-2302.07452 #license-other #endpoints_compatible #region-us \n", "## Model Description\nWe introduce Dragon-multiturn, a retriever specifically designed for the conversational QA scenario. It can handle conversational query which combine dialogue history with the current query. It is built on top of the Dragon retriever. The details of Dragon-multiturn can be found in here. Please note that this repository is for the query encoder of Dragon-multiturn, and we use a separate model for the context encoder, which can be found here.", "## Other Resources\nLlama3-ChatQA-1.5-8B &ensp; Llama3-ChatQA-1.5-70B &ensp; Evaluation Data &ensp; Training Data", "## Benchmark Results\n<style type=\"text/css\">\n.tg {border:none;border-collapse:collapse;border-spacing:0;}\n.tg td{border-style:solid;border-width:1px;font-family:Arial, sans-serif;font-size:14px;overflow:hidden;\n padding:10px 5px;word-break:normal;}\n.tg th{border-style:solid;border-width:1px;font-family:Arial, sans-serif;font-size:14px;font-weight:normal;\n overflow:hidden;padding:10px 5px;word-break:normal;}\n.tg .tg-c3ow{border-color:inherit;text-align:center;vertical-align:center}\n.tg .tg-0pky{border-color:inherit;text-align:left;vertical-align:center}\n</style>\n<table class=\"tg\">\n<thead>\n <tr>\n <th class=\"tg-0pky\" rowspan=\"2\"></th>\n <th class=\"tg-c3ow\" colspan=\"2\">Average</th>\n <th class=\"tg-c3ow\" colspan=\"2\">Doc2Dial</th>\n <th class=\"tg-c3ow\" colspan=\"2\">QuAC</th>\n <th class=\"tg-c3ow\" colspan=\"2\">QReCC</th>\n <th class=\"tg-c3ow\" colspan=\"2\">TopiOCQA</th>\n <th class=\"tg-c3ow\" colspan=\"2\">INSCIT</th>\n </tr>\n <tr>\n <th class=\"tg-c3ow\">top-1</th>\n <th class=\"tg-c3ow\">top-5</th>\n <th class=\"tg-c3ow\">top-1</th>\n <th class=\"tg-c3ow\">top-5</th>\n <th class=\"tg-c3ow\">top-1</th>\n <th class=\"tg-c3ow\">top-5</th>\n <th class=\"tg-c3ow\">top-1</th>\n <th class=\"tg-c3ow\">top-5</th>\n <th class=\"tg-c3ow\">top-5*</th>\n <th class=\"tg-c3ow\">top-20*</th>\n <th class=\"tg-c3ow\">top-5*</th>\n <th class=\"tg-c3ow\">top-20*</th>\n </tr>\n</thead>\n<tbody>\n <tr>\n <td class=\"tg-0pky\">Dragon</td>\n <td class=\"tg-c3ow\">46.3</td>\n <td class=\"tg-c3ow\">73.1</td>\n <td class=\"tg-c3ow\">43.3</td>\n <td class=\"tg-c3ow\">75.6</td>\n <td class=\"tg-c3ow\">56.8</td>\n <td class=\"tg-c3ow\">82.9</td>\n <td class=\"tg-c3ow\">46.2</td>\n <td class=\"tg-c3ow\">82.0</td>\n <td class=\"tg-c3ow\">57.7</td>\n <td class=\"tg-c3ow\">78.8</td>\n <td class=\"tg-c3ow\">27.5</td>\n <td class=\"tg-c3ow\">46.2</td>\n </tr>\n <tr>\n <td class=\"tg-0pky\">Dragon-multiturn</td>\n <td class=\"tg-c3ow\">53.0</td>\n <td class=\"tg-c3ow\">81.2</td>\n <td class=\"tg-c3ow\">48.6</td>\n <td class=\"tg-c3ow\">83.5</td>\n <td class=\"tg-c3ow\">54.8</td>\n <td class=\"tg-c3ow\">83.2</td>\n <td class=\"tg-c3ow\">49.6</td>\n <td class=\"tg-c3ow\">86.7</td>\n <td class=\"tg-c3ow\">64.5</td>\n <td class=\"tg-c3ow\">85.2</td>\n <td class=\"tg-c3ow\">47.4</td>\n <td class=\"tg-c3ow\">67.1</td>\n </tr>\n</tbody>\n</table>\nRetrieval results across five multi-turn QA datasets (Doc2Dial, QuAC, QReCC, TopiOCQA, INSCIT) with the average top-1 and top-5 recall scores. *Since the average context length in TopiOCQA and INSCIT is smaller than in other datasets, we report top-5 and top-20 to roughly match the context lengths of top-1 and top-5, respectively, in those datasets.", "## How to use", "## License\nDragon-multiturn is built on top of Dragon. 
We refer users to the original license of the Dragon model.", "## Correspondence to\nZihan Liu (zihanl@URL), Wei Ping (wping@URL)\n\n\n<pre>\n@article{liu2024chatqa,\n title={ChatQA: Building GPT-4 Level Conversational QA Models},\n author={Liu, Zihan and Ping, Wei and Roy, Rajarshi and Xu, Peng and Lee, Chankyu and Shoeybi, Mohammad and Catanzaro, Bryan},\n journal={arXiv preprint arXiv:2401.10225},\n year={2024}}\n</pre>" ]
[ 50, 100, 46, 1334, 5, 26, 131 ]
[ "TAGS\n#transformers #pytorch #bert #feature-extraction #en #arxiv-2401.10225 #arxiv-2302.07452 #license-other #endpoints_compatible #region-us \n## Model Description\nWe introduce Dragon-multiturn, a retriever specifically designed for the conversational QA scenario. It can handle conversational query which combine dialogue history with the current query. It is built on top of the Dragon retriever. The details of Dragon-multiturn can be found in here. Please note that this repository is for the query encoder of Dragon-multiturn, and we use a separate model for the context encoder, which can be found here.## Other Resources\nLlama3-ChatQA-1.5-8B &ensp; Llama3-ChatQA-1.5-70B &ensp; Evaluation Data &ensp; Training Data## Benchmark Results\n<style type=\"text/css\">\n.tg {border:none;border-collapse:collapse;border-spacing:0;}\n.tg td{border-style:solid;border-width:1px;font-family:Arial, sans-serif;font-size:14px;overflow:hidden;\n padding:10px 5px;word-break:normal;}\n.tg th{border-style:solid;border-width:1px;font-family:Arial, sans-serif;font-size:14px;font-weight:normal;\n overflow:hidden;padding:10px 5px;word-break:normal;}\n.tg .tg-c3ow{border-color:inherit;text-align:center;vertical-align:center}\n.tg .tg-0pky{border-color:inherit;text-align:left;vertical-align:center}\n</style>\n<table class=\"tg\">\n<thead>\n <tr>\n <th class=\"tg-0pky\" rowspan=\"2\"></th>\n <th class=\"tg-c3ow\" colspan=\"2\">Average</th>\n <th class=\"tg-c3ow\" colspan=\"2\">Doc2Dial</th>\n <th class=\"tg-c3ow\" colspan=\"2\">QuAC</th>\n <th class=\"tg-c3ow\" colspan=\"2\">QReCC</th>\n <th class=\"tg-c3ow\" colspan=\"2\">TopiOCQA</th>\n <th class=\"tg-c3ow\" colspan=\"2\">INSCIT</th>\n </tr>\n <tr>\n <th class=\"tg-c3ow\">top-1</th>\n <th class=\"tg-c3ow\">top-5</th>\n <th class=\"tg-c3ow\">top-1</th>\n <th class=\"tg-c3ow\">top-5</th>\n <th class=\"tg-c3ow\">top-1</th>\n <th class=\"tg-c3ow\">top-5</th>\n <th class=\"tg-c3ow\">top-1</th>\n <th class=\"tg-c3ow\">top-5</th>\n <th class=\"tg-c3ow\">top-5*</th>\n <th class=\"tg-c3ow\">top-20*</th>\n <th class=\"tg-c3ow\">top-5*</th>\n <th class=\"tg-c3ow\">top-20*</th>\n </tr>\n</thead>\n<tbody>\n <tr>\n <td class=\"tg-0pky\">Dragon</td>\n <td class=\"tg-c3ow\">46.3</td>\n <td class=\"tg-c3ow\">73.1</td>\n <td class=\"tg-c3ow\">43.3</td>\n <td class=\"tg-c3ow\">75.6</td>\n <td class=\"tg-c3ow\">56.8</td>\n <td class=\"tg-c3ow\">82.9</td>\n <td class=\"tg-c3ow\">46.2</td>\n <td class=\"tg-c3ow\">82.0</td>\n <td class=\"tg-c3ow\">57.7</td>\n <td class=\"tg-c3ow\">78.8</td>\n <td class=\"tg-c3ow\">27.5</td>\n <td class=\"tg-c3ow\">46.2</td>\n </tr>\n <tr>\n <td class=\"tg-0pky\">Dragon-multiturn</td>\n <td class=\"tg-c3ow\">53.0</td>\n <td class=\"tg-c3ow\">81.2</td>\n <td class=\"tg-c3ow\">48.6</td>\n <td class=\"tg-c3ow\">83.5</td>\n <td class=\"tg-c3ow\">54.8</td>\n <td class=\"tg-c3ow\">83.2</td>\n <td class=\"tg-c3ow\">49.6</td>\n <td class=\"tg-c3ow\">86.7</td>\n <td class=\"tg-c3ow\">64.5</td>\n <td class=\"tg-c3ow\">85.2</td>\n <td class=\"tg-c3ow\">47.4</td>\n <td class=\"tg-c3ow\">67.1</td>\n </tr>\n</tbody>\n</table>\nRetrieval results across five multi-turn QA datasets (Doc2Dial, QuAC, QReCC, TopiOCQA, INSCIT) with the average top-1 and top-5 recall scores. *Since the average context length in TopiOCQA and INSCIT is smaller than in other datasets, we report top-5 and top-20 to roughly match the context lengths of top-1 and top-5, respectively, in those datasets.## How to use## License\nDragon-multiturn is built on top of Dragon. 
We refer users to the original license of the Dragon model.## Correspondence to\nZihan Liu (zihanl@URL), Wei Ping (wping@URL)\n\n\n<pre>\n@article{liu2024chatqa,\n title={ChatQA: Building GPT-4 Level Conversational QA Models},\n author={Liu, Zihan and Ping, Wei and Roy, Rajarshi and Xu, Peng and Lee, Chankyu and Shoeybi, Mohammad and Catanzaro, Bryan},\n journal={arXiv preprint arXiv:2401.10225},\n year={2024}}\n</pre>" ]
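The how-to-use section in the Dragon-multiturn description above is reproduced without its code, so the following is a minimal sketch of the two-encoder retrieval flow it describes. The repository names `nvidia/dragon-multiturn-query-encoder` / `nvidia/dragon-multiturn-context-encoder`, the CLS-vector pooling, and the example dialogue are assumptions rather than details taken from this record.

```python
import torch
from transformers import AutoTokenizer, AutoModel

# Assumed repository names for the separate query and context encoders.
tokenizer = AutoTokenizer.from_pretrained("nvidia/dragon-multiturn-query-encoder")
query_encoder = AutoModel.from_pretrained("nvidia/dragon-multiturn-query-encoder")
context_encoder = AutoModel.from_pretrained("nvidia/dragon-multiturn-context-encoder")

# A conversational query concatenates the dialogue history with the current turn.
query = "User: How is the weather today?\nAgent: It is sunny.\nUser: Should I bring an umbrella?"
contexts = [
    "Sunny days rarely call for an umbrella.",
    "Umbrellas are most useful during heavy rain.",
]

q = tokenizer(query, return_tensors="pt")
c = tokenizer(contexts, padding=True, truncation=True, max_length=512, return_tensors="pt")
with torch.no_grad():
    q_emb = query_encoder(**q).last_hidden_state[:, 0, :]    # CLS embedding of the query
    c_emb = context_encoder(**c).last_hidden_state[:, 0, :]  # CLS embeddings of the contexts

scores = q_emb @ c_emb.T                        # dot-product relevance scores
ranking = scores.squeeze(0).argsort(descending=True)
```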
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # results This model is a fine-tuned version of [microsoft/phi-1_5](https://huggingface.co/microsoft/phi-1_5) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.0513 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0004 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - gradient_accumulation_steps: 8 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_steps: 100 - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 1.0729 | 0.66 | 200 | 1.0513 | ### Framework versions - PEFT 0.10.0 - Transformers 4.37.0 - Pytorch 2.2.1+cu121 - Datasets 2.19.0 - Tokenizers 0.15.2
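Since the card above only reports the adapter's training setup, here is a hedged sketch of loading that adapter on top of `microsoft/phi-1_5` for generation; the prompt and generation settings are illustrative assumptions, not part of the original card.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

# Load the base model and attach the PEFT adapter from this repository.
base_model = AutoModelForCausalLM.from_pretrained("microsoft/phi-1_5")
tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-1_5")
model = PeftModel.from_pretrained(base_model, "megajajo/results")

inputs = tokenizer("def fibonacci(n):", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```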
{"license": "mit", "library_name": "peft", "tags": ["generated_from_trainer"], "base_model": "microsoft/phi-1_5", "model-index": [{"name": "results", "results": []}]}
megajajo/results
null
[ "peft", "tensorboard", "safetensors", "generated_from_trainer", "base_model:microsoft/phi-1_5", "license:mit", "region:us" ]
null
2024-04-30T18:45:16+00:00
[]
[]
TAGS #peft #tensorboard #safetensors #generated_from_trainer #base_model-microsoft/phi-1_5 #license-mit #region-us
results ======= This model is a fine-tuned version of microsoft/phi-1\_5 on the None dataset. It achieves the following results on the evaluation set: * Loss: 1.0513 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0004 * train\_batch\_size: 4 * eval\_batch\_size: 4 * seed: 42 * gradient\_accumulation\_steps: 8 * total\_train\_batch\_size: 32 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: cosine * lr\_scheduler\_warmup\_steps: 100 * num\_epochs: 1 ### Training results ### Framework versions * PEFT 0.10.0 * Transformers 4.37.0 * Pytorch 2.2.1+cu121 * Datasets 2.19.0 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0004\n* train\\_batch\\_size: 4\n* eval\\_batch\\_size: 4\n* seed: 42\n* gradient\\_accumulation\\_steps: 8\n* total\\_train\\_batch\\_size: 32\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: cosine\n* lr\\_scheduler\\_warmup\\_steps: 100\n* num\\_epochs: 1", "### Training results", "### Framework versions\n\n\n* PEFT 0.10.0\n* Transformers 4.37.0\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #tensorboard #safetensors #generated_from_trainer #base_model-microsoft/phi-1_5 #license-mit #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0004\n* train\\_batch\\_size: 4\n* eval\\_batch\\_size: 4\n* seed: 42\n* gradient\\_accumulation\\_steps: 8\n* total\\_train\\_batch\\_size: 32\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: cosine\n* lr\\_scheduler\\_warmup\\_steps: 100\n* num\\_epochs: 1", "### Training results", "### Framework versions\n\n\n* PEFT 0.10.0\n* Transformers 4.37.0\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.15.2" ]
[ 37, 142, 5, 52 ]
[ "TAGS\n#peft #tensorboard #safetensors #generated_from_trainer #base_model-microsoft/phi-1_5 #license-mit #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0004\n* train\\_batch\\_size: 4\n* eval\\_batch\\_size: 4\n* seed: 42\n* gradient\\_accumulation\\_steps: 8\n* total\\_train\\_batch\\_size: 32\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: cosine\n* lr\\_scheduler\\_warmup\\_steps: 100\n* num\\_epochs: 1### Training results### Framework versions\n\n\n* PEFT 0.10.0\n* Transformers 4.37.0\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.15.2" ]
reinforcement-learning
null
# **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python model = load_from_hub(repo_id="bendupont/q-FrozenLake-v1-4x4-Slippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
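The snippet above relies on a `load_from_hub` helper and a `gym` import that are not shown; a self-contained sketch, assuming the pickle follows the Deep RL Course layout (a dict with `env_id` and `qtable` keys; inspect the file if yours differs), could look like this:

```python
import pickle

import gymnasium as gym
import numpy as np
from huggingface_hub import hf_hub_download

# Download the pickled agent and load it.
path = hf_hub_download(repo_id="bendupont/q-FrozenLake-v1-4x4-Slippery", filename="q-learning.pkl")
with open(path, "rb") as f:
    model = pickle.load(f)

env = gym.make(model["env_id"])  # add map_name/is_slippery kwargs here if needed
state, _ = env.reset()
done = False
while not done:
    action = int(np.argmax(model["qtable"][state]))  # greedy action from the Q-table
    state, reward, terminated, truncated, _ = env.step(action)
    done = terminated or truncated
```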
{"tags": ["FrozenLake-v1-4x4", "q-learning", "reinforcement-learning", "custom-implementation"], "model-index": [{"name": "q-FrozenLake-v1-4x4-Slippery", "results": [{"task": {"type": "reinforcement-learning", "name": "reinforcement-learning"}, "dataset": {"name": "FrozenLake-v1-4x4", "type": "FrozenLake-v1-4x4"}, "metrics": [{"type": "mean_reward", "value": "0.01 +/- 0.10", "name": "mean_reward", "verified": false}]}]}]}
bendupont/q-FrozenLake-v1-4x4-Slippery
null
[ "FrozenLake-v1-4x4", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
null
2024-04-30T18:47:12+00:00
[]
[]
TAGS #FrozenLake-v1-4x4 #q-learning #reinforcement-learning #custom-implementation #model-index #region-us
# Q-Learning Agent playing FrozenLake-v1 This is a trained model of a Q-Learning agent playing FrozenLake-v1. ## Usage
[ "# Q-Learning Agent playing1 FrozenLake-v1\n This is a trained model of a Q-Learning agent playing FrozenLake-v1 .\n\n ## Usage" ]
[ "TAGS\n#FrozenLake-v1-4x4 #q-learning #reinforcement-learning #custom-implementation #model-index #region-us \n", "# Q-Learning Agent playing1 FrozenLake-v1\n This is a trained model of a Q-Learning agent playing FrozenLake-v1 .\n\n ## Usage" ]
[ 31, 33 ]
[ "TAGS\n#FrozenLake-v1-4x4 #q-learning #reinforcement-learning #custom-implementation #model-index #region-us \n# Q-Learning Agent playing1 FrozenLake-v1\n This is a trained model of a Q-Learning agent playing FrozenLake-v1 .\n\n ## Usage" ]
feature-extraction
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
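The getting-started section above is left as a placeholder, so here is a minimal feature-extraction sketch for this checkpoint; the Java input and the choice of the CLS vector as the pooled feature are illustrative assumptions.

```python
import torch
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained("claudios/JavaBERT")
model = AutoModel.from_pretrained("claudios/JavaBERT")

code = "public static int add(int a, int b) { return a + b; }"
inputs = tokenizer(code, return_tensors="pt", truncation=True)
with torch.no_grad():
    hidden = model(**inputs).last_hidden_state  # shape: (1, seq_len, hidden_size)

embedding = hidden[:, 0, :]  # CLS token used as a sequence-level feature
```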
{"library_name": "transformers", "tags": []}
claudios/JavaBERT
null
[ "transformers", "safetensors", "bert", "feature-extraction", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
2024-04-30T18:47:50+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #bert #feature-extraction #arxiv-1910.09700 #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #bert #feature-extraction #arxiv-1910.09700 #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 32, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #bert #feature-extraction #arxiv-1910.09700 #endpoints_compatible #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
text-generation
transformers
Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) Qwen1.5-0.5B - bnb 4bits - Model creator: https://huggingface.co/Qwen/ - Original model: https://huggingface.co/Qwen/Qwen1.5-0.5B/ Original model description: --- license: other license_name: tongyi-qianwen-research license_link: >- https://huggingface.co/Qwen/Qwen1.5-0.5B/blob/main/LICENSE language: - en pipeline_tag: text-generation tags: - pretrained --- # Qwen1.5-0.5B ## Introduction Qwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen, the improvements include: * 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated; * Significant performance improvement in Chat models; * Multilingual support of both base and chat models; * Stable support of 32K context length for models of all sizes * No need of `trust_remote_code`. For more details, please refer to our [blog post](https://qwenlm.github.io/blog/qwen1.5/) and [GitHub repo](https://github.com/QwenLM/Qwen1.5). ## Model Details Qwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, temporarily we did not include GQA and the mixture of SWA and full attention. ## Requirements The code of Qwen1.5 has been in the latest Hugging face transformers and we advise you to install `transformers>=4.37.0`, or you might encounter the following error: ``` KeyError: 'qwen2'. ``` ## Usage We do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model. ## Citation If you find our work helpful, feel free to give us a cite. ``` @article{qwen, title={Qwen Technical Report}, author={Jinze Bai and Shuai Bai and Yunfei Chu and Zeyu Cui and Kai Dang and Xiaodong Deng and Yang Fan and Wenbin Ge and Yu Han and Fei Huang and Binyuan Hui and Luo Ji and Mei Li and Junyang Lin and Runji Lin and Dayiheng Liu and Gao Liu and Chengqiang Lu and Keming Lu and Jianxin Ma and Rui Men and Xingzhang Ren and Xuancheng Ren and Chuanqi Tan and Sinan Tan and Jianhong Tu and Peng Wang and Shijie Wang and Wei Wang and Shengguang Wu and Benfeng Xu and Jin Xu and An Yang and Hao Yang and Jian Yang and Shusheng Yang and Yang Yao and Bowen Yu and Hongyi Yuan and Zheng Yuan and Jianwei Zhang and Xingxuan Zhang and Yichang Zhang and Zhenru Zhang and Chang Zhou and Jingren Zhou and Xiaohuan Zhou and Tianhang Zhu}, journal={arXiv preprint arXiv:2309.16609}, year={2023} } ```
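As a hedged illustration of what this "bnb 4bits" variant corresponds to, the base model can be loaded in 4-bit with bitsandbytes roughly as follows; the compute dtype and device map are assumptions, not settings documented by this repository.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,  # assumed compute dtype
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B")
model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen1.5-0.5B",
    quantization_config=bnb_config,
    device_map="auto",
)
```

As the original description notes, the base model is intended as a starting point for post-training (SFT, RLHF, continued pretraining) rather than for direct text generation.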
{}
RichardErkhov/Qwen_-_Qwen1.5-0.5B-4bits
null
[ "transformers", "safetensors", "qwen2", "text-generation", "conversational", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "4-bit", "region:us" ]
null
2024-04-30T18:50:25+00:00
[]
[]
TAGS #transformers #safetensors #qwen2 #text-generation #conversational #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us
Quantization made by Richard Erkhov. Github Discord Request more models Qwen1.5-0.5B - bnb 4bits - Model creator: URL - Original model: URL Original model description: --- license: other license_name: tongyi-qianwen-research license_link: >- URL language: - en pipeline_tag: text-generation tags: - pretrained --- # Qwen1.5-0.5B ## Introduction Qwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen, the improvements include: * 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated; * Significant performance improvement in Chat models; * Multilingual support of both base and chat models; * Stable support of 32K context length for models of all sizes * No need of 'trust_remote_code'. For more details, please refer to our blog post and GitHub repo. ## Model Details Qwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, temporarily we did not include GQA and the mixture of SWA and full attention. ## Requirements The code of Qwen1.5 has been in the latest Hugging face transformers and we advise you to install 'transformers>=4.37.0', or you might encounter the following error: ## Usage We do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model. If you find our work helpful, feel free to give us a cite.
[ "# Qwen1.5-0.5B", "## Introduction\n\nQwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen, the improvements include:\n\n* 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated;\n* Significant performance improvement in Chat models;\n* Multilingual support of both base and chat models;\n* Stable support of 32K context length for models of all sizes\n* No need of 'trust_remote_code'.\n\nFor more details, please refer to our blog post and GitHub repo.", "## Model Details\nQwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, temporarily we did not include GQA and the mixture of SWA and full attention.", "## Requirements\nThe code of Qwen1.5 has been in the latest Hugging face transformers and we advise you to install 'transformers>=4.37.0', or you might encounter the following error:", "## Usage\n\nWe do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model.\n\n\nIf you find our work helpful, feel free to give us a cite." ]
[ "TAGS\n#transformers #safetensors #qwen2 #text-generation #conversational #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us \n", "# Qwen1.5-0.5B", "## Introduction\n\nQwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen, the improvements include:\n\n* 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated;\n* Significant performance improvement in Chat models;\n* Multilingual support of both base and chat models;\n* Stable support of 32K context length for models of all sizes\n* No need of 'trust_remote_code'.\n\nFor more details, please refer to our blog post and GitHub repo.", "## Model Details\nQwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, temporarily we did not include GQA and the mixture of SWA and full attention.", "## Requirements\nThe code of Qwen1.5 has been in the latest Hugging face transformers and we advise you to install 'transformers>=4.37.0', or you might encounter the following error:", "## Usage\n\nWe do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model.\n\n\nIf you find our work helpful, feel free to give us a cite." ]
[ 42, 11, 160, 113, 44, 66 ]
[ "TAGS\n#transformers #safetensors #qwen2 #text-generation #conversational #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us \n# Qwen1.5-0.5B## Introduction\n\nQwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen, the improvements include:\n\n* 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated;\n* Significant performance improvement in Chat models;\n* Multilingual support of both base and chat models;\n* Stable support of 32K context length for models of all sizes\n* No need of 'trust_remote_code'.\n\nFor more details, please refer to our blog post and GitHub repo.## Model Details\nQwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, temporarily we did not include GQA and the mixture of SWA and full attention.## Requirements\nThe code of Qwen1.5 has been in the latest Hugging face transformers and we advise you to install 'transformers>=4.37.0', or you might encounter the following error:## Usage\n\nWe do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model.\n\n\nIf you find our work helpful, feel free to give us a cite." ]
feature-extraction
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
claudios/JavaBERT-uncased
null
[ "transformers", "safetensors", "bert", "feature-extraction", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
2024-04-30T18:51:35+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #bert #feature-extraction #arxiv-1910.09700 #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #bert #feature-extraction #arxiv-1910.09700 #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 32, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #bert #feature-extraction #arxiv-1910.09700 #endpoints_compatible #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
reinforcement-learning
null
# **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python model = load_from_hub(repo_id="bendupont/q-Taxi-v3", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
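Under the same pickle-layout assumptions noted for the FrozenLake card earlier in this dump (a dict with `env_id` and `qtable` keys), a short evaluation loop for checking the reported mean reward could look like:

```python
import pickle

import gymnasium as gym
import numpy as np
from huggingface_hub import hf_hub_download

path = hf_hub_download(repo_id="bendupont/q-Taxi-v3", filename="q-learning.pkl")
with open(path, "rb") as f:
    model = pickle.load(f)

env = gym.make(model["env_id"])
returns = []
for _ in range(100):
    state, _ = env.reset()
    done, total = False, 0.0
    while not done:
        action = int(np.argmax(model["qtable"][state]))
        state, reward, terminated, truncated, _ = env.step(action)
        total += reward
        done = terminated or truncated
    returns.append(total)

print(f"mean reward: {np.mean(returns):.2f} +/- {np.std(returns):.2f}")
```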
{"tags": ["Taxi-v3", "q-learning", "reinforcement-learning", "custom-implementation"], "model-index": [{"name": "q-Taxi-v3", "results": [{"task": {"type": "reinforcement-learning", "name": "reinforcement-learning"}, "dataset": {"name": "Taxi-v3", "type": "Taxi-v3"}, "metrics": [{"type": "mean_reward", "value": "7.56 +/- 2.71", "name": "mean_reward", "verified": false}]}]}]}
bendupont/q-Taxi-v3
null
[ "Taxi-v3", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
null
2024-04-30T18:51:43+00:00
[]
[]
TAGS #Taxi-v3 #q-learning #reinforcement-learning #custom-implementation #model-index #region-us
# Q-Learning Agent playing Taxi-v3 This is a trained model of a Q-Learning agent playing Taxi-v3. ## Usage
[ "# Q-Learning Agent playing1 Taxi-v3\n This is a trained model of a Q-Learning agent playing Taxi-v3 .\n\n ## Usage" ]
[ "TAGS\n#Taxi-v3 #q-learning #reinforcement-learning #custom-implementation #model-index #region-us \n", "# Q-Learning Agent playing1 Taxi-v3\n This is a trained model of a Q-Learning agent playing Taxi-v3 .\n\n ## Usage" ]
[ 26, 31 ]
[ "TAGS\n#Taxi-v3 #q-learning #reinforcement-learning #custom-implementation #model-index #region-us \n# Q-Learning Agent playing1 Taxi-v3\n This is a trained model of a Q-Learning agent playing Taxi-v3 .\n\n ## Usage" ]
text-generation
transformers
Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) Qwen1.5-0.5B - bnb 8bits - Model creator: https://huggingface.co/Qwen/ - Original model: https://huggingface.co/Qwen/Qwen1.5-0.5B/ Original model description: --- license: other license_name: tongyi-qianwen-research license_link: >- https://huggingface.co/Qwen/Qwen1.5-0.5B/blob/main/LICENSE language: - en pipeline_tag: text-generation tags: - pretrained --- # Qwen1.5-0.5B ## Introduction Qwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen, the improvements include: * 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated; * Significant performance improvement in Chat models; * Multilingual support of both base and chat models; * Stable support of 32K context length for models of all sizes * No need of `trust_remote_code`. For more details, please refer to our [blog post](https://qwenlm.github.io/blog/qwen1.5/) and [GitHub repo](https://github.com/QwenLM/Qwen1.5). ## Model Details Qwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, temporarily we did not include GQA and the mixture of SWA and full attention. ## Requirements The code of Qwen1.5 has been in the latest Hugging face transformers and we advise you to install `transformers>=4.37.0`, or you might encounter the following error: ``` KeyError: 'qwen2'. ``` ## Usage We do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model. ## Citation If you find our work helpful, feel free to give us a cite. ``` @article{qwen, title={Qwen Technical Report}, author={Jinze Bai and Shuai Bai and Yunfei Chu and Zeyu Cui and Kai Dang and Xiaodong Deng and Yang Fan and Wenbin Ge and Yu Han and Fei Huang and Binyuan Hui and Luo Ji and Mei Li and Junyang Lin and Runji Lin and Dayiheng Liu and Gao Liu and Chengqiang Lu and Keming Lu and Jianxin Ma and Rui Men and Xingzhang Ren and Xuancheng Ren and Chuanqi Tan and Sinan Tan and Jianhong Tu and Peng Wang and Shijie Wang and Wei Wang and Shengguang Wu and Benfeng Xu and Jin Xu and An Yang and Hao Yang and Jian Yang and Shusheng Yang and Yang Yao and Bowen Yu and Hongyi Yuan and Zheng Yuan and Jianwei Zhang and Xingxuan Zhang and Yichang Zhang and Zhenru Zhang and Chang Zhou and Jingren Zhou and Xiaohuan Zhou and Tianhang Zhu}, journal={arXiv preprint arXiv:2309.16609}, year={2023} } ```
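A hedged sketch of loading this pre-quantized 8-bit checkpoint directly; this assumes the bitsandbytes quantization config is embedded in the checkpoint and picked up automatically by `from_pretrained` (with `bitsandbytes` and `accelerate` installed).

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("RichardErkhov/Qwen_-_Qwen1.5-0.5B-8bits")
model = AutoModelForCausalLM.from_pretrained(
    "RichardErkhov/Qwen_-_Qwen1.5-0.5B-8bits",
    device_map="auto",
)
print(f"{model.get_memory_footprint() / 1e6:.0f} MB")  # rough footprint check
```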
{}
RichardErkhov/Qwen_-_Qwen1.5-0.5B-8bits
null
[ "transformers", "safetensors", "qwen2", "text-generation", "conversational", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "8-bit", "region:us" ]
null
2024-04-30T18:52:11+00:00
[]
[]
TAGS #transformers #safetensors #qwen2 #text-generation #conversational #autotrain_compatible #endpoints_compatible #text-generation-inference #8-bit #region-us
Quantization made by Richard Erkhov. Github Discord Request more models Qwen1.5-0.5B - bnb 8bits - Model creator: URL - Original model: URL Original model description: --- license: other license_name: tongyi-qianwen-research license_link: >- URL language: - en pipeline_tag: text-generation tags: - pretrained --- # Qwen1.5-0.5B ## Introduction Qwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen, the improvements include: * 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated; * Significant performance improvement in Chat models; * Multilingual support of both base and chat models; * Stable support of 32K context length for models of all sizes * No need of 'trust_remote_code'. For more details, please refer to our blog post and GitHub repo. ## Model Details Qwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, temporarily we did not include GQA and the mixture of SWA and full attention. ## Requirements The code of Qwen1.5 has been in the latest Hugging face transformers and we advise you to install 'transformers>=4.37.0', or you might encounter the following error: ## Usage We do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model. If you find our work helpful, feel free to give us a cite.
[ "# Qwen1.5-0.5B", "## Introduction\n\nQwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen, the improvements include:\n\n* 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated;\n* Significant performance improvement in Chat models;\n* Multilingual support of both base and chat models;\n* Stable support of 32K context length for models of all sizes\n* No need of 'trust_remote_code'.\n\nFor more details, please refer to our blog post and GitHub repo.", "## Model Details\nQwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, temporarily we did not include GQA and the mixture of SWA and full attention.", "## Requirements\nThe code of Qwen1.5 has been in the latest Hugging face transformers and we advise you to install 'transformers>=4.37.0', or you might encounter the following error:", "## Usage\n\nWe do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model.\n\n\nIf you find our work helpful, feel free to give us a cite." ]
[ "TAGS\n#transformers #safetensors #qwen2 #text-generation #conversational #autotrain_compatible #endpoints_compatible #text-generation-inference #8-bit #region-us \n", "# Qwen1.5-0.5B", "## Introduction\n\nQwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen, the improvements include:\n\n* 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated;\n* Significant performance improvement in Chat models;\n* Multilingual support of both base and chat models;\n* Stable support of 32K context length for models of all sizes\n* No need of 'trust_remote_code'.\n\nFor more details, please refer to our blog post and GitHub repo.", "## Model Details\nQwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, temporarily we did not include GQA and the mixture of SWA and full attention.", "## Requirements\nThe code of Qwen1.5 has been in the latest Hugging face transformers and we advise you to install 'transformers>=4.37.0', or you might encounter the following error:", "## Usage\n\nWe do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model.\n\n\nIf you find our work helpful, feel free to give us a cite." ]
[ 42, 11, 160, 113, 44, 66 ]
[ "TAGS\n#transformers #safetensors #qwen2 #text-generation #conversational #autotrain_compatible #endpoints_compatible #text-generation-inference #8-bit #region-us \n# Qwen1.5-0.5B## Introduction\n\nQwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen, the improvements include:\n\n* 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated;\n* Significant performance improvement in Chat models;\n* Multilingual support of both base and chat models;\n* Stable support of 32K context length for models of all sizes\n* No need of 'trust_remote_code'.\n\nFor more details, please refer to our blog post and GitHub repo.## Model Details\nQwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, temporarily we did not include GQA and the mixture of SWA and full attention.## Requirements\nThe code of Qwen1.5 has been in the latest Hugging face transformers and we advise you to install 'transformers>=4.37.0', or you might encounter the following error:## Usage\n\nWe do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model.\n\n\nIf you find our work helpful, feel free to give us a cite." ]
reinforcement-learning
stable-baselines3
# **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib Install the RL Zoo (with SB3 and SB3-Contrib): ```bash pip install rl_zoo3 ``` ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga hui168 -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga hui168 -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python -m rl_zoo3.train --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga hui168 ``` ## Hyperparameters ```python OrderedDict([('batch_size', 32), ('buffer_size', 1000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.00025), ('learning_starts', 100000), ('n_timesteps', 200000), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 2000), ('train_freq', 4), ('normalize', False)]) ``` # Environment Arguments ```python {'render_mode': 'rgb_array'} ```
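Beyond the RL Zoo CLI above, a hedged Python sketch for loading and running this checkpoint with `huggingface_sb3` could look like the following; the zip filename inside the repository is an assumption (RL Zoo uploads typically name it after the algorithm and environment), and the Atari extras/ROMs must be installed.

```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import DQN
from stable_baselines3.common.env_util import make_atari_env
from stable_baselines3.common.vec_env import VecFrameStack

# Assumed filename; list the repo files if the download fails.
checkpoint = load_from_hub(
    repo_id="hui168/dqn-SpaceInvadersNoFrameskip-v4-gpu-2",
    filename="dqn-SpaceInvadersNoFrameskip-v4.zip",
)
model = DQN.load(checkpoint)

# Mirror the training setup: AtariWrapper preprocessing plus a 4-frame stack.
env = VecFrameStack(make_atari_env("SpaceInvadersNoFrameskip-v4", n_envs=1), n_stack=4)
obs = env.reset()
for _ in range(1_000):
    action, _ = model.predict(obs, deterministic=True)
    obs, rewards, dones, infos = env.step(action)
```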
{"library_name": "stable-baselines3", "tags": ["SpaceInvadersNoFrameskip-v4", "deep-reinforcement-learning", "reinforcement-learning", "stable-baselines3"], "model-index": [{"name": "DQN", "results": [{"task": {"type": "reinforcement-learning", "name": "reinforcement-learning"}, "dataset": {"name": "SpaceInvadersNoFrameskip-v4", "type": "SpaceInvadersNoFrameskip-v4"}, "metrics": [{"type": "mean_reward", "value": "239.50 +/- 117.59", "name": "mean_reward", "verified": false}]}]}]}
hui168/dqn-SpaceInvadersNoFrameskip-v4-gpu-2
null
[ "stable-baselines3", "SpaceInvadersNoFrameskip-v4", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
null
2024-04-30T18:52:15+00:00
[]
[]
TAGS #stable-baselines3 #SpaceInvadersNoFrameskip-v4 #deep-reinforcement-learning #reinforcement-learning #model-index #region-us
# DQN Agent playing SpaceInvadersNoFrameskip-v4 This is a trained model of a DQN agent playing SpaceInvadersNoFrameskip-v4 using the stable-baselines3 library and the RL Zoo. The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: URL SB3: URL SB3 Contrib: URL Install the RL Zoo (with SB3 and SB3-Contrib): If you installed the RL Zoo3 via pip ('pip install rl_zoo3'), from anywhere you can do: ## Training (with the RL Zoo) ## Hyperparameters # Environment Arguments
[ "# DQN Agent playing SpaceInvadersNoFrameskip-v4\nThis is a trained model of a DQN agent playing SpaceInvadersNoFrameskip-v4\nusing the stable-baselines3 library\nand the RL Zoo.\n\nThe RL Zoo is a training framework for Stable Baselines3\nreinforcement learning agents,\nwith hyperparameter optimization and pre-trained agents included.", "## Usage (with SB3 RL Zoo)\n\nRL Zoo: URL\nSB3: URL\nSB3 Contrib: URL\n\nInstall the RL Zoo (with SB3 and SB3-Contrib):\n\n\n\n\nIf you installed the RL Zoo3 via pip ('pip install rl_zoo3'), from anywhere you can do:", "## Training (with the RL Zoo)", "## Hyperparameters", "# Environment Arguments" ]
[ "TAGS\n#stable-baselines3 #SpaceInvadersNoFrameskip-v4 #deep-reinforcement-learning #reinforcement-learning #model-index #region-us \n", "# DQN Agent playing SpaceInvadersNoFrameskip-v4\nThis is a trained model of a DQN agent playing SpaceInvadersNoFrameskip-v4\nusing the stable-baselines3 library\nand the RL Zoo.\n\nThe RL Zoo is a training framework for Stable Baselines3\nreinforcement learning agents,\nwith hyperparameter optimization and pre-trained agents included.", "## Usage (with SB3 RL Zoo)\n\nRL Zoo: URL\nSB3: URL\nSB3 Contrib: URL\n\nInstall the RL Zoo (with SB3 and SB3-Contrib):\n\n\n\n\nIf you installed the RL Zoo3 via pip ('pip install rl_zoo3'), from anywhere you can do:", "## Training (with the RL Zoo)", "## Hyperparameters", "# Environment Arguments" ]
[ 37, 81, 76, 10, 6, 3 ]
[ "TAGS\n#stable-baselines3 #SpaceInvadersNoFrameskip-v4 #deep-reinforcement-learning #reinforcement-learning #model-index #region-us \n# DQN Agent playing SpaceInvadersNoFrameskip-v4\nThis is a trained model of a DQN agent playing SpaceInvadersNoFrameskip-v4\nusing the stable-baselines3 library\nand the RL Zoo.\n\nThe RL Zoo is a training framework for Stable Baselines3\nreinforcement learning agents,\nwith hyperparameter optimization and pre-trained agents included.## Usage (with SB3 RL Zoo)\n\nRL Zoo: URL\nSB3: URL\nSB3 Contrib: URL\n\nInstall the RL Zoo (with SB3 and SB3-Contrib):\n\n\n\n\nIf you installed the RL Zoo3 via pip ('pip install rl_zoo3'), from anywhere you can do:## Training (with the RL Zoo)## Hyperparameters# Environment Arguments" ]
text-generation
transformers
Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) Qwen1.5-1.8B - bnb 4bits - Model creator: https://huggingface.co/Qwen/ - Original model: https://huggingface.co/Qwen/Qwen1.5-1.8B/ Original model description: --- license: other license_name: tongyi-qianwen-research license_link: >- https://huggingface.co/Qwen/Qwen1.5-1.8B/blob/main/LICENSE language: - en pipeline_tag: text-generation tags: - pretrained --- # Qwen1.5-1.8B ## Introduction Qwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen, the improvements include: * 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated; * Significant performance improvement in Chat models; * Multilingual support of both base and chat models; * Stable support of 32K context length for models of all sizes * No need of `trust_remote_code`. For more details, please refer to our [blog post](https://qwenlm.github.io/blog/qwen1.5/) and [GitHub repo](https://github.com/QwenLM/Qwen1.5). ## Model Details Qwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, temporarily we did not include GQA (except for 32B) and the mixture of SWA and full attention. ## Requirements The code of Qwen1.5 has been in the latest Hugging face transformers and we advise you to install `transformers>=4.37.0`, or you might encounter the following error: ``` KeyError: 'qwen2'. ``` ## Usage We do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model. ## Citation If you find our work helpful, feel free to give us a cite. ``` @article{qwen, title={Qwen Technical Report}, author={Jinze Bai and Shuai Bai and Yunfei Chu and Zeyu Cui and Kai Dang and Xiaodong Deng and Yang Fan and Wenbin Ge and Yu Han and Fei Huang and Binyuan Hui and Luo Ji and Mei Li and Junyang Lin and Runji Lin and Dayiheng Liu and Gao Liu and Chengqiang Lu and Keming Lu and Jianxin Ma and Rui Men and Xingzhang Ren and Xuancheng Ren and Chuanqi Tan and Sinan Tan and Jianhong Tu and Peng Wang and Shijie Wang and Wei Wang and Shengguang Wu and Benfeng Xu and Jin Xu and An Yang and Hao Yang and Jian Yang and Shusheng Yang and Yang Yao and Bowen Yu and Hongyi Yuan and Zheng Yuan and Jianwei Zhang and Xingxuan Zhang and Yichang Zhang and Zhenru Zhang and Chang Zhou and Jingren Zhou and Xiaohuan Zhou and Tianhang Zhu}, journal={arXiv preprint arXiv:2309.16609}, year={2023} } ```
{}
RichardErkhov/Qwen_-_Qwen1.5-1.8B-4bits
null
[ "transformers", "safetensors", "qwen2", "text-generation", "conversational", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "4-bit", "region:us" ]
null
2024-04-30T18:53:14+00:00
[]
[]
TAGS #transformers #safetensors #qwen2 #text-generation #conversational #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us
Quantization made by Richard Erkhov. Github Discord Request more models Qwen1.5-1.8B - bnb 4bits - Model creator: URL - Original model: URL Original model description: --- license: other license_name: tongyi-qianwen-research license_link: >- URL language: - en pipeline_tag: text-generation tags: - pretrained --- # Qwen1.5-1.8B ## Introduction Qwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen, the improvements include: * 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated; * Significant performance improvement in Chat models; * Multilingual support of both base and chat models; * Stable support of 32K context length for models of all sizes * No need of 'trust_remote_code'. For more details, please refer to our blog post and GitHub repo. ## Model Details Qwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, temporarily we did not include GQA (except for 32B) and the mixture of SWA and full attention. ## Requirements The code of Qwen1.5 has been in the latest Hugging face transformers and we advise you to install 'transformers>=4.37.0', or you might encounter the following error: ## Usage We do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model. If you find our work helpful, feel free to give us a cite.
[ "# Qwen1.5-1.8B", "## Introduction\n\nQwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen, the improvements include:\n\n* 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated;\n* Significant performance improvement in Chat models;\n* Multilingual support of both base and chat models;\n* Stable support of 32K context length for models of all sizes\n* No need of 'trust_remote_code'.\n\nFor more details, please refer to our blog post and GitHub repo.", "## Model Details\nQwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, temporarily we did not include GQA (except for 32B) and the mixture of SWA and full attention.", "## Requirements\nThe code of Qwen1.5 has been in the latest Hugging face transformers and we advise you to install 'transformers>=4.37.0', or you might encounter the following error:", "## Usage\n\nWe do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model.\n\n\nIf you find our work helpful, feel free to give us a cite." ]
[ "TAGS\n#transformers #safetensors #qwen2 #text-generation #conversational #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us \n", "# Qwen1.5-1.8B", "## Introduction\n\nQwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen, the improvements include:\n\n* 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated;\n* Significant performance improvement in Chat models;\n* Multilingual support of both base and chat models;\n* Stable support of 32K context length for models of all sizes\n* No need of 'trust_remote_code'.\n\nFor more details, please refer to our blog post and GitHub repo.", "## Model Details\nQwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, temporarily we did not include GQA (except for 32B) and the mixture of SWA and full attention.", "## Requirements\nThe code of Qwen1.5 has been in the latest Hugging face transformers and we advise you to install 'transformers>=4.37.0', or you might encounter the following error:", "## Usage\n\nWe do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model.\n\n\nIf you find our work helpful, feel free to give us a cite." ]
[ 42, 11, 160, 119, 44, 66 ]
[ "TAGS\n#transformers #safetensors #qwen2 #text-generation #conversational #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us \n# Qwen1.5-1.8B## Introduction\n\nQwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen, the improvements include:\n\n* 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated;\n* Significant performance improvement in Chat models;\n* Multilingual support of both base and chat models;\n* Stable support of 32K context length for models of all sizes\n* No need of 'trust_remote_code'.\n\nFor more details, please refer to our blog post and GitHub repo.## Model Details\nQwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, temporarily we did not include GQA (except for 32B) and the mixture of SWA and full attention.## Requirements\nThe code of Qwen1.5 has been in the latest Hugging face transformers and we advise you to install 'transformers>=4.37.0', or you might encounter the following error:## Usage\n\nWe do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model.\n\n\nIf you find our work helpful, feel free to give us a cite." ]
feature-extraction
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
claudios/CodeGPT-small-py-adaptedGPT2
null
[ "transformers", "safetensors", "gpt2", "feature-extraction", "arxiv:1910.09700", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T18:54:52+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #gpt2 #feature-extraction #arxiv-1910.09700 #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #gpt2 #feature-extraction #arxiv-1910.09700 #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 40, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #gpt2 #feature-extraction #arxiv-1910.09700 #endpoints_compatible #text-generation-inference #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
text2text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # codeT5-small-without-lora-with-prompt This model is a fine-tuned version of [Salesforce/codet5-small](https://huggingface.co/Salesforce/codet5-small) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.8393 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 12 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 1.1708 | 1.0 | 4383 | 0.9260 | | 1.0645 | 2.0 | 8766 | 0.8791 | | 1.0192 | 3.0 | 13149 | 0.8537 | | 1.0103 | 4.0 | 17532 | 0.8397 | | 0.9855 | 5.0 | 21915 | 0.8393 | ### Framework versions - Transformers 4.40.1 - Pytorch 2.2.1+cu121 - Tokenizers 0.19.1
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "base_model": "Salesforce/codet5-small", "model-index": [{"name": "codeT5-small-without-lora-with-prompt", "results": []}]}
EEsu/codeT5-small-without-lora-with-prompt
null
[ "transformers", "safetensors", "t5", "text2text-generation", "generated_from_trainer", "base_model:Salesforce/codet5-small", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T18:55:08+00:00
[]
[]
TAGS #transformers #safetensors #t5 #text2text-generation #generated_from_trainer #base_model-Salesforce/codet5-small #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
codeT5-small-without-lora-with-prompt ===================================== This model is a fine-tuned version of Salesforce/codet5-small on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 0.8393 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0001 * train\_batch\_size: 12 * eval\_batch\_size: 8 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * num\_epochs: 5 ### Training results ### Framework versions * Transformers 4.40.1 * Pytorch 2.2.1+cu121 * Tokenizers 0.19.1
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0001\n* train\\_batch\\_size: 12\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 5", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.1\n* Pytorch 2.2.1+cu121\n* Tokenizers 0.19.1" ]
[ "TAGS\n#transformers #safetensors #t5 #text2text-generation #generated_from_trainer #base_model-Salesforce/codet5-small #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0001\n* train\\_batch\\_size: 12\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 5", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.1\n* Pytorch 2.2.1+cu121\n* Tokenizers 0.19.1" ]
[ 63, 101, 5, 35 ]
[ "TAGS\n#transformers #safetensors #t5 #text2text-generation #generated_from_trainer #base_model-Salesforce/codet5-small #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0001\n* train\\_batch\\_size: 12\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 5### Training results### Framework versions\n\n\n* Transformers 4.40.1\n* Pytorch 2.2.1+cu121\n* Tokenizers 0.19.1" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # NDD-mrbs_test-content This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.1564 - Accuracy: 0.9552 - F1: 0.9556 - Precision: 0.9575 - Recall: 0.9552 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Precision | Recall | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:---------:|:------:| | 0.1192 | 1.0 | 672 | 0.1996 | 0.9515 | 0.9520 | 0.9553 | 0.9515 | | 0.069 | 2.0 | 1344 | 0.1564 | 0.9552 | 0.9556 | 0.9575 | 0.9552 | ### Framework versions - Transformers 4.40.1 - Pytorch 2.2.1+cu121 - Datasets 2.19.0 - Tokenizers 0.19.1
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "metrics": ["accuracy", "f1", "precision", "recall"], "base_model": "distilbert-base-uncased", "model-index": [{"name": "NDD-mrbs_test-content", "results": []}]}
lgk03/NDD-mrbs_test-content
null
[ "transformers", "tensorboard", "safetensors", "distilbert", "text-classification", "generated_from_trainer", "base_model:distilbert-base-uncased", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-30T18:57:26+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #distilbert #text-classification #generated_from_trainer #base_model-distilbert-base-uncased #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us
NDD-mrbs\_test-content ====================== This model is a fine-tuned version of distilbert-base-uncased on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 0.1564 * Accuracy: 0.9552 * F1: 0.9556 * Precision: 0.9575 * Recall: 0.9552 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 2e-05 * train\_batch\_size: 32 * eval\_batch\_size: 32 * seed: 42 * gradient\_accumulation\_steps: 4 * total\_train\_batch\_size: 128 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * num\_epochs: 2 ### Training results ### Framework versions * Transformers 4.40.1 * Pytorch 2.2.1+cu121 * Datasets 2.19.0 * Tokenizers 0.19.1
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 32\n* eval\\_batch\\_size: 32\n* seed: 42\n* gradient\\_accumulation\\_steps: 4\n* total\\_train\\_batch\\_size: 128\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 2", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.1\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
[ "TAGS\n#transformers #tensorboard #safetensors #distilbert #text-classification #generated_from_trainer #base_model-distilbert-base-uncased #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 32\n* eval\\_batch\\_size: 32\n* seed: 42\n* gradient\\_accumulation\\_steps: 4\n* total\\_train\\_batch\\_size: 128\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 2", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.1\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
[ 59, 124, 5, 44 ]
[ "TAGS\n#transformers #tensorboard #safetensors #distilbert #text-classification #generated_from_trainer #base_model-distilbert-base-uncased #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 32\n* eval\\_batch\\_size: 32\n* seed: 42\n* gradient\\_accumulation\\_steps: 4\n* total\\_train\\_batch\\_size: 128\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 2### Training results### Framework versions\n\n\n* Transformers 4.40.1\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
text-generation
transformers
Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) Qwen1.5-7B - bnb 4bits - Model creator: https://huggingface.co/Qwen/ - Original model: https://huggingface.co/Qwen/Qwen1.5-7B/ Original model description: --- license: other license_name: tongyi-qianwen license_link: >- https://huggingface.co/Qwen/Qwen1.5-7B/blob/main/LICENSE language: - en pipeline_tag: text-generation tags: - pretrained --- # Qwen1.5-7B ## Introduction Qwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen, the improvements include: * 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated; * Significant performance improvement in Chat models; * Multilingual support of both base and chat models; * Stable support of 32K context length for models of all sizes * No need of `trust_remote_code`. For more details, please refer to our [blog post](https://qwenlm.github.io/blog/qwen1.5/) and [GitHub repo](https://github.com/QwenLM/Qwen1.5). ## Model Details Qwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, temporarily we did not include GQA (except for 32B) and the mixture of SWA and full attention. ## Requirements The code of Qwen1.5 has been in the latest Hugging face transformers and we advise you to install `transformers>=4.37.0`, or you might encounter the following error: ``` KeyError: 'qwen2'. ``` ## Usage We do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model. ## Citation If you find our work helpful, feel free to give us a cite. ``` @article{qwen, title={Qwen Technical Report}, author={Jinze Bai and Shuai Bai and Yunfei Chu and Zeyu Cui and Kai Dang and Xiaodong Deng and Yang Fan and Wenbin Ge and Yu Han and Fei Huang and Binyuan Hui and Luo Ji and Mei Li and Junyang Lin and Runji Lin and Dayiheng Liu and Gao Liu and Chengqiang Lu and Keming Lu and Jianxin Ma and Rui Men and Xingzhang Ren and Xuancheng Ren and Chuanqi Tan and Sinan Tan and Jianhong Tu and Peng Wang and Shijie Wang and Wei Wang and Shengguang Wu and Benfeng Xu and Jin Xu and An Yang and Hao Yang and Jian Yang and Shusheng Yang and Yang Yao and Bowen Yu and Hongyi Yuan and Zheng Yuan and Jianwei Zhang and Xingxuan Zhang and Yichang Zhang and Zhenru Zhang and Chang Zhou and Jingren Zhou and Xiaohuan Zhou and Tianhang Zhu}, journal={arXiv preprint arXiv:2309.16609}, year={2023} } ```
{}
RichardErkhov/Qwen_-_Qwen1.5-7B-4bits
null
[ "transformers", "safetensors", "qwen2", "text-generation", "conversational", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "4-bit", "region:us" ]
null
2024-04-30T18:58:34+00:00
[]
[]
TAGS #transformers #safetensors #qwen2 #text-generation #conversational #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us
Quantization made by Richard Erkhov. Github Discord Request more models Qwen1.5-7B - bnb 4bits - Model creator: URL - Original model: URL Original model description: --- license: other license_name: tongyi-qianwen license_link: >- URL language: - en pipeline_tag: text-generation tags: - pretrained --- # Qwen1.5-7B ## Introduction Qwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen, the improvements include: * 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated; * Significant performance improvement in Chat models; * Multilingual support of both base and chat models; * Stable support of 32K context length for models of all sizes * No need of 'trust_remote_code'. For more details, please refer to our blog post and GitHub repo. ## Model Details Qwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, temporarily we did not include GQA (except for 32B) and the mixture of SWA and full attention. ## Requirements The code of Qwen1.5 has been in the latest Hugging face transformers and we advise you to install 'transformers>=4.37.0', or you might encounter the following error: ## Usage We do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model. If you find our work helpful, feel free to give us a cite.
[ "# Qwen1.5-7B", "## Introduction\n\nQwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen, the improvements include:\n\n* 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated;\n* Significant performance improvement in Chat models;\n* Multilingual support of both base and chat models;\n* Stable support of 32K context length for models of all sizes\n* No need of 'trust_remote_code'.\n\nFor more details, please refer to our blog post and GitHub repo.", "## Model Details\nQwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, temporarily we did not include GQA (except for 32B) and the mixture of SWA and full attention.", "## Requirements\nThe code of Qwen1.5 has been in the latest Hugging face transformers and we advise you to install 'transformers>=4.37.0', or you might encounter the following error:", "## Usage\n\nWe do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model.\n\n\nIf you find our work helpful, feel free to give us a cite." ]
[ "TAGS\n#transformers #safetensors #qwen2 #text-generation #conversational #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us \n", "# Qwen1.5-7B", "## Introduction\n\nQwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen, the improvements include:\n\n* 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated;\n* Significant performance improvement in Chat models;\n* Multilingual support of both base and chat models;\n* Stable support of 32K context length for models of all sizes\n* No need of 'trust_remote_code'.\n\nFor more details, please refer to our blog post and GitHub repo.", "## Model Details\nQwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, temporarily we did not include GQA (except for 32B) and the mixture of SWA and full attention.", "## Requirements\nThe code of Qwen1.5 has been in the latest Hugging face transformers and we advise you to install 'transformers>=4.37.0', or you might encounter the following error:", "## Usage\n\nWe do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model.\n\n\nIf you find our work helpful, feel free to give us a cite." ]
[ 42, 9, 160, 119, 44, 66 ]
[ "TAGS\n#transformers #safetensors #qwen2 #text-generation #conversational #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us \n# Qwen1.5-7B## Introduction\n\nQwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen, the improvements include:\n\n* 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated;\n* Significant performance improvement in Chat models;\n* Multilingual support of both base and chat models;\n* Stable support of 32K context length for models of all sizes\n* No need of 'trust_remote_code'.\n\nFor more details, please refer to our blog post and GitHub repo.## Model Details\nQwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, temporarily we did not include GQA (except for 32B) and the mixture of SWA and full attention.## Requirements\nThe code of Qwen1.5 has been in the latest Hugging face transformers and we advise you to install 'transformers>=4.37.0', or you might encounter the following error:## Usage\n\nWe do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model.\n\n\nIf you find our work helpful, feel free to give us a cite." ]
text-generation
transformers
Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) Qwen1.5-4B - bnb 4bits - Model creator: https://huggingface.co/Qwen/ - Original model: https://huggingface.co/Qwen/Qwen1.5-4B/ Original model description: --- license: other license_name: tongyi-qianwen-research license_link: >- https://huggingface.co/Qwen/Qwen1.5-4B/blob/main/LICENSE language: - en pipeline_tag: text-generation tags: - pretrained --- # Qwen1.5-4B ## Introduction Qwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen, the improvements include: * 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated; * Significant performance improvement in Chat models; * Multilingual support of both base and chat models; * Stable support of 32K context length for models of all sizes * No need of `trust_remote_code`. For more details, please refer to our [blog post](https://qwenlm.github.io/blog/qwen1.5/) and [GitHub repo](https://github.com/QwenLM/Qwen1.5). ## Model Details Qwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, temporarily we did not include GQA (except for 32B) and the mixture of SWA and full attention. ## Requirements The code of Qwen1.5 has been in the latest Hugging face transformers and we advise you to install `transformers>=4.37.0`, or you might encounter the following error: ``` KeyError: 'qwen2'. ``` ## Usage We do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model. ## Citation If you find our work helpful, feel free to give us a cite. ``` @article{qwen, title={Qwen Technical Report}, author={Jinze Bai and Shuai Bai and Yunfei Chu and Zeyu Cui and Kai Dang and Xiaodong Deng and Yang Fan and Wenbin Ge and Yu Han and Fei Huang and Binyuan Hui and Luo Ji and Mei Li and Junyang Lin and Runji Lin and Dayiheng Liu and Gao Liu and Chengqiang Lu and Keming Lu and Jianxin Ma and Rui Men and Xingzhang Ren and Xuancheng Ren and Chuanqi Tan and Sinan Tan and Jianhong Tu and Peng Wang and Shijie Wang and Wei Wang and Shengguang Wu and Benfeng Xu and Jin Xu and An Yang and Hao Yang and Jian Yang and Shusheng Yang and Yang Yao and Bowen Yu and Hongyi Yuan and Zheng Yuan and Jianwei Zhang and Xingxuan Zhang and Yichang Zhang and Zhenru Zhang and Chang Zhou and Jingren Zhou and Xiaohuan Zhou and Tianhang Zhu}, journal={arXiv preprint arXiv:2309.16609}, year={2023} } ```
{}
RichardErkhov/Qwen_-_Qwen1.5-4B-4bits
null
[ "transformers", "safetensors", "qwen2", "text-generation", "conversational", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "4-bit", "region:us" ]
null
2024-04-30T18:58:45+00:00
[]
[]
TAGS #transformers #safetensors #qwen2 #text-generation #conversational #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us
Quantization made by Richard Erkhov. Github Discord Request more models Qwen1.5-4B - bnb 4bits - Model creator: URL - Original model: URL Original model description: --- license: other license_name: tongyi-qianwen-research license_link: >- URL language: - en pipeline_tag: text-generation tags: - pretrained --- # Qwen1.5-4B ## Introduction Qwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen, the improvements include: * 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated; * Significant performance improvement in Chat models; * Multilingual support of both base and chat models; * Stable support of 32K context length for models of all sizes * No need of 'trust_remote_code'. For more details, please refer to our blog post and GitHub repo. ## Model Details Qwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, temporarily we did not include GQA (except for 32B) and the mixture of SWA and full attention. ## Requirements The code of Qwen1.5 has been in the latest Hugging face transformers and we advise you to install 'transformers>=4.37.0', or you might encounter the following error: ## Usage We do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model. If you find our work helpful, feel free to give us a cite.
[ "# Qwen1.5-4B", "## Introduction\n\nQwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen, the improvements include:\n\n* 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated;\n* Significant performance improvement in Chat models;\n* Multilingual support of both base and chat models;\n* Stable support of 32K context length for models of all sizes\n* No need of 'trust_remote_code'.\n\nFor more details, please refer to our blog post and GitHub repo.", "## Model Details\nQwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, temporarily we did not include GQA (except for 32B) and the mixture of SWA and full attention.", "## Requirements\nThe code of Qwen1.5 has been in the latest Hugging face transformers and we advise you to install 'transformers>=4.37.0', or you might encounter the following error:", "## Usage\n\nWe do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model.\n\n\nIf you find our work helpful, feel free to give us a cite." ]
[ "TAGS\n#transformers #safetensors #qwen2 #text-generation #conversational #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us \n", "# Qwen1.5-4B", "## Introduction\n\nQwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen, the improvements include:\n\n* 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated;\n* Significant performance improvement in Chat models;\n* Multilingual support of both base and chat models;\n* Stable support of 32K context length for models of all sizes\n* No need of 'trust_remote_code'.\n\nFor more details, please refer to our blog post and GitHub repo.", "## Model Details\nQwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, temporarily we did not include GQA (except for 32B) and the mixture of SWA and full attention.", "## Requirements\nThe code of Qwen1.5 has been in the latest Hugging face transformers and we advise you to install 'transformers>=4.37.0', or you might encounter the following error:", "## Usage\n\nWe do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model.\n\n\nIf you find our work helpful, feel free to give us a cite." ]
[ 42, 9, 160, 119, 44, 66 ]
[ "TAGS\n#transformers #safetensors #qwen2 #text-generation #conversational #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us \n# Qwen1.5-4B## Introduction\n\nQwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen, the improvements include:\n\n* 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated;\n* Significant performance improvement in Chat models;\n* Multilingual support of both base and chat models;\n* Stable support of 32K context length for models of all sizes\n* No need of 'trust_remote_code'.\n\nFor more details, please refer to our blog post and GitHub repo.## Model Details\nQwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, temporarily we did not include GQA (except for 32B) and the mixture of SWA and full attention.## Requirements\nThe code of Qwen1.5 has been in the latest Hugging face transformers and we advise you to install 'transformers>=4.37.0', or you might encounter the following error:## Usage\n\nWe do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model.\n\n\nIf you find our work helpful, feel free to give us a cite." ]
feature-extraction
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
claudios/CodeGPT-small-java-adaptedGPT2
null
[ "transformers", "safetensors", "gpt2", "feature-extraction", "arxiv:1910.09700", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T18:59:09+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #gpt2 #feature-extraction #arxiv-1910.09700 #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #gpt2 #feature-extraction #arxiv-1910.09700 #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 40, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #gpt2 #feature-extraction #arxiv-1910.09700 #endpoints_compatible #text-generation-inference #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
text-generation
transformers
Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) Qwen1.5-1.8B - bnb 8bits - Model creator: https://huggingface.co/Qwen/ - Original model: https://huggingface.co/Qwen/Qwen1.5-1.8B/ Original model description: --- license: other license_name: tongyi-qianwen-research license_link: >- https://huggingface.co/Qwen/Qwen1.5-1.8B/blob/main/LICENSE language: - en pipeline_tag: text-generation tags: - pretrained --- # Qwen1.5-1.8B ## Introduction Qwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen, the improvements include: * 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated; * Significant performance improvement in Chat models; * Multilingual support of both base and chat models; * Stable support of 32K context length for models of all sizes * No need of `trust_remote_code`. For more details, please refer to our [blog post](https://qwenlm.github.io/blog/qwen1.5/) and [GitHub repo](https://github.com/QwenLM/Qwen1.5). ## Model Details Qwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, temporarily we did not include GQA (except for 32B) and the mixture of SWA and full attention. ## Requirements The code of Qwen1.5 has been in the latest Hugging face transformers and we advise you to install `transformers>=4.37.0`, or you might encounter the following error: ``` KeyError: 'qwen2'. ``` ## Usage We do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model. ## Citation If you find our work helpful, feel free to give us a cite. ``` @article{qwen, title={Qwen Technical Report}, author={Jinze Bai and Shuai Bai and Yunfei Chu and Zeyu Cui and Kai Dang and Xiaodong Deng and Yang Fan and Wenbin Ge and Yu Han and Fei Huang and Binyuan Hui and Luo Ji and Mei Li and Junyang Lin and Runji Lin and Dayiheng Liu and Gao Liu and Chengqiang Lu and Keming Lu and Jianxin Ma and Rui Men and Xingzhang Ren and Xuancheng Ren and Chuanqi Tan and Sinan Tan and Jianhong Tu and Peng Wang and Shijie Wang and Wei Wang and Shengguang Wu and Benfeng Xu and Jin Xu and An Yang and Hao Yang and Jian Yang and Shusheng Yang and Yang Yao and Bowen Yu and Hongyi Yuan and Zheng Yuan and Jianwei Zhang and Xingxuan Zhang and Yichang Zhang and Zhenru Zhang and Chang Zhou and Jingren Zhou and Xiaohuan Zhou and Tianhang Zhu}, journal={arXiv preprint arXiv:2309.16609}, year={2023} } ```
{}
RichardErkhov/Qwen_-_Qwen1.5-1.8B-8bits
null
[ "transformers", "safetensors", "qwen2", "text-generation", "conversational", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "8-bit", "region:us" ]
null
2024-04-30T18:59:28+00:00
[]
[]
TAGS #transformers #safetensors #qwen2 #text-generation #conversational #autotrain_compatible #endpoints_compatible #text-generation-inference #8-bit #region-us
Quantization made by Richard Erkhov. Github Discord Request more models Qwen1.5-1.8B - bnb 8bits - Model creator: URL - Original model: URL Original model description: --- license: other license_name: tongyi-qianwen-research license_link: >- URL language: - en pipeline_tag: text-generation tags: - pretrained --- # Qwen1.5-1.8B ## Introduction Qwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen, the improvements include: * 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated; * Significant performance improvement in Chat models; * Multilingual support of both base and chat models; * Stable support of 32K context length for models of all sizes * No need of 'trust_remote_code'. For more details, please refer to our blog post and GitHub repo. ## Model Details Qwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, temporarily we did not include GQA (except for 32B) and the mixture of SWA and full attention. ## Requirements The code of Qwen1.5 has been in the latest Hugging face transformers and we advise you to install 'transformers>=4.37.0', or you might encounter the following error: ## Usage We do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model. If you find our work helpful, feel free to give us a cite.
[ "# Qwen1.5-1.8B", "## Introduction\n\nQwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen, the improvements include:\n\n* 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated;\n* Significant performance improvement in Chat models;\n* Multilingual support of both base and chat models;\n* Stable support of 32K context length for models of all sizes\n* No need of 'trust_remote_code'.\n\nFor more details, please refer to our blog post and GitHub repo.", "## Model Details\nQwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, temporarily we did not include GQA (except for 32B) and the mixture of SWA and full attention.", "## Requirements\nThe code of Qwen1.5 has been in the latest Hugging face transformers and we advise you to install 'transformers>=4.37.0', or you might encounter the following error:", "## Usage\n\nWe do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model.\n\n\nIf you find our work helpful, feel free to give us a cite." ]
[ "TAGS\n#transformers #safetensors #qwen2 #text-generation #conversational #autotrain_compatible #endpoints_compatible #text-generation-inference #8-bit #region-us \n", "# Qwen1.5-1.8B", "## Introduction\n\nQwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen, the improvements include:\n\n* 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated;\n* Significant performance improvement in Chat models;\n* Multilingual support of both base and chat models;\n* Stable support of 32K context length for models of all sizes\n* No need of 'trust_remote_code'.\n\nFor more details, please refer to our blog post and GitHub repo.", "## Model Details\nQwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, temporarily we did not include GQA (except for 32B) and the mixture of SWA and full attention.", "## Requirements\nThe code of Qwen1.5 has been in the latest Hugging face transformers and we advise you to install 'transformers>=4.37.0', or you might encounter the following error:", "## Usage\n\nWe do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model.\n\n\nIf you find our work helpful, feel free to give us a cite." ]
[ 42, 11, 160, 119, 44, 66 ]
[ "TAGS\n#transformers #safetensors #qwen2 #text-generation #conversational #autotrain_compatible #endpoints_compatible #text-generation-inference #8-bit #region-us \n# Qwen1.5-1.8B## Introduction\n\nQwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen, the improvements include:\n\n* 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated;\n* Significant performance improvement in Chat models;\n* Multilingual support of both base and chat models;\n* Stable support of 32K context length for models of all sizes\n* No need of 'trust_remote_code'.\n\nFor more details, please refer to our blog post and GitHub repo.## Model Details\nQwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, temporarily we did not include GQA (except for 32B) and the mixture of SWA and full attention.## Requirements\nThe code of Qwen1.5 has been in the latest Hugging face transformers and we advise you to install 'transformers>=4.37.0', or you might encounter the following error:## Usage\n\nWe do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model.\n\n\nIf you find our work helpful, feel free to give us a cite." ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # robust_llm_pythia-14m_mz-135_WordLength_n-its-10-seed-4 This model is a fine-tuned version of [EleutherAI/pythia-14m](https://huggingface.co/EleutherAI/pythia-14m) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 64 - seed: 4 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.2.1 - Datasets 2.18.0 - Tokenizers 0.15.2
{"tags": ["generated_from_trainer"], "base_model": "EleutherAI/pythia-14m", "model-index": [{"name": "robust_llm_pythia-14m_mz-135_WordLength_n-its-10-seed-4", "results": []}]}
AlignmentResearch/robust_llm_pythia-14m_mz-135_WordLength_n-its-10-seed-4
null
[ "transformers", "tensorboard", "safetensors", "gpt_neox", "text-classification", "generated_from_trainer", "base_model:EleutherAI/pythia-14m", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T18:59:44+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-14m #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# robust_llm_pythia-14m_mz-135_WordLength_n-its-10-seed-4 This model is a fine-tuned version of EleutherAI/pythia-14m on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 64 - seed: 4 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.2.1 - Datasets 2.18.0 - Tokenizers 0.15.2
[ "# robust_llm_pythia-14m_mz-135_WordLength_n-its-10-seed-4\n\nThis model is a fine-tuned version of EleutherAI/pythia-14m on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 4\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-14m #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# robust_llm_pythia-14m_mz-135_WordLength_n-its-10-seed-4\n\nThis model is a fine-tuned version of EleutherAI/pythia-14m on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 4\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ 62, 58, 7, 9, 9, 4, 93, 5, 40 ]
[ "TAGS\n#transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-14m #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# robust_llm_pythia-14m_mz-135_WordLength_n-its-10-seed-4\n\nThis model is a fine-tuned version of EleutherAI/pythia-14m on an unknown dataset.## Model description\n\nMore information needed## Intended uses & limitations\n\nMore information needed## Training and evaluation data\n\nMore information needed## Training procedure### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 4\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1### Training results### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # robust_llm_pythia-31m_mz-135_WordLength_n-its-10-seed-2 This model is a fine-tuned version of [EleutherAI/pythia-31m](https://huggingface.co/EleutherAI/pythia-31m) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 64 - seed: 2 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.2.1 - Datasets 2.18.0 - Tokenizers 0.15.2
{"tags": ["generated_from_trainer"], "base_model": "EleutherAI/pythia-31m", "model-index": [{"name": "robust_llm_pythia-31m_mz-135_WordLength_n-its-10-seed-2", "results": []}]}
AlignmentResearch/robust_llm_pythia-31m_mz-135_WordLength_n-its-10-seed-2
null
[ "transformers", "tensorboard", "safetensors", "gpt_neox", "text-classification", "generated_from_trainer", "base_model:EleutherAI/pythia-31m", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T19:00:28+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-31m #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# robust_llm_pythia-31m_mz-135_WordLength_n-its-10-seed-2 This model is a fine-tuned version of EleutherAI/pythia-31m on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 64 - seed: 2 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.2.1 - Datasets 2.18.0 - Tokenizers 0.15.2
[ "# robust_llm_pythia-31m_mz-135_WordLength_n-its-10-seed-2\n\nThis model is a fine-tuned version of EleutherAI/pythia-31m on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 2\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-31m #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# robust_llm_pythia-31m_mz-135_WordLength_n-its-10-seed-2\n\nThis model is a fine-tuned version of EleutherAI/pythia-31m on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 2\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ 62, 58, 7, 9, 9, 4, 93, 5, 40 ]
[ "TAGS\n#transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-31m #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# robust_llm_pythia-31m_mz-135_WordLength_n-its-10-seed-2\n\nThis model is a fine-tuned version of EleutherAI/pythia-31m on an unknown dataset.## Model description\n\nMore information needed## Intended uses & limitations\n\nMore information needed## Training and evaluation data\n\nMore information needed## Training procedure### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 2\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1### Training results### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # robust_llm_pythia-31m_mz-135_WordLength_n-its-10-seed-0 This model is a fine-tuned version of [EleutherAI/pythia-31m](https://huggingface.co/EleutherAI/pythia-31m) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 64 - seed: 0 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.2.1 - Datasets 2.18.0 - Tokenizers 0.15.2
{"tags": ["generated_from_trainer"], "base_model": "EleutherAI/pythia-31m", "model-index": [{"name": "robust_llm_pythia-31m_mz-135_WordLength_n-its-10-seed-0", "results": []}]}
AlignmentResearch/robust_llm_pythia-31m_mz-135_WordLength_n-its-10-seed-0
null
[ "transformers", "tensorboard", "safetensors", "gpt_neox", "text-classification", "generated_from_trainer", "base_model:EleutherAI/pythia-31m", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T19:00:38+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-31m #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# robust_llm_pythia-31m_mz-135_WordLength_n-its-10-seed-0 This model is a fine-tuned version of EleutherAI/pythia-31m on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 64 - seed: 0 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.2.1 - Datasets 2.18.0 - Tokenizers 0.15.2
[ "# robust_llm_pythia-31m_mz-135_WordLength_n-its-10-seed-0\n\nThis model is a fine-tuned version of EleutherAI/pythia-31m on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 0\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-31m #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# robust_llm_pythia-31m_mz-135_WordLength_n-its-10-seed-0\n\nThis model is a fine-tuned version of EleutherAI/pythia-31m on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 0\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ 62, 58, 7, 9, 9, 4, 93, 5, 40 ]
[ "TAGS\n#transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-31m #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# robust_llm_pythia-31m_mz-135_WordLength_n-its-10-seed-0\n\nThis model is a fine-tuned version of EleutherAI/pythia-31m on an unknown dataset.## Model description\n\nMore information needed## Intended uses & limitations\n\nMore information needed## Training and evaluation data\n\nMore information needed## Training procedure### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 0\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1### Training results### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # robust_llm_pythia-31m_mz-135_WordLength_n-its-10-seed-1 This model is a fine-tuned version of [EleutherAI/pythia-31m](https://huggingface.co/EleutherAI/pythia-31m) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 64 - seed: 1 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.2.1 - Datasets 2.18.0 - Tokenizers 0.15.2
{"tags": ["generated_from_trainer"], "base_model": "EleutherAI/pythia-31m", "model-index": [{"name": "robust_llm_pythia-31m_mz-135_WordLength_n-its-10-seed-1", "results": []}]}
AlignmentResearch/robust_llm_pythia-31m_mz-135_WordLength_n-its-10-seed-1
null
[ "transformers", "tensorboard", "safetensors", "gpt_neox", "text-classification", "generated_from_trainer", "base_model:EleutherAI/pythia-31m", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T19:01:08+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-31m #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# robust_llm_pythia-31m_mz-135_WordLength_n-its-10-seed-1 This model is a fine-tuned version of EleutherAI/pythia-31m on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 64 - seed: 1 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.2.1 - Datasets 2.18.0 - Tokenizers 0.15.2
[ "# robust_llm_pythia-31m_mz-135_WordLength_n-its-10-seed-1\n\nThis model is a fine-tuned version of EleutherAI/pythia-31m on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 1\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-31m #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# robust_llm_pythia-31m_mz-135_WordLength_n-its-10-seed-1\n\nThis model is a fine-tuned version of EleutherAI/pythia-31m on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 1\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ 62, 58, 7, 9, 9, 4, 93, 5, 40 ]
[ "TAGS\n#transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-31m #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# robust_llm_pythia-31m_mz-135_WordLength_n-its-10-seed-1\n\nThis model is a fine-tuned version of EleutherAI/pythia-31m on an unknown dataset.## Model description\n\nMore information needed## Intended uses & limitations\n\nMore information needed## Training and evaluation data\n\nMore information needed## Training procedure### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 1\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1### Training results### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": ["trl", "dpo"]}
kyuhyun/final_4bit_DST_DPO_free
null
[ "transformers", "safetensors", "llama", "text-generation", "trl", "dpo", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "4-bit", "region:us" ]
null
2024-04-30T19:02:00+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #llama #text-generation #trl #dpo #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #trl #dpo #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 54, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #trl #dpo #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
quickstep3621/log2vqm
null
[ "transformers", "safetensors", "stablelm", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-30T19:03:26+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #stablelm #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #stablelm #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 41, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #stablelm #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
fill-mask
transformers
../../blobs/c5d5541e2a5fbfd61ffc4acae50f956c575edc1f
{}
ALTAH/wojood_Tah
null
[ "transformers", "pytorch", "tf", "jax", "tensorboard", "bert", "fill-mask", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-30T19:04:01+00:00
[]
[]
TAGS #transformers #pytorch #tf #jax #tensorboard #bert #fill-mask #autotrain_compatible #endpoints_compatible #region-us
../../blobs/c5d5541e2a5fbfd61ffc4acae50f956c575edc1f
[]
[ "TAGS\n#transformers #pytorch #tf #jax #tensorboard #bert #fill-mask #autotrain_compatible #endpoints_compatible #region-us \n" ]
[ 36 ]
[ "TAGS\n#transformers #pytorch #tf #jax #tensorboard #bert #fill-mask #autotrain_compatible #endpoints_compatible #region-us \n" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # robust_llm_pythia-70m_mz-135_WordLength_n-its-10-seed-4 This model is a fine-tuned version of [EleutherAI/pythia-70m](https://huggingface.co/EleutherAI/pythia-70m) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 64 - seed: 4 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.2.1 - Datasets 2.18.0 - Tokenizers 0.15.2
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "base_model": "EleutherAI/pythia-70m", "model-index": [{"name": "robust_llm_pythia-70m_mz-135_WordLength_n-its-10-seed-4", "results": []}]}
AlignmentResearch/robust_llm_pythia-70m_mz-135_WordLength_n-its-10-seed-4
null
[ "transformers", "tensorboard", "safetensors", "gpt_neox", "text-classification", "generated_from_trainer", "base_model:EleutherAI/pythia-70m", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T19:04:42+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-70m #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# robust_llm_pythia-70m_mz-135_WordLength_n-its-10-seed-4 This model is a fine-tuned version of EleutherAI/pythia-70m on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 64 - seed: 4 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.2.1 - Datasets 2.18.0 - Tokenizers 0.15.2
[ "# robust_llm_pythia-70m_mz-135_WordLength_n-its-10-seed-4\n\nThis model is a fine-tuned version of EleutherAI/pythia-70m on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 4\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-70m #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# robust_llm_pythia-70m_mz-135_WordLength_n-its-10-seed-4\n\nThis model is a fine-tuned version of EleutherAI/pythia-70m on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 4\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ 70, 58, 7, 9, 9, 4, 93, 5, 40 ]
[ "TAGS\n#transformers #tensorboard #safetensors #gpt_neox #text-classification #generated_from_trainer #base_model-EleutherAI/pythia-70m #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# robust_llm_pythia-70m_mz-135_WordLength_n-its-10-seed-4\n\nThis model is a fine-tuned version of EleutherAI/pythia-70m on an unknown dataset.## Model description\n\nMore information needed## Intended uses & limitations\n\nMore information needed## Training and evaluation data\n\nMore information needed## Training procedure### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-05\n- train_batch_size: 8\n- eval_batch_size: 64\n- seed: 4\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1### Training results### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.2.1\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilled-code-llama This model is a fine-tuned version of [TinyLlama/TinyLlama-1.1B-Chat-v1.0](https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v1.0) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - gradient_accumulation_steps: 32 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: constant - lr_scheduler_warmup_ratio: 0.03 - num_epochs: 6 ### Framework versions - Transformers 4.40.1 - Pytorch 2.3.0+cu121 - Datasets 2.19.0 - Tokenizers 0.19.1
{"license": "apache-2.0", "tags": ["trl", "sft", "generated_from_trainer"], "base_model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "model-index": [{"name": "distilled-code-llama", "results": []}]}
anudaw/distilled-finetuned-code-llama
null
[ "transformers", "safetensors", "llama", "text-generation", "trl", "sft", "generated_from_trainer", "conversational", "base_model:TinyLlama/TinyLlama-1.1B-Chat-v1.0", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T19:05:23+00:00
[]
[]
TAGS #transformers #safetensors #llama #text-generation #trl #sft #generated_from_trainer #conversational #base_model-TinyLlama/TinyLlama-1.1B-Chat-v1.0 #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# distilled-code-llama This model is a fine-tuned version of TinyLlama/TinyLlama-1.1B-Chat-v1.0 on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - gradient_accumulation_steps: 32 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: constant - lr_scheduler_warmup_ratio: 0.03 - num_epochs: 6 ### Framework versions - Transformers 4.40.1 - Pytorch 2.3.0+cu121 - Datasets 2.19.0 - Tokenizers 0.19.1
[ "# distilled-code-llama\n\nThis model is a fine-tuned version of TinyLlama/TinyLlama-1.1B-Chat-v1.0 on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 1\n- eval_batch_size: 1\n- seed: 42\n- gradient_accumulation_steps: 32\n- total_train_batch_size: 32\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: constant\n- lr_scheduler_warmup_ratio: 0.03\n- num_epochs: 6", "### Framework versions\n\n- Transformers 4.40.1\n- Pytorch 2.3.0+cu121\n- Datasets 2.19.0\n- Tokenizers 0.19.1" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #trl #sft #generated_from_trainer #conversational #base_model-TinyLlama/TinyLlama-1.1B-Chat-v1.0 #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# distilled-code-llama\n\nThis model is a fine-tuned version of TinyLlama/TinyLlama-1.1B-Chat-v1.0 on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 1\n- eval_batch_size: 1\n- seed: 42\n- gradient_accumulation_steps: 32\n- total_train_batch_size: 32\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: constant\n- lr_scheduler_warmup_ratio: 0.03\n- num_epochs: 6", "### Framework versions\n\n- Transformers 4.40.1\n- Pytorch 2.3.0+cu121\n- Datasets 2.19.0\n- Tokenizers 0.19.1" ]
[ 80, 42, 7, 9, 9, 4, 126, 44 ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #trl #sft #generated_from_trainer #conversational #base_model-TinyLlama/TinyLlama-1.1B-Chat-v1.0 #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# distilled-code-llama\n\nThis model is a fine-tuned version of TinyLlama/TinyLlama-1.1B-Chat-v1.0 on an unknown dataset.## Model description\n\nMore information needed## Intended uses & limitations\n\nMore information needed## Training and evaluation data\n\nMore information needed## Training procedure### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 1\n- eval_batch_size: 1\n- seed: 42\n- gradient_accumulation_steps: 32\n- total_train_batch_size: 32\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: constant\n- lr_scheduler_warmup_ratio: 0.03\n- num_epochs: 6### Framework versions\n\n- Transformers 4.40.1\n- Pytorch 2.3.0+cu121\n- Datasets 2.19.0\n- Tokenizers 0.19.1" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # kdrt_contentt This model is a fine-tuned version of [indobenchmark/indobert-base-p1](https://huggingface.co/indobenchmark/indobert-base-p1) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.4551 - Accuracy: 0.8820 - Precision: 0.8820 - Recall: 0.8820 - F1: 0.8820 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:|:------:| | No log | 1.0 | 81 | 0.2982 | 0.8758 | 0.8758 | 0.8758 | 0.8758 | | No log | 2.0 | 162 | 0.4551 | 0.8820 | 0.8820 | 0.8820 | 0.8820 | ### Framework versions - Transformers 4.40.1 - Pytorch 2.3.0+cu121 - Datasets 2.19.0 - Tokenizers 0.19.1
{"license": "mit", "tags": ["generated_from_trainer"], "metrics": ["accuracy", "precision", "recall", "f1"], "base_model": "indobenchmark/indobert-base-p1", "model-index": [{"name": "kdrt_contentt", "results": []}]}
brescia/kdrt_contentt
null
[ "transformers", "tensorboard", "safetensors", "bert", "text-classification", "generated_from_trainer", "base_model:indobenchmark/indobert-base-p1", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-30T19:06:22+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #bert #text-classification #generated_from_trainer #base_model-indobenchmark/indobert-base-p1 #license-mit #autotrain_compatible #endpoints_compatible #region-us
kdrt\_contentt ============== This model is a fine-tuned version of indobenchmark/indobert-base-p1 on the None dataset. It achieves the following results on the evaluation set: * Loss: 0.4551 * Accuracy: 0.8820 * Precision: 0.8820 * Recall: 0.8820 * F1: 0.8820 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 5e-05 * train\_batch\_size: 8 * eval\_batch\_size: 8 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * num\_epochs: 2 ### Training results ### Framework versions * Transformers 4.40.1 * Pytorch 2.3.0+cu121 * Datasets 2.19.0 * Tokenizers 0.19.1
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 5e-05\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 2", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.1\n* Pytorch 2.3.0+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
[ "TAGS\n#transformers #tensorboard #safetensors #bert #text-classification #generated_from_trainer #base_model-indobenchmark/indobert-base-p1 #license-mit #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 5e-05\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 2", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.1\n* Pytorch 2.3.0+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
[ 57, 101, 5, 44 ]
[ "TAGS\n#transformers #tensorboard #safetensors #bert #text-classification #generated_from_trainer #base_model-indobenchmark/indobert-base-p1 #license-mit #autotrain_compatible #endpoints_compatible #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 5e-05\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 2### Training results### Framework versions\n\n\n* Transformers 4.40.1\n* Pytorch 2.3.0+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
null
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
HenryCai1129/adapter-llama-adaptertoxic2nontoxic-100-50-0.001
null
[ "transformers", "safetensors", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
2024-04-30T19:06:25+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #arxiv-1910.09700 #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #arxiv-1910.09700 #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 26, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #arxiv-1910.09700 #endpoints_compatible #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
automatic-speech-recognition
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # whisperFinetuneTakeTwo This model is a fine-tuned version of [openai/whisper-tiny.en](https://huggingface.co/openai/whisper-tiny.en) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.5685 - Wer: 26.8493 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.001 - train_batch_size: 128 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 100 ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:------:|:----:|:---------------:|:-------:| | 2.8768 | 0.2778 | 10 | 1.8749 | 35.4033 | | 0.8301 | 0.5556 | 20 | 0.6687 | 24.9619 | | 0.5543 | 0.8333 | 30 | 0.5484 | 22.9224 | | 0.3902 | 1.1111 | 40 | 0.5108 | 20.3044 | | 0.3395 | 1.3889 | 50 | 0.4900 | 20.6088 | | 0.3255 | 1.6667 | 60 | 0.4830 | 20.7915 | | 0.362 | 1.9444 | 70 | 0.4867 | 20.6393 | | 0.1228 | 2.2222 | 80 | 0.5114 | 20.8524 | | 0.1288 | 2.5 | 90 | 0.5299 | 21.3090 | | 0.1513 | 2.7778 | 100 | 0.5685 | 26.8493 | ### Framework versions - Transformers 4.40.1 - Pytorch 2.2.1+cu121 - Datasets 2.19.1.dev0 - Tokenizers 0.19.1
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "metrics": ["wer"], "base_model": "openai/whisper-tiny.en", "model-index": [{"name": "whisperFinetuneTakeTwo", "results": []}]}
shljessie/whisperFinetuneTakeTwo
null
[ "transformers", "tensorboard", "safetensors", "whisper", "automatic-speech-recognition", "generated_from_trainer", "base_model:openai/whisper-tiny.en", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-04-30T19:06:27+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #whisper #automatic-speech-recognition #generated_from_trainer #base_model-openai/whisper-tiny.en #license-apache-2.0 #endpoints_compatible #region-us
whisperFinetuneTakeTwo ====================== This model is a fine-tuned version of openai/URL on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 0.5685 * Wer: 26.8493 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.001 * train\_batch\_size: 128 * eval\_batch\_size: 8 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * lr\_scheduler\_warmup\_steps: 500 * training\_steps: 100 ### Training results ### Framework versions * Transformers 4.40.1 * Pytorch 2.2.1+cu121 * Datasets 2.19.1.dev0 * Tokenizers 0.19.1
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.001\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* lr\\_scheduler\\_warmup\\_steps: 500\n* training\\_steps: 100", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.1\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.1.dev0\n* Tokenizers 0.19.1" ]
[ "TAGS\n#transformers #tensorboard #safetensors #whisper #automatic-speech-recognition #generated_from_trainer #base_model-openai/whisper-tiny.en #license-apache-2.0 #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.001\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* lr\\_scheduler\\_warmup\\_steps: 500\n* training\\_steps: 100", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.1\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.1.dev0\n* Tokenizers 0.19.1" ]
[ 54, 114, 5, 47 ]
[ "TAGS\n#transformers #tensorboard #safetensors #whisper #automatic-speech-recognition #generated_from_trainer #base_model-openai/whisper-tiny.en #license-apache-2.0 #endpoints_compatible #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.001\n* train\\_batch\\_size: 128\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* lr\\_scheduler\\_warmup\\_steps: 500\n* training\\_steps: 100### Training results### Framework versions\n\n\n* Transformers 4.40.1\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.1.dev0\n* Tokenizers 0.19.1" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
nem012/gemma2B-1e-5r2
null
[ "transformers", "safetensors", "gemma", "text-generation", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-30T19:06:27+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #gemma #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #gemma #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ 43, 6, 4, 75, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5 ]
[ "TAGS\n#transformers #safetensors #gemma #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact" ]
null
peft
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. --> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed] ### Framework versions - PEFT 0.10.0
{"license": "apache-2.0", "library_name": "peft", "base_model": "google/gemma-2b-it"}
azarafrooz/gemma-nlaf-v1
null
[ "peft", "safetensors", "arxiv:1910.09700", "base_model:google/gemma-2b-it", "license:apache-2.0", "region:us" ]
null
2024-04-30T19:07:05+00:00
[ "1910.09700" ]
[]
TAGS #peft #safetensors #arxiv-1910.09700 #base_model-google/gemma-2b-it #license-apache-2.0 #region-us
# Model Card for Model ID ## Model Details ### Model Description - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact ### Framework versions - PEFT 0.10.0
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\n\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact", "### Framework versions\n\n- PEFT 0.10.0" ]
[ "TAGS\n#peft #safetensors #arxiv-1910.09700 #base_model-google/gemma-2b-it #license-apache-2.0 #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\n\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact", "### Framework versions\n\n- PEFT 0.10.0" ]
[ 43, 6, 4, 50, 23, 3, 5, 8, 9, 8, 34, 20, 4, 5, 5, 11, 13, 12, 3, 10, 6, 5, 6, 4, 5, 7, 49, 7, 7, 5, 5, 15, 7, 7, 8, 5, 13 ]
[ "TAGS\n#peft #safetensors #arxiv-1910.09700 #base_model-google/gemma-2b-it #license-apache-2.0 #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\n\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Downstream Use [optional]### Out-of-Scope Use## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.## How to Get Started with the Model\n\nUse the code below to get started with the model.## Training Details### Training Data### Training Procedure#### Preprocessing [optional]#### Training Hyperparameters\n\n- Training regime:#### Speeds, Sizes, Times [optional]## Evaluation### Testing Data, Factors & Metrics#### Testing Data#### Factors#### Metrics### Results#### Summary## Model Examination [optional]## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:## Technical Specifications [optional]### Model Architecture and Objective### Compute Infrastructure#### Hardware#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Model Card Authors [optional]## Model Card Contact### Framework versions\n\n- PEFT 0.10.0" ]
text-generation
transformers
Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) Qwen1.5-14B - bnb 4bits - Model creator: https://huggingface.co/Qwen/ - Original model: https://huggingface.co/Qwen/Qwen1.5-14B/ Original model description: --- license: other license_name: tongyi-qianwen license_link: >- https://huggingface.co/Qwen/Qwen1.5-14B/blob/main/LICENSE language: - en pipeline_tag: text-generation tags: - pretrained --- # Qwen1.5-14B ## Introduction Qwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen, the improvements include: * 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated; * Significant performance improvement in Chat models; * Multilingual support of both base and chat models; * Stable support of 32K context length for models of all sizes * No need of `trust_remote_code`. For more details, please refer to our [blog post](https://qwenlm.github.io/blog/qwen1.5/) and [GitHub repo](https://github.com/QwenLM/Qwen1.5). ## Model Details Qwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, temporarily we did not include GQA (except for 32B) and the mixture of SWA and full attention. ## Requirements The code of Qwen1.5 has been in the latest Hugging face transformers and we advise you to install `transformers>=4.37.0`, or you might encounter the following error: ``` KeyError: 'qwen2'. ``` ## Usage We do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model. ## Citation If you find our work helpful, feel free to give us a cite. ``` @article{qwen, title={Qwen Technical Report}, author={Jinze Bai and Shuai Bai and Yunfei Chu and Zeyu Cui and Kai Dang and Xiaodong Deng and Yang Fan and Wenbin Ge and Yu Han and Fei Huang and Binyuan Hui and Luo Ji and Mei Li and Junyang Lin and Runji Lin and Dayiheng Liu and Gao Liu and Chengqiang Lu and Keming Lu and Jianxin Ma and Rui Men and Xingzhang Ren and Xuancheng Ren and Chuanqi Tan and Sinan Tan and Jianhong Tu and Peng Wang and Shijie Wang and Wei Wang and Shengguang Wu and Benfeng Xu and Jin Xu and An Yang and Hao Yang and Jian Yang and Shusheng Yang and Yang Yao and Bowen Yu and Hongyi Yuan and Zheng Yuan and Jianwei Zhang and Xingxuan Zhang and Yichang Zhang and Zhenru Zhang and Chang Zhou and Jingren Zhou and Xiaohuan Zhou and Tianhang Zhu}, journal={arXiv preprint arXiv:2309.16609}, year={2023} } ```
{}
RichardErkhov/Qwen_-_Qwen1.5-14B-4bits
null
[ "transformers", "safetensors", "qwen2", "text-generation", "conversational", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "4-bit", "region:us" ]
null
2024-04-30T19:08:40+00:00
[]
[]
TAGS #transformers #safetensors #qwen2 #text-generation #conversational #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us
Quantization made by Richard Erkhov. Github Discord Request more models Qwen1.5-14B - bnb 4bits - Model creator: URL - Original model: URL Original model description: --- license: other license_name: tongyi-qianwen license_link: >- URL language: - en pipeline_tag: text-generation tags: - pretrained --- # Qwen1.5-14B ## Introduction Qwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen, the improvements include: * 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated; * Significant performance improvement in Chat models; * Multilingual support of both base and chat models; * Stable support of 32K context length for models of all sizes * No need of 'trust_remote_code'. For more details, please refer to our blog post and GitHub repo. ## Model Details Qwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, temporarily we did not include GQA (except for 32B) and the mixture of SWA and full attention. ## Requirements The code of Qwen1.5 has been in the latest Hugging face transformers and we advise you to install 'transformers>=4.37.0', or you might encounter the following error: ## Usage We do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model. If you find our work helpful, feel free to give us a cite.
[ "# Qwen1.5-14B", "## Introduction\n\nQwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen, the improvements include:\n\n* 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated;\n* Significant performance improvement in Chat models;\n* Multilingual support of both base and chat models;\n* Stable support of 32K context length for models of all sizes\n* No need of 'trust_remote_code'.\n\nFor more details, please refer to our blog post and GitHub repo.", "## Model Details\nQwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, temporarily we did not include GQA (except for 32B) and the mixture of SWA and full attention.", "## Requirements\nThe code of Qwen1.5 has been in the latest Hugging face transformers and we advise you to install 'transformers>=4.37.0', or you might encounter the following error:", "## Usage\n\nWe do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model.\n\n\nIf you find our work helpful, feel free to give us a cite." ]
[ "TAGS\n#transformers #safetensors #qwen2 #text-generation #conversational #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us \n", "# Qwen1.5-14B", "## Introduction\n\nQwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen, the improvements include:\n\n* 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated;\n* Significant performance improvement in Chat models;\n* Multilingual support of both base and chat models;\n* Stable support of 32K context length for models of all sizes\n* No need of 'trust_remote_code'.\n\nFor more details, please refer to our blog post and GitHub repo.", "## Model Details\nQwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, temporarily we did not include GQA (except for 32B) and the mixture of SWA and full attention.", "## Requirements\nThe code of Qwen1.5 has been in the latest Hugging face transformers and we advise you to install 'transformers>=4.37.0', or you might encounter the following error:", "## Usage\n\nWe do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model.\n\n\nIf you find our work helpful, feel free to give us a cite." ]
[ 42, 9, 160, 119, 44, 66 ]
[ "TAGS\n#transformers #safetensors #qwen2 #text-generation #conversational #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us \n# Qwen1.5-14B## Introduction\n\nQwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen, the improvements include:\n\n* 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated;\n* Significant performance improvement in Chat models;\n* Multilingual support of both base and chat models;\n* Stable support of 32K context length for models of all sizes\n* No need of 'trust_remote_code'.\n\nFor more details, please refer to our blog post and GitHub repo.## Model Details\nQwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, temporarily we did not include GQA (except for 32B) and the mixture of SWA and full attention.## Requirements\nThe code of Qwen1.5 has been in the latest Hugging face transformers and we advise you to install 'transformers>=4.37.0', or you might encounter the following error:## Usage\n\nWe do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model.\n\n\nIf you find our work helpful, feel free to give us a cite." ]
text-generation
transformers
# Uploaded model - **Developed by:** arthrod - **License:** apache-2.0 - **Finetuned from model :** meta-llama/Meta-Llama-3-8B-Instruct This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
{"language": ["en"], "license": "apache-2.0", "tags": ["text-generation-inference", "transformers", "unsloth", "llama", "trl", "orpo"], "base_model": "meta-llama/Meta-Llama-3-8B-Instruct"}
arthrod/ciceroptllama500
null
[ "transformers", "safetensors", "llama", "text-generation", "text-generation-inference", "unsloth", "trl", "orpo", "conversational", "en", "base_model:meta-llama/Meta-Llama-3-8B-Instruct", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-30T19:08:43+00:00
[]
[ "en" ]
TAGS #transformers #safetensors #llama #text-generation #text-generation-inference #unsloth #trl #orpo #conversational #en #base_model-meta-llama/Meta-Llama-3-8B-Instruct #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us
# Uploaded model - Developed by: arthrod - License: apache-2.0 - Finetuned from model : meta-llama/Meta-Llama-3-8B-Instruct This llama model was trained 2x faster with Unsloth and Huggingface's TRL library. <img src="URL width="200"/>
[ "# Uploaded model\n\n- Developed by: arthrod\n- License: apache-2.0\n- Finetuned from model : meta-llama/Meta-Llama-3-8B-Instruct\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #text-generation-inference #unsloth #trl #orpo #conversational #en #base_model-meta-llama/Meta-Llama-3-8B-Instruct #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n", "# Uploaded model\n\n- Developed by: arthrod\n- License: apache-2.0\n- Finetuned from model : meta-llama/Meta-Llama-3-8B-Instruct\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ 79, 80 ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #text-generation-inference #unsloth #trl #orpo #conversational #en #base_model-meta-llama/Meta-Llama-3-8B-Instruct #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n# Uploaded model\n\n- Developed by: arthrod\n- License: apache-2.0\n- Finetuned from model : meta-llama/Meta-Llama-3-8B-Instruct\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]