modelId: string (length 4–81)
tags: list
pipeline_tag: string (17 classes)
config: dict
downloads: int64 (0–59.7M)
first_commit: timestamp[ns, tz=UTC]
card: string (length 51–438k)
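Each row below follows this schema: a model id, its tags, the pipeline tag, the parsed config, a download count, the first-commit timestamp, and the raw model card. As a minimal sketch for working with such a dump (the parquet filename is hypothetical), the rows can be inspected with pandas:

```python
import pandas as pd

# Hypothetical export of the rows below; substitute the actual file.
df = pd.read_parquet("model_cards.parquet")

# How many models fall under each of the 17 pipeline_tag classes.
print(df["pipeline_tag"].value_counts())

# The most-downloaded models in the dump.
print(df.sort_values("downloads", ascending=False)[["modelId", "downloads"]].head())
```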
AnonymousSub/AR_rule_based_roberta_twostage_quadruplet_epochs_1_shard_10
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- license: creativeml-openrail-m base_model: stabilityai/stable-diffusion-2-1-base instance_prompt: a photo of a <HBbathrobemen> bathrobe tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers - lora inference: true --- # LoRA DreamBooth - hbbathrobemen These are LoRA adaptation weights for [stabilityai/stable-diffusion-2-1-base](https://huggingface.co/stabilityai/stable-diffusion-2-1-base). The weights were trained on the instance prompt "a photo of a <HBbathrobemen> bathrobe" using [DreamBooth](https://dreambooth.github.io/). You can find some example images below. Test prompt: a <HBbathrobemen> bathrobe ![image_0](test_images/image_0.png) ![image_1](test_images/image_1.png) ![image_2](test_images/image_2.png) ![image_3](test_images/image_3.png)
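A minimal usage sketch with 🤗 diffusers; the repo id passed to `load_lora_weights` is hypothetical, so substitute wherever these weights actually live:

```python
import torch
from diffusers import StableDiffusionPipeline

# Load the base model the LoRA weights were trained against.
pipe = StableDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1-base", torch_dtype=torch.float16
).to("cuda")

# Apply the LoRA adaptation weights on top of the base model.
pipe.load_lora_weights("hbbathrobemen/lora-weights")  # hypothetical repo id

image = pipe("a <HBbathrobemen> bathrobe").images[0]
image.save("bathrobe.png")
```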
AnonymousSub/AR_rule_based_roberta_twostagequadruplet_hier_epochs_1_shard_10
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
2023-03-22T14:30:40Z
--- license: cc-by-sa-3.0 language: - th --- This model was trained by Weerayut Buaphet from [https://github.com/vistec-AI/Thai-NNER](https://github.com/vistec-AI/Thai-NNER). It was converted from 0906_214036/checkpoint.pth to model.pth for use with PyThaiNLP. License: CC-BY-SA 3.0
AnonymousSub/SR_rule_based_hier_triplet_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- library_name: sample-factory tags: - deep-reinforcement-learning - reinforcement-learning - sample-factory model-index: - name: APPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: doom_health_gathering_supreme type: doom_health_gathering_supreme metrics: - type: mean_reward value: 12.34 +/- 4.13 name: mean_reward verified: false --- An **APPO** model trained on the **doom_health_gathering_supreme** environment. This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory. Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/ ## Downloading the model After installing Sample-Factory, download the model with: ``` python -m sample_factory.huggingface.load_from_hub -r JessicaHsu/rl_course_vizdoom_health_gathering_supreme-v1 ``` ## Using the model To run the model after download, use the `enjoy` script corresponding to this environment: ``` python -m sf_examples.vizdoom.enjoy_vizdoom --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme-v1 ``` You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag. See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details. ## Training with this model To continue training with this model, use the `train` script corresponding to this environment: ``` python -m sf_examples.vizdoom.train_vizdoom --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme-v1 --restart_behavior=resume --train_for_env_steps=10000000000 ``` Note: you may have to adjust `--train_for_env_steps` to a suitably high number, as the experiment will resume at the number of steps it concluded at.
AnonymousSub/SR_specter
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- tags: - generated_from_trainer model-index: - name: result results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # result This model is a fine-tuned version of [Mahmoud22/AraClassificationModel](https://huggingface.co/Mahmoud22/AraClassificationModel) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.0295 - F1-macro: 0.9856 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 8e-05 - train_batch_size: 64 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | F1-macro | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.1818 | 1.0 | 1630 | 0.0996 | 0.9661 | | 0.0899 | 2.0 | 3260 | 0.0398 | 0.9837 | | 0.0326 | 3.0 | 4890 | 0.0218 | 0.9893 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.13.0 - Datasets 2.1.0 - Tokenizers 0.13.2
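A minimal inference sketch, assuming the fine-tuned checkpoint was saved to the local `./result` output directory named above:

```python
from transformers import pipeline

# Load the fine-tuned Arabic classifier from the local training output.
classifier = pipeline("text-classification", model="./result")

print(classifier("نص تجريبي للتصنيف"))  # a sample Arabic input
```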
AnonymousSub/roberta-base_squad2.0
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- tags: - autotrain - vision - image-classification datasets: - Hrishikesh332/autotrain-data-meme-classification widget: - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg example_title: Tiger - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg example_title: Teapot - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg example_title: Palace co2_eq_emissions: emissions: 1.132924473643039 --- **Dataset** The dataset consists of images with two labels: * Meme * Not Meme The Meme folder contains 222 meme images and the Not Meme folder contains 108 non-meme files. Most of the meme images contain text on the picture, while the non-meme images cover all kinds of content, from sports to text in various forms (documents, image text), so the model can reach higher accuracy and recognize memes more reliably. **Use Case** * **Content Moderation** - The meme classification model can be used to filter meme content out of the vast amount of data generated on social media for a specific domain. **Future Scope** * Further work on the sentiment of meme images (positive, violent, offensive, sarcastic, neutral, etc.). This can be used for various tasks, such as: * **Education** - eliminating offensive content from memes curated for education. * **Brand Monitoring** - understanding user sentiment expressed through meme culture to support decision-making. # Model Trained Using AutoTrain - Problem type: Binary Classification - Model ID: 42897109437 - CO2 Emissions (in grams): 1.1329 ## Validation Metrics - Loss: 0.025 - Accuracy: 1.000 - Precision: 1.000 - Recall: 1.000 - AUC: 1.000 - F1: 1.000
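A minimal inference sketch; the repo id is hypothetical (AutoTrain names repos after the model ID, 42897109437 here), so substitute the actual one:

```python
from transformers import pipeline

# Hypothetical AutoTrain repo id for model ID 42897109437.
classifier = pipeline(
    "image-classification",
    model="Hrishikesh332/autotrain-meme-classification-42897109437",
)

# Accepts a local path or URL; returns Meme / Not Meme scores.
print(classifier("meme_example.jpg"))
```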
AnonymousSub/rule_based_hier_quadruplet_0.1_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
create a house on a 700 sqm lot with 8 car garage and a pool on the second floor, 5 bedrooms garden
AnonymousSub/rule_based_hier_quadruplet_epochs_1_shard_1_wikiqa
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
null
--- license: mit tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: roberta-base-finetuned-paperconc5 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # IRyS-NER-Paper This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on a paper dataset. It achieves the following results on the evaluation set: - Loss: 0.1197 - Precision: 0.7812 - Recall: 0.7548 - F1: 0.7677 - Accuracy: 0.9686 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 1.0 | 81 | 0.1969 | 0.6799 | 0.5433 | 0.6040 | 0.9448 | | No log | 2.0 | 162 | 0.1423 | 0.7634 | 0.6617 | 0.7089 | 0.9623 | | No log | 3.0 | 243 | 0.1197 | 0.7812 | 0.7548 | 0.7677 | 0.9686 | | No log | 4.0 | 324 | 0.1335 | 0.7819 | 0.7505 | 0.7659 | 0.9678 | | No log | 5.0 | 405 | 0.1326 | 0.7345 | 0.8013 | 0.7664 | 0.9650 | | No log | 6.0 | 486 | 0.1427 | 0.7471 | 0.8182 | 0.7810 | 0.9657 | | 0.1446 | 7.0 | 567 | 0.1439 | 0.7447 | 0.8203 | 0.7807 | 0.9666 | | 0.1446 | 8.0 | 648 | 0.1586 | 0.7368 | 0.8288 | 0.7801 | 0.9650 | | 0.1446 | 9.0 | 729 | 0.1707 | 0.7273 | 0.8288 | 0.7747 | 0.9629 | | 0.1446 | 10.0 | 810 | 0.1650 | 0.7438 | 0.8288 | 0.784 | 0.9649 | ### Framework versions - Transformers 4.27.2 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
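A minimal inference sketch, assuming the fine-tuned checkpoint was saved locally under the `roberta-base-finetuned-paperconc5` output directory named above:

```python
from transformers import pipeline

# Load the fine-tuned NER model; aggregation merges word pieces into entity spans.
ner = pipeline(
    "token-classification",
    model="./roberta-base-finetuned-paperconc5",
    aggregation_strategy="simple",
)

print(ner("BERT was introduced by Devlin et al. in 2018."))
```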
AnonymousSub/rule_based_hier_triplet_0.1_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- license: cc-by-4.0 datasets: - pythainlp/thainer-corpus-v2 language: - th metrics: - f1 widget: - text: "ฉันชื่อ นางสาวมะลิวา บุญสระดี อาศัยอยู่ที่อำเภอนางรอง จังหวัดบุรีรัมย์ อายุ 23 ปี เพิ่งเรียนจบจาก มหาวิทยาลัยขอนแก่น และนี่คือข้อมูลปลอม ชื่อคนไม่มีอยู่จริง" --- This is a Named Entity Recognition model trained on the [Thai NER v2.0 Corpus](https://huggingface.co/datasets/pythainlp/thainer-corpus-v2). Training script and data split: [https://zenodo.org/record/7761354](https://zenodo.org/record/7761354) The model was fine-tuned from the [WangchanBERTa base model](https://huggingface.co/airesearch/wangchanberta-base-att-spm-uncased). Results on the validation set - Precision: 0.830336794125095 - Recall: 0.873701039168665 - F1: 0.8514671513892494 - Accuracy: 0.9736483416628805 Results on the test set - Precision: 0.8199168093956447 - Recall: 0.8781446540880503 - F1: 0.8480323927622422 - Accuracy: 0.9724346779516247 Download: [HuggingFace Hub](https://huggingface.co/datasets/pythainlp/thainer-corpus-v2) Read more: [Thai NER v2.0](https://pythainlp.github.io/Thai-NER/version/2) ## Inference The Hugging Face hosted inference widget does not tokenize Thai correctly for token classification and will return wrong tags; use the following code instead. ```python from transformers import AutoTokenizer from transformers import AutoModelForTokenClassification from pythainlp.tokenize import word_tokenize # pip install pythainlp import torch name="pythainlp/thainer-corpus-v2-base-model" tokenizer = AutoTokenizer.from_pretrained(name) model = AutoModelForTokenClassification.from_pretrained(name) sentence="ฉันชื่อ นางสาวมะลิวา บุญสระดี อาศัยอยู่ที่อำเภอนางรอง จังหวัดบุรีรัมย์ อายุ 23 ปี เพิ่งเรียนจบจาก มหาวิทยาลัยขอนแก่น และนี่คือข้อมูลปลอมชื่อคนไม่มีอยู่จริง อายุ 23 ปี" cut=word_tokenize(sentence.replace(" ", "<_>")) inputs=tokenizer(cut,is_split_into_words=True,return_tensors="pt") ids = inputs["input_ids"] mask = inputs["attention_mask"] # forward pass outputs = model(ids, attention_mask=mask) logits = outputs[0] predictions = torch.argmax(logits, dim=2) predicted_token_class = [model.config.id2label[t.item()] for t in predictions[0]] def fix_span_error(words,ner): _ner = [] _ner=ner _new_tag=[] for i,j in zip(words,_ner): #print(i,j) i=tokenizer.decode(i) if i.isspace() and j.startswith("B-"): j="O" if i=='' or i=='<s>' or i=='</s>': continue if i=="<_>": i=" " _new_tag.append((i,j)) return _new_tag ner_tag=fix_span_error(inputs['input_ids'][0],predicted_token_class) print(ner_tag) ``` output: ```python [('ฉัน', 'O'), ('ชื่อ', 'O'), (' ', 'O'), ('นางสาว', 'B-PERSON'), ('มะลิ', 'I-PERSON'), ('วา', 'I-PERSON'), (' ', 'I-PERSON'), ('บุญ', 'I-PERSON'), ('สระ', 'I-PERSON'), ('ดี', 'I-PERSON'), (' ', 'O'), ('อาศัย', 'O'), ('อยู่', 'O'), ('ที่', 'O'), ('อําเภอ', 'B-LOCATION'), ('นาง', 'I-LOCATION'), ('รอง', 'I-LOCATION'), (' ', 'O'), ('จังหวัด', 'B-LOCATION'), ('บุรีรัมย์', 'I-LOCATION'), (' ', 'O'), ('อายุ', 'O'), (' ', 'O'), ('23', 'B-AGO'), (' ', 'I-AGO'), ('ปี', 'I-AGO'), (' ', 'O'), ('เพิ่ง', 'O'), ('เรียนจบ', 'O'), ('จาก', 'O'), (' ', 'O'), ('มหาวิทยาลั', 'B-ORGANIZATION'), ('ยขอนแก่น', 'I-ORGANIZATION'), (' ', 'O'), ('และ', 'O'), ('นี่', 'O'), ('คือ', 'O'), ('ข้อมูล', 'O'), ('ปลอม', 'O'), ('ชื่อ', 'O'), ('คน', 'O'), ('ไม่', 'O'), ('มี', 'O'), ('อยู่', 'O'), ('จริง', 'O'), (' ', 'O'), ('อายุ', 'O'), (' ', 'O'), ('23', 'B-AGO'), (' ', 'O'), ('ปี', 'I-AGO')] ``` ## Cite > Wannaphong Phatthiyaphaibun. (2022). Thai NER 2.0 (2.0) [Data set]. Zenodo. https://doi.org/10.5281/zenodo.7761354 or BibTeX: ``` @dataset{wannaphong_phatthiyaphaibun_2022_7761354, author = {Wannaphong Phatthiyaphaibun}, title = {Thai NER 2.0}, month = sep, year = 2022, publisher = {Zenodo}, version = {2.0}, doi = {10.5281/zenodo.7761354}, url = {https://doi.org/10.5281/zenodo.7761354} } ```
AnonymousSub/rule_based_hier_triplet_0.1_epochs_1_shard_1_squad2.0
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python model = load_from_hub(repo_id="snicolau/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
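`load_from_hub` is a helper from the Deep RL Course rather than a library function; a minimal sketch of one possible implementation, assuming the checkpoint is the pickled dict used above:

```python
import pickle

import gymnasium as gym
from huggingface_hub import hf_hub_download


def load_from_hub(repo_id: str, filename: str) -> dict:
    """Download and unpickle a Q-Learning checkpoint from the Hugging Face Hub."""
    local_path = hf_hub_download(repo_id=repo_id, filename=filename)
    with open(local_path, "rb") as f:
        return pickle.load(f)


model = load_from_hub("snicolau/q-FrozenLake-v1-4x4-noSlippery", "q-learning.pkl")
env = gym.make(model["env_id"], is_slippery=False)  # extra attribute per the card's note
```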
AnonymousSub/rule_based_only_classfn_twostage_epochs_1_shard_1_wikiqa
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: irl_mlm_model_v1 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # irl_mlm_model_v1 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 3.6569 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 110 | 3.6701 | ### Framework versions - Transformers 4.27.2 - Pytorch 2.0.0+cu117 - Datasets 2.10.1 - Tokenizers 0.13.2
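A minimal inference sketch, assuming the fine-tuned checkpoint was saved to the local `irl_mlm_model_v1` output directory named above:

```python
from transformers import pipeline

# Load the fine-tuned masked language model; bert-base-uncased uses the [MASK] token.
fill = pipeline("fill-mask", model="./irl_mlm_model_v1")

print(fill("The capital of France is [MASK]."))
```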
AnonymousSub/rule_based_roberta_hier_quadruplet_0.1_epochs_1_shard_1
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
Access to model degaga/document_classification is restricted and you are not in the authorized list. Visit https://huggingface.co/degaga/document_classification to ask for access.
AnonymousSub/rule_based_roberta_twostagetriplet_hier_epochs_1_shard_1_wikiqa
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
23
null
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: taxi-v3 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.56 +/- 2.71 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python model = load_from_hub(repo_id="XaneWayner/taxi-v3", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
AnonymousSub/rule_based_twostagequadruplet_hier_epochs_1_shard_1_wikiqa
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
28
null
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-Taxi-v3 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.56 +/- 2.71 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python model = load_from_hub(repo_id="BoschAI/q-Taxi-v3", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
AnonymousSub/rule_based_twostagetriplet_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- license: mit --- This is an [NB-BERT](https://huggingface.co/NbAiLab/nb-bert-large) model fine-tuned on 4000 examples of the [NoReC dataset](https://github.com/ltgoslo/norec), where examples with a score of 1 or 2 were marked as negative and those with a score of 5 or 6 were marked as positive. The model was fine-tuned for 1 epoch with the following parameters: - learning_rate = 5e-05 - warmup_ratio = 0.1 - optim = 'adamw_hf' - weight_decay = 0
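As a sketch, those parameters map onto 🤗 Transformers `TrainingArguments` as follows (the output directory is hypothetical; everything else stays at its default):

```python
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="nb-bert-norec",  # hypothetical output directory
    num_train_epochs=1,
    learning_rate=5e-05,
    warmup_ratio=0.1,
    optim="adamw_hf",
    weight_decay=0.0,
)
```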
AnonymousSub/unsup-consert-base_squad2.0
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-cartpoleV1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 464.90 +/- 113.16 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1**. To learn to use this model and train yours, check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
AnonymousSub/unsup-consert-emanuals
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - glue metrics: - matthews_correlation model-index: - name: distilbert-base-uncased-finetuned-cola results: - task: name: Text Classification type: text-classification dataset: name: glue type: glue config: cola split: train args: cola metrics: - name: Matthews Correlation type: matthews_correlation value: 0.5497693861041112 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-cola This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the glue dataset. It achieves the following results on the evaluation set: - Loss: 0.8376 - Matthews Correlation: 0.5498 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Matthews Correlation | |:-------------:|:-----:|:----:|:---------------:|:--------------------:| | 0.5265 | 1.0 | 535 | 0.5547 | 0.4112 | | 0.3497 | 2.0 | 1070 | 0.5017 | 0.4919 | | 0.2307 | 3.0 | 1605 | 0.5383 | 0.5482 | | 0.1734 | 4.0 | 2140 | 0.8100 | 0.5387 | | 0.1336 | 5.0 | 2675 | 0.8376 | 0.5498 | ### Framework versions - Transformers 4.25.1 - Pytorch 2.0.0 - Datasets 2.10.1 - Tokenizers 0.13.2
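For reference, the Matthews correlation reported above can be computed with scikit-learn; a small sketch on dummy labels:

```python
from sklearn.metrics import matthews_corrcoef

y_true = [1, 0, 1, 1, 0, 1]  # dummy gold labels
y_pred = [1, 0, 1, 0, 0, 1]  # dummy model predictions

# 1.0 is perfect agreement, 0.0 is chance level, -1.0 is total disagreement.
print(matthews_corrcoef(y_true, y_pred))
```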
AnonymousSub/unsup-consert-papers-bert
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-SoccerTwos library_name: ml-agents --- # **poca** Agent playing **SoccerTwos** This is a trained model of a **poca** agent playing **SoccerTwos** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-SoccerTwos 2. Write your model_id: dussinus/poca-SoccerTwos 3. Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
Antony/mint_model
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
Access to model Fathi123/fathi is restricted and you are not in the authorized list. Visit https://huggingface.co/Fathi123/fathi to ask for access.
Anupam/QuestionClassifier
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - image-classification - timm library_tag: timm license: apache-2.0 datasets: - imagenet-1k --- # Model card for resnetv2_50.a1h_in1k A ResNet-V2 (pre-activation ResNet) image classification model. Trained on ImageNet-1k by Ross Wightman in `timm` using ResNet strikes back (RSB) `A1` based recipe. ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 25.5 - GMACs: 4.1 - Activations (M): 11.1 - Image size: 224 x 224 - **Papers:** - ResNet strikes back: An improved training procedure in timm: https://arxiv.org/abs/2110.00476 - Identity Mappings in Deep Residual Networks: https://arxiv.org/abs/1603.05027 - **Dataset:** ImageNet-1k - **Original:** https://github.com/huggingface/pytorch-image-models ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm import torch img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('resnetv2_50.a1h_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'resnetv2_50.a1h_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 64, 112, 112]) # torch.Size([1, 256, 56, 56]) # torch.Size([1, 512, 28, 28]) # torch.Size([1, 1024, 14, 14]) # torch.Size([1, 2048, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'resnetv2_50.a1h_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 2048, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). ## Citation ```bibtex @inproceedings{wightman2021resnet, title={ResNet strikes back: An improved training procedure in timm}, author={Wightman, Ross and Touvron, Hugo and Jegou, Herve}, booktitle={NeurIPS 2021 Workshop on ImageNet: Past, Present, and Future} } ``` ```bibtex @article{He2016, author = {Kaiming He and Xiangyu Zhang and Shaoqing Ren and Jian Sun}, title = {Identity Mappings in Deep Residual Networks}, journal = {arXiv preprint arXiv:1603.05027}, year = {2016} } ``` ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ```
gaurishhs/API
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - image-classification - timm library_tag: timm license: apache-2.0 datasets: - imagenet-1k --- # Model card for resnetv2_50d_evos.ah_in1k A ResNet-V2 (pre-activation ResNet) image classification model. Trained on ImageNet-1k by Ross Wightman in `timm` using ResNet strikes back (RSB) `A1` based recipe. This model uses: * A 3x3 3-layer stem, avg-pool in shortcut downsample. * EvoNorm-S0 normalization-activation layers instead of Batch Normalization with ReLU activations. ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 25.6 - GMACs: 4.3 - Activations (M): 11.9 - Image size: train = 224 x 224, test = 288 x 288 - **Papers:** - ResNet strikes back: An improved training procedure in timm: https://arxiv.org/abs/2110.00476 - Identity Mappings in Deep Residual Networks: https://arxiv.org/abs/1603.05027 - Evolving Normalization-Activation Layers: https://arxiv.org/abs/2004.02967 - **Dataset:** ImageNet-1k - **Original:** https://github.com/huggingface/pytorch-image-models ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm import torch img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('resnetv2_50d_evos.ah_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'resnetv2_50d_evos.ah_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 64, 112, 112]) # torch.Size([1, 256, 56, 56]) # torch.Size([1, 512, 28, 28]) # torch.Size([1, 1024, 14, 14]) # torch.Size([1, 2048, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'resnetv2_50d_evos.ah_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 2048, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). ## Citation ```bibtex @inproceedings{wightman2021resnet, title={ResNet strikes back: An improved training procedure in timm}, author={Wightman, Ross and Touvron, Hugo and Jegou, Herve}, booktitle={NeurIPS 2021 Workshop on ImageNet: Past, Present, and Future} } ``` ```bibtex @article{He2016, author = {Kaiming He and Xiangyu Zhang and Shaoqing Ren and Jian Sun}, title = {Identity Mappings in Deep Residual Networks}, journal = {arXiv preprint arXiv:1603.05027}, year = {2016} } ``` ```bibtex @article{liu2020evolving, title={Evolving normalization-activation layers}, author={Liu, Hanxiao and Brock, Andy and Simonyan, Karen and Le, Quoc}, journal={Advances in Neural Information Processing Systems}, volume={33}, pages={13539--13550}, year={2020} } ``` ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ```
Apisate/DialoGPT-small-jordan
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- library_name: sample-factory tags: - deep-reinforcement-learning - reinforcement-learning - sample-factory model-index: - name: APPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: doom_health_gathering_supreme type: doom_health_gathering_supreme metrics: - type: mean_reward value: 10.38 +/- 4.57 name: mean_reward verified: false --- An **APPO** model trained on the **doom_health_gathering_supreme** environment. This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory. Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/ ## Downloading the model After installing Sample-Factory, download the model with: ``` python -m sample_factory.huggingface.load_from_hub -r Stokrotka/rl_course_vizdoom_health_gathering_supreme ``` ## Using the model To run the model after download, use the `enjoy` script corresponding to this environment: ``` python -m sf_examples.vizdoom.enjoy_vizdoom --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme ``` You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag. See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details. ## Training with this model To continue training with this model, use the `train` script corresponding to this environment: ``` python -m sf_examples.vizdoom.train_vizdoom --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=10000000000 ``` Note: you may have to adjust `--train_for_env_steps` to a suitably high number, as the experiment will resume at the number of steps it concluded at.
Aplinxy9plin/toxic-detection-rus
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - image-classification - timm library_tag: timm license: apache-2.0 datasets: - imagenet-1k - imagenet-21k --- # Model card for resnetv2_50x1_bit.goog_distilled_in1k A ResNet-V2-BiT (Big Transfer w/ pre-activation ResNet) image classification model. Distilled from ImageNet-21k pretrained teacher model on ImageNet-1k by paper authors. This model uses: * Group Normalization (GN) in combination with Weight Standardization (WS) instead of Batch Normalization (BN). ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 25.5 - GMACs: 4.2 - Activations (M): 11.1 - Image size: 224 x 224 - **Papers:** - Knowledge distillation: A good teacher is patient and consistent: https://arxiv.org/abs/2106.05237 - Big Transfer (BiT): General Visual Representation Learning: https://arxiv.org/abs/1912.11370 - Identity Mappings in Deep Residual Networks: https://arxiv.org/abs/1603.05027 - **Dataset:** ImageNet-1k - **Pretrain Dataset:** ImageNet-21k - **Original:** https://github.com/google-research/big_transfer ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm import torch img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('resnetv2_50x1_bit.goog_distilled_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'resnetv2_50x1_bit.goog_distilled_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 64, 112, 112]) # torch.Size([1, 256, 56, 56]) # torch.Size([1, 512, 28, 28]) # torch.Size([1, 1024, 14, 14]) # torch.Size([1, 2048, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'resnetv2_50x1_bit.goog_distilled_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 2048, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). ## Citation ```bibtex @inproceedings{beyer2022knowledge, title={Knowledge distillation: A good teacher is patient and consistent}, author={Beyer, Lucas and Zhai, Xiaohua and Royer, Am{\'e}lie and Markeeva, Larisa and Anil, Rohan and Kolesnikov, Alexander}, booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, pages={10925--10934}, year={2022} } ``` ```bibtex @inproceedings{Kolesnikov2019BigT, title={Big Transfer (BiT): General Visual Representation Learning}, author={Alexander Kolesnikov and Lucas Beyer and Xiaohua Zhai and Joan Puigcerver and Jessica Yung and Sylvain Gelly and Neil Houlsby}, booktitle={European Conference on Computer Vision}, year={2019} } ``` ```bibtex @article{He2016, author = {Kaiming He and Xiangyu Zhang and Shaoqing Ren and Jian Sun}, title = {Identity Mappings in Deep Residual Networks}, journal = {arXiv preprint arXiv:1603.05027}, year = {2016} } ``` ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ```
Apoorva/k2t-test
[ "pytorch", "t5", "text2text-generation", "en", "transformers", "keytotext", "k2t", "Keywords to Sentences", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
7
null
--- tags: - image-classification - timm library_tag: timm license: apache-2.0 datasets: - imagenet-21k --- # Model card for resnetv2_50x1_bit.goog_in21k A ResNet-V2-BiT (Big Transfer w/ pre-activation ResNet) image classification model. Trained on ImageNet-21k by paper authors. This model uses: * Group Normalization (GN) in combination with Weight Standardization (WS) instead of Batch Normalization (BN). ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 68.3 - GMACs: 4.3 - Activations (M): 11.1 - Image size: 224 x 224 - **Papers:** - Big Transfer (BiT): General Visual Representation Learning: https://arxiv.org/abs/1912.11370 - Identity Mappings in Deep Residual Networks: https://arxiv.org/abs/1603.05027 - **Dataset:** ImageNet-21k - **Original:** https://github.com/google-research/big_transfer ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm import torch img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('resnetv2_50x1_bit.goog_in21k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'resnetv2_50x1_bit.goog_in21k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 64, 112, 112]) # torch.Size([1, 256, 56, 56]) # torch.Size([1, 512, 28, 28]) # torch.Size([1, 1024, 14, 14]) # torch.Size([1, 2048, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'resnetv2_50x1_bit.goog_in21k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 2048, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). ## Citation ```bibtex @inproceedings{Kolesnikov2019BigT, title={Big Transfer (BiT): General Visual Representation Learning}, author={Alexander Kolesnikov and Lucas Beyer and Xiaohua Zhai and Joan Puigcerver and Jessica Yung and Sylvain Gelly and Neil Houlsby}, booktitle={European Conference on Computer Vision}, year={2019} } ``` ```bibtex @article{He2016, author = {Kaiming He and Xiangyu Zhang and Shaoqing Ren and Jian Sun}, title = {Identity Mappings in Deep Residual Networks}, journal = {arXiv preprint arXiv:1603.05027}, year = {2016} } ``` ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ```
Appolo/TestModel
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - image-classification - timm library_tag: timm license: apache-2.0 datasets: - imagenet-1k - imagenet-21k --- # Model card for resnetv2_50x1_bit.goog_in21k_ft_in1k A ResNet-V2-BiT (Big Transfer w/ pre-activation ResNet) image classification model. Pretrained on ImageNet-21k and fine-tuned on ImageNet-1k by paper authors. This model uses: * Group Normalization (GN) in combination with Weight Standardization (WS) instead of Batch Normalization (BN). ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 25.5 - GMACs: 16.6 - Activations (M): 44.5 - Image size: 448 x 448 - **Papers:** - Big Transfer (BiT): General Visual Representation Learning: https://arxiv.org/abs/1912.11370 - Identity Mappings in Deep Residual Networks: https://arxiv.org/abs/1603.05027 - **Dataset:** ImageNet-1k - **Pretrain Dataset:** ImageNet-21k - **Original:** https://github.com/google-research/big_transfer ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm import torch img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('resnetv2_50x1_bit.goog_in21k_ft_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'resnetv2_50x1_bit.goog_in21k_ft_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 64, 224, 224]) # torch.Size([1, 256, 112, 112]) # torch.Size([1, 512, 56, 56]) # torch.Size([1, 1024, 28, 28]) # torch.Size([1, 2048, 14, 14]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'resnetv2_50x1_bit.goog_in21k_ft_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 2048, 14, 14) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the dataset and 
runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). ## Citation ```bibtex @inproceedings{Kolesnikov2019BigT, title={Big Transfer (BiT): General Visual Representation Learning}, author={Alexander Kolesnikov and Lucas Beyer and Xiaohua Zhai and Joan Puigcerver and Jessica Yung and Sylvain Gelly and Neil Houlsby}, booktitle={European Conference on Computer Vision}, year={2019} } ``` ```bibtex @article{He2016, author = {Kaiming He and Xiangyu Zhang and Shaoqing Ren and Jian Sun}, title = {Identity Mappings in Deep Residual Networks}, journal = {arXiv preprint arXiv:1603.05027}, year = {2016} } ``` ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ```
ArBert/albert-base-v2-finetuned-ner-agglo
[ "pytorch", "tensorboard", "albert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "AlbertForTokenClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- tags: - image-classification - timm library_tag: timm license: apache-2.0 datasets: - imagenet-1k - imagenet-21k --- # Model card for resnetv2_50x3_bit.goog_in21k_ft_in1k A ResNet-V2-BiT (Big Transfer w/ pre-activation ResNet) image classification model. Pretrained on ImageNet-21k and fine-tuned on ImageNet-1k by paper authors. This model uses: * Group Normalization (GN) in combination with Weight Standardization (WS) instead of Batch Normalization (BN). ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 217.3 - GMACs: 145.7 - Activations (M): 133.4 - Image size: 448 x 448 - **Papers:** - Big Transfer (BiT): General Visual Representation Learning: https://arxiv.org/abs/1912.11370 - Identity Mappings in Deep Residual Networks: https://arxiv.org/abs/1603.05027 - **Dataset:** ImageNet-1k - **Pretrain Dataset:** ImageNet-21k - **Original:** https://github.com/google-research/big_transfer ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm import torch img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('resnetv2_50x3_bit.goog_in21k_ft_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'resnetv2_50x3_bit.goog_in21k_ft_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 192, 224, 224]) # torch.Size([1, 768, 112, 112]) # torch.Size([1, 1536, 56, 56]) # torch.Size([1, 3072, 28, 28]) # torch.Size([1, 6144, 14, 14]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'resnetv2_50x3_bit.goog_in21k_ft_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 6144, 14, 14) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the dataset 
and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). ## Citation ```bibtex @inproceedings{Kolesnikov2019BigT, title={Big Transfer (BiT): General Visual Representation Learning}, author={Alexander Kolesnikov and Lucas Beyer and Xiaohua Zhai and Joan Puigcerver and Jessica Yung and Sylvain Gelly and Neil Houlsby}, booktitle={European Conference on Computer Vision}, year={2019} } ``` ```bibtex @article{He2016, author = {Kaiming He and Xiangyu Zhang and Shaoqing Ren and Jian Sun}, title = {Identity Mappings in Deep Residual Networks}, journal = {arXiv preprint arXiv:1603.05027}, year = {2016} } ``` ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ```
ArBert/albert-base-v2-finetuned-ner-gmm-twitter
[ "pytorch", "tensorboard", "albert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "AlbertForTokenClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- tags: - image-classification - timm library_tag: timm license: apache-2.0 datasets: - imagenet-1k --- # Model card for resnetv2_101.a1h_in1k A ResNet-V2 (pre-activation ResNet) image classification model. Trained on ImageNet-1k by Ross Wightman in `timm` using ResNet strikes back (RSB) `A1` based recipe. ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 44.5 - GMACs: 7.8 - Activations (M): 16.2 - Image size: 224 x 224 - **Papers:** - ResNet strikes back: An improved training procedure in timm: https://arxiv.org/abs/2110.00476 - Identity Mappings in Deep Residual Networks: https://arxiv.org/abs/1603.05027 - **Dataset:** ImageNet-1k - **Original:** https://github.com/huggingface/pytorch-image-models ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm import torch img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('resnetv2_101.a1h_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'resnetv2_101.a1h_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 64, 112, 112]) # torch.Size([1, 256, 56, 56]) # torch.Size([1, 512, 28, 28]) # torch.Size([1, 1024, 14, 14]) # torch.Size([1, 2048, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'resnetv2_101.a1h_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 2048, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). 
## Citation ```bibtex @inproceedings{wightman2021resnet, title={ResNet strikes back: An improved training procedure in timm}, author={Wightman, Ross and Touvron, Hugo and Jegou, Herve}, booktitle={NeurIPS 2021 Workshop on ImageNet: Past, Present, and Future} } ``` ```bibtex @article{He2016, author = {Kaiming He and Xiangyu Zhang and Shaoqing Ren and Jian Sun}, title = {Identity Mappings in Deep Residual Networks}, journal = {arXiv preprint arXiv:1603.05027}, year = {2016} } ``` ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ```
ArBert/albert-base-v2-finetuned-ner-gmm
[ "pytorch", "tensorboard", "albert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "AlbertForTokenClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- tags: - image-classification - timm library_tag: timm license: apache-2.0 datasets: - imagenet-21k --- # Model card for resnetv2_101x1_bit.goog_in21k A ResNet-V2-BiT (Big Transfer w/ pre-activation ResNet) image classification model. Trained on ImageNet-21k by paper authors. This model uses: * Group Normalization (GN) in combination with Weight Standardization (WS) instead of Batch Normalization (BN). ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 87.2 - GMACs: 8.1 - Activations (M): 16.3 - Image size: 224 x 224 - **Papers:** - Big Transfer (BiT): General Visual Representation Learning: https://arxiv.org/abs/1912.11370 - Identity Mappings in Deep Residual Networks: https://arxiv.org/abs/1603.05027 - **Dataset:** ImageNet-21k - **Original:** https://github.com/google-research/big_transfer ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm import torch img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('resnetv2_101x1_bit.goog_in21k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'resnetv2_101x1_bit.goog_in21k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 64, 112, 112]) # torch.Size([1, 256, 56, 56]) # torch.Size([1, 512, 28, 28]) # torch.Size([1, 1024, 14, 14]) # torch.Size([1, 2048, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'resnetv2_101x1_bit.goog_in21k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 2048, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model 
results](https://github.com/huggingface/pytorch-image-models/tree/main/results). ## Citation ```bibtex @inproceedings{Kolesnikov2019BigT, title={Big Transfer (BiT): General Visual Representation Learning}, author={Alexander Kolesnikov and Lucas Beyer and Xiaohua Zhai and Joan Puigcerver and Jessica Yung and Sylvain Gelly and Neil Houlsby}, booktitle={European Conference on Computer Vision}, year={2019} } ``` ```bibtex @article{He2016, author = {Kaiming He and Xiangyu Zhang and Shaoqing Ren and Jian Sun}, title = {Identity Mappings in Deep Residual Networks}, journal = {arXiv preprint arXiv:1603.05027}, year = {2016} } ``` ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ```
ArBert/bert-base-uncased-finetuned-ner-agglo
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - image-classification - timm library_tag: timm license: apache-2.0 datasets: - imagenet-1k - imagenet-21k --- # Model card for resnetv2_101x3_bit.goog_in21k_ft_in1k A ResNet-V2-BiT (Big Transfer w/ pre-activation ResNet) image classification model. Pretrained on ImageNet-21k and fine-tuned on ImageNet-1k by paper authors. This model uses: * Group Normalization (GN) in combination with Weight Standardization (WS) instead of Batch Normalization (BN). ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 387.9 - GMACs: 280.3 - Activations (M): 194.8 - Image size: 448 x 448 - **Papers:** - Big Transfer (BiT): General Visual Representation Learning: https://arxiv.org/abs/1912.11370 - Identity Mappings in Deep Residual Networks: https://arxiv.org/abs/1603.05027 - **Dataset:** ImageNet-1k - **Pretrain Dataset:** ImageNet-21k - **Original:** https://github.com/google-research/big_transfer ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm import torch img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('resnetv2_101x3_bit.goog_in21k_ft_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'resnetv2_101x3_bit.goog_in21k_ft_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 192, 224, 224]) # torch.Size([1, 768, 112, 112]) # torch.Size([1, 1536, 56, 56]) # torch.Size([1, 3072, 28, 28]) # torch.Size([1, 6144, 14, 14]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'resnetv2_101x3_bit.goog_in21k_ft_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 6144, 14, 14) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the 
dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). ## Citation ```bibtex @inproceedings{Kolesnikov2019BigT, title={Big Transfer (BiT): General Visual Representation Learning}, author={Alexander Kolesnikov and Lucas Beyer and Xiaohua Zhai and Joan Puigcerver and Jessica Yung and Sylvain Gelly and Neil Houlsby}, booktitle={European Conference on Computer Vision}, year={2019} } ``` ```bibtex @article{He2016, author = {Kaiming He and Xiangyu Zhang and Shaoqing Ren and Jian Sun}, title = {Identity Mappings in Deep Residual Networks}, journal = {arXiv preprint arXiv:1603.05027}, year = {2016} } ``` ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ```
ArBert/bert-base-uncased-finetuned-ner-gmm
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit tags: - image-classification - tfjs --- ## TensorFlow.js version of Mobilenet Pushed from Web ![](coffee.jpg)
ArBert/roberta-base-finetuned-ner-agglo
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-22T21:27:09Z
--- pipeline_tag: fill-mask license: mit language: ko mask_token: <mask> widget: - text: 우리 삶의 목표는 <mask>이다. --- # conv-bert-base-ko A ConvBERT model trained from scratch. It has the same structure as [YituTech/conv-bert-base](https://huggingface.co/YituTech/conv-bert-base).
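The card above gives no usage snippet; here is a minimal fill-mask sketch, assuming the checkpoint loads with the standard `transformers` pipeline. The repo id is taken from the card title and may need a user/org namespace prefix.

```python
from transformers import pipeline

# "conv-bert-base-ko" is the name from the card title; the actual hub
# path is an assumption and may differ.
fill_mask = pipeline("fill-mask", model="conv-bert-base-ko")

# The widget example from the card: "The goal of our life is <mask>."
for pred in fill_mask("우리 삶의 목표는 <mask>이다."):
    print(pred["token_str"], round(pred["score"], 4))
```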
ArBert/roberta-base-finetuned-ner-gmm
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - image-classification - timm library_tag: timm license: apache-2.0 datasets: - imagenet-1k - imagenet-21k --- # Model card for resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k A ResNet-V2-BiT (Big Transfer w/ pre-activation ResNet) image classification model. Pretrained on ImageNet-21k and fine-tuned on ImageNet-1k by paper authors. This model uses: * Group Normalization (GN) in combination with Weight Standardization (WS) instead of Batch Normalization (BN). ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 236.3 - GMACs: 46.9 - Activations (M): 45.1 - Image size: 224 x 224 - **Papers:** - Knowledge distillation: A good teacher is patient and consistent: https://arxiv.org/abs/2106.05237 - Big Transfer (BiT): General Visual Representation Learning: https://arxiv.org/abs/1912.11370 - Identity Mappings in Deep Residual Networks: https://arxiv.org/abs/1603.05027 - **Dataset:** ImageNet-1k - **Pretrain Dataset:** ImageNet-21k - **Original:** https://github.com/google-research/big_transfer ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm import torch img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 128, 112, 112]) # torch.Size([1, 512, 56, 56]) # torch.Size([1, 1024, 28, 28]) # torch.Size([1, 2048, 14, 14]) # torch.Size([1, 4096, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 4096, 7, 7) shaped tensor output = 
model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). ## Citation ```bibtex @inproceedings{beyer2022knowledge, title={Knowledge distillation: A good teacher is patient and consistent}, author={Beyer, Lucas and Zhai, Xiaohua and Royer, Am{'e}lie and Markeeva, Larisa and Anil, Rohan and Kolesnikov, Alexander}, booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, pages={10925--10934}, year={2022} } ``` ```bibtex @inproceedings{Kolesnikov2019BigT, title={Big Transfer (BiT): General Visual Representation Learning}, author={Alexander Kolesnikov and Lucas Beyer and Xiaohua Zhai and Joan Puigcerver and Jessica Yung and Sylvain Gelly and Neil Houlsby}, booktitle={European Conference on Computer Vision}, year={2019} } ``` ```bibtex @article{He2016, author = {Kaiming He and Xiangyu Zhang and Shaoqing Ren and Jian Sun}, title = {Identity Mappings in Deep Residual Networks}, journal = {arXiv preprint arXiv:1603.05027}, year = {2016} } ``` ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ```
ArBert/roberta-base-finetuned-ner-kmeans
[ "pytorch", "tensorboard", "roberta", "token-classification", "dataset:conll2003", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
token-classification
{ "architectures": [ "RobertaForTokenClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- tags: - generated_from_trainer datasets: - emotion metrics: - accuracy - f1 model-index: - name: emotion-english-distilroberta-base-finetuned-emotion results: - task: name: Text Classification type: text-classification dataset: name: emotion type: emotion config: split split: validation args: split metrics: - name: Accuracy type: accuracy value: 0.9265 - name: F1 type: f1 value: 0.9273547353924119 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # emotion-english-distilroberta-base-finetuned-emotion This model is a fine-tuned version of [j-hartmann/emotion-english-distilroberta-base](https://huggingface.co/j-hartmann/emotion-english-distilroberta-base) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.1807 - Accuracy: 0.9265 - F1: 0.9274 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.4136 | 1.0 | 250 | 0.2115 | 0.92 | 0.9193 | | 0.1896 | 2.0 | 500 | 0.1807 | 0.9265 | 0.9274 | ### Framework versions - Transformers 4.27.2 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
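As a usage note for the fine-tuned emotion classifier described above, here is a minimal inference sketch; the hub path is a placeholder, since the card does not state where the checkpoint is published.

```python
from transformers import pipeline

# Hypothetical repo id; replace with the actual hub path of the
# fine-tuned checkpoint named in the card.
classifier = pipeline(
    "text-classification",
    model="<user>/emotion-english-distilroberta-base-finetuned-emotion",
)
print(classifier("I love this library!"))  # e.g. [{'label': 'joy', 'score': ...}]
```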
ArJakusz/DialoGPT-small-starky
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit tags: - image-classification - tfjs --- ## TensorFlow.js version of Mobilenet Pushed from Web ![](coffee.jpg)
Araby/Arabic-TTS
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-22T21:34:53Z
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-Cartpole-v1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 500.00 +/- 0.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1**. To learn how to use this model and train your own, check out Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
Araf/Ummah
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-22T21:36:33Z
--- tags: - image-classification - timm library_tag: timm license: apache-2.0 datasets: - imagenet-21k --- # Model card for resnetv2_152x4_bit.goog_in21k A ResNet-V2-BiT (Big Transfer w/ pre-activation ResNet) image classification model. Trained on ImageNet-21k by paper authors. This model uses: * Group Normalization (GN) in combination with Weight Standardization (WS) instead of Batch Normalization (BN). ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 1107.3 - GMACs: 187.1 - Activations (M): 90.2 - Image size: 224 x 224 - **Papers:** - Big Transfer (BiT): General Visual Representation Learning: https://arxiv.org/abs/1912.11370 - Identity Mappings in Deep Residual Networks: https://arxiv.org/abs/1603.05027 - **Dataset:** ImageNet-21k - **Original:** https://github.com/google-research/big_transfer ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm import torch img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('resnetv2_152x4_bit.goog_in21k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'resnetv2_152x4_bit.goog_in21k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 256, 112, 112]) # torch.Size([1, 1024, 56, 56]) # torch.Size([1, 2048, 28, 28]) # torch.Size([1, 4096, 14, 14]) # torch.Size([1, 8192, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'resnetv2_152x4_bit.goog_in21k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 8192, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model 
results](https://github.com/huggingface/pytorch-image-models/tree/main/results). ## Citation ```bibtex @inproceedings{Kolesnikov2019BigT, title={Big Transfer (BiT): General Visual Representation Learning}, author={Alexander Kolesnikov and Lucas Beyer and Xiaohua Zhai and Joan Puigcerver and Jessica Yung and Sylvain Gelly and Neil Houlsby}, booktitle={European Conference on Computer Vision}, year={2019} } ``` ```bibtex @article{He2016, author = {Kaiming He and Xiangyu Zhang and Shaoqing Ren and Jian Sun}, title = {Identity Mappings in Deep Residual Networks}, journal = {arXiv preprint arXiv:1603.05027}, year = {2016} } ``` ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ```
Aries/T5_question_generation
[ "pytorch", "jax", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
13
null
--- tags: - Pixelcopter-PLE-v0 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Pixelcopter-PLE-v0 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Pixelcopter-PLE-v0 type: Pixelcopter-PLE-v0 metrics: - type: mean_reward value: 7.10 +/- 4.01 name: mean_reward verified: false --- # **Reinforce** Agent playing **Pixelcopter-PLE-v0** This is a trained model of a **Reinforce** agent playing **Pixelcopter-PLE-v0**. To learn how to use this model and train your own, check out Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
Arina/Erine
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python import gym # load_from_hub is the helper defined in the Deep RL Course notebook model = load_from_hub(repo_id="Absie/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
asaakyan/mbart-poetic-all
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - generated_from_trainer datasets: - funsd model-index: - name: layoutmlv2_funsd_rjz results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # layoutmlv2_funsd_rjz This model is a fine-tuned version of [microsoft/layoutlm-base-uncased](https://huggingface.co/microsoft/layoutlm-base-uncased) on the funsd dataset. It achieves the following results on the evaluation set: - Loss: 0.9422 - Answer: {'precision': 0.7382857142857143, 'recall': 0.7985166872682324, 'f1': 0.7672209026128266, 'number': 809} - Header: {'precision': 0.42758620689655175, 'recall': 0.5210084033613446, 'f1': 0.4696969696969697, 'number': 119} - Question: {'precision': 0.8075160403299725, 'recall': 0.8272300469483568, 'f1': 0.8172541743970314, 'number': 1065} - Overall Precision: 0.7527 - Overall Recall: 0.7973 - Overall F1: 0.7744 - Overall Accuracy: 0.8096 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 15 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Answer | Header | Question | Overall Precision | Overall Recall | Overall F1 | Overall Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------------------------------------------------------------------------------------------------------:|:-----------------------------------------------------------------------------------------------------------:|:---------------------------------------------------------------------------------------------------------:|:-----------------:|:--------------:|:----------:|:----------------:| | 0.3143 | 1.0 | 10 | 0.7685 | {'precision': 0.7, 'recall': 0.7700865265760197, 'f1': 0.7333725721012359, 'number': 809} | {'precision': 0.2986111111111111, 'recall': 0.36134453781512604, 'f1': 0.32699619771863114, 'number': 119} | {'precision': 0.7693032015065914, 'recall': 0.7671361502347418, 'f1': 0.768218147625764, 'number': 1065} | 0.7075 | 0.7441 | 0.7254 | 0.7924 | | 0.2816 | 2.0 | 20 | 0.7829 | {'precision': 0.7162315550510783, 'recall': 0.7799752781211372, 'f1': 0.7467455621301775, 'number': 809} | {'precision': 0.33152173913043476, 'recall': 0.5126050420168067, 'f1': 0.40264026402640263, 'number': 119} | {'precision': 0.7855839416058394, 'recall': 0.8084507042253521, 'f1': 0.7968533086534013, 'number': 1065} | 0.7186 | 0.7792 | 0.7477 | 0.7976 | | 0.2216 | 3.0 | 30 | 0.7825 | {'precision': 0.7016806722689075, 'recall': 0.8257107540173053, 'f1': 0.7586598523566157, 'number': 809} | {'precision': 0.35570469798657717, 'recall': 0.44537815126050423, 'f1': 0.39552238805970147, 'number': 119} | {'precision': 0.7851985559566786, 'recall': 0.8169014084507042, 'f1': 0.8007363092498849, 'number': 1065} | 0.7202 | 0.7983 | 0.7573 | 0.7942 | | 0.1973 | 4.0 | 40 | 0.7683 | {'precision': 0.7095032397408207, 'recall': 0.8121137206427689, 'f1': 0.7573487031700288, 'number': 809} | {'precision': 0.3968253968253968, 'recall': 0.42016806722689076, 'f1': 0.40816326530612246, 'number': 119} | {'precision': 0.802367941712204, 'recall': 
0.8272300469483568, 'f1': 0.8146093388811835, 'number': 1065} | 0.7386 | 0.7968 | 0.7666 | 0.8143 | | 0.1671 | 5.0 | 50 | 0.7918 | {'precision': 0.7269585253456221, 'recall': 0.7799752781211372, 'f1': 0.7525342874180083, 'number': 809} | {'precision': 0.4076923076923077, 'recall': 0.44537815126050423, 'f1': 0.42570281124497994, 'number': 119} | {'precision': 0.7848888888888889, 'recall': 0.8291079812206573, 'f1': 0.8063926940639269, 'number': 1065} | 0.7381 | 0.7863 | 0.7614 | 0.8139 | | 0.1342 | 6.0 | 60 | 0.8295 | {'precision': 0.7234972677595628, 'recall': 0.8182941903584673, 'f1': 0.7679814385150812, 'number': 809} | {'precision': 0.37857142857142856, 'recall': 0.44537815126050423, 'f1': 0.4092664092664093, 'number': 119} | {'precision': 0.7939339875111507, 'recall': 0.8356807511737089, 'f1': 0.8142726440988106, 'number': 1065} | 0.7376 | 0.8053 | 0.7700 | 0.8120 | | 0.1212 | 7.0 | 70 | 0.8632 | {'precision': 0.7337883959044369, 'recall': 0.7972805933250927, 'f1': 0.764218009478673, 'number': 809} | {'precision': 0.4084507042253521, 'recall': 0.48739495798319327, 'f1': 0.4444444444444445, 'number': 119} | {'precision': 0.8137347130761995, 'recall': 0.812206572769953, 'f1': 0.8129699248120301, 'number': 1065} | 0.7524 | 0.7868 | 0.7692 | 0.8082 | | 0.1131 | 8.0 | 80 | 0.9081 | {'precision': 0.7244785949506037, 'recall': 0.8158220024721878, 'f1': 0.7674418604651163, 'number': 809} | {'precision': 0.40131578947368424, 'recall': 0.5126050420168067, 'f1': 0.4501845018450184, 'number': 119} | {'precision': 0.8097876269621422, 'recall': 0.8234741784037559, 'f1': 0.8165735567970206, 'number': 1065} | 0.7446 | 0.8018 | 0.7722 | 0.8011 | | 0.1043 | 9.0 | 90 | 0.9021 | {'precision': 0.7308132875143184, 'recall': 0.788627935723115, 'f1': 0.7586206896551724, 'number': 809} | {'precision': 0.425531914893617, 'recall': 0.5042016806722689, 'f1': 0.4615384615384615, 'number': 119} | {'precision': 0.7914818101153505, 'recall': 0.8375586854460094, 'f1': 0.8138686131386863, 'number': 1065} | 0.7426 | 0.7978 | 0.7692 | 0.8075 | | 0.0884 | 10.0 | 100 | 0.9126 | {'precision': 0.7231450719822813, 'recall': 0.8071693448702101, 'f1': 0.7628504672897196, 'number': 809} | {'precision': 0.40939597315436244, 'recall': 0.5126050420168067, 'f1': 0.4552238805970149, 'number': 119} | {'precision': 0.819718309859155, 'recall': 0.819718309859155, 'f1': 0.819718309859155, 'number': 1065} | 0.7496 | 0.7963 | 0.7723 | 0.8094 | | 0.084 | 11.0 | 110 | 0.9354 | {'precision': 0.7502944640753828, 'recall': 0.7873918417799752, 'f1': 0.7683956574185766, 'number': 809} | {'precision': 0.4140127388535032, 'recall': 0.5462184873949579, 'f1': 0.47101449275362317, 'number': 119} | {'precision': 0.7946428571428571, 'recall': 0.8356807511737089, 'f1': 0.8146453089244852, 'number': 1065} | 0.7488 | 0.7988 | 0.7730 | 0.8064 | | 0.0794 | 12.0 | 120 | 0.9323 | {'precision': 0.7244785949506037, 'recall': 0.8158220024721878, 'f1': 0.7674418604651163, 'number': 809} | {'precision': 0.4172661870503597, 'recall': 0.48739495798319327, 'f1': 0.4496124031007752, 'number': 119} | {'precision': 0.8152985074626866, 'recall': 0.8206572769953052, 'f1': 0.8179691155825924, 'number': 1065} | 0.7502 | 0.7988 | 0.7738 | 0.8094 | | 0.0803 | 13.0 | 130 | 0.9429 | {'precision': 0.7401129943502824, 'recall': 0.8096415327564895, 'f1': 0.7733175914994096, 'number': 809} | {'precision': 0.42592592592592593, 'recall': 0.5798319327731093, 'f1': 0.49110320284697506, 'number': 119} | {'precision': 0.8110599078341014, 'recall': 0.8262910798122066, 'f1': 
0.8186046511627907, 'number': 1065} | 0.7523 | 0.8048 | 0.7777 | 0.8085 | | 0.0754 | 14.0 | 140 | 0.9393 | {'precision': 0.7425629290617849, 'recall': 0.8022249690976514, 'f1': 0.7712418300653594, 'number': 809} | {'precision': 0.4225352112676056, 'recall': 0.5042016806722689, 'f1': 0.45977011494252873, 'number': 119} | {'precision': 0.8018099547511313, 'recall': 0.831924882629108, 'f1': 0.816589861751152, 'number': 1065} | 0.7520 | 0.8003 | 0.7754 | 0.8106 | | 0.0732 | 15.0 | 150 | 0.9422 | {'precision': 0.7382857142857143, 'recall': 0.7985166872682324, 'f1': 0.7672209026128266, 'number': 809} | {'precision': 0.42758620689655175, 'recall': 0.5210084033613446, 'f1': 0.4696969696969697, 'number': 119} | {'precision': 0.8075160403299725, 'recall': 0.8272300469483568, 'f1': 0.8172541743970314, 'number': 1065} | 0.7527 | 0.7973 | 0.7744 | 0.8096 | ### Framework versions - Transformers 4.27.2 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
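The trainer card above reports FUNSD token-classification metrics but no inference code; here is a minimal loading sketch, assuming the checkpoint follows the base model's LayoutLM architecture. The fine-tuned hub path is a placeholder.

```python
from transformers import LayoutLMForTokenClassification, LayoutLMTokenizerFast

# Placeholder repo id for the fine-tuned checkpoint named in the card.
model = LayoutLMForTokenClassification.from_pretrained("<user>/layoutmlv2_funsd_rjz")
tokenizer = LayoutLMTokenizerFast.from_pretrained("microsoft/layoutlm-base-uncased")

# Note: LayoutLM additionally expects a `bbox` tensor of token bounding
# boxes (normalized to a 0-1000 page coordinate space) alongside input_ids.
```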
ArnaudPannatier/MLPMixer
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad_modified_for_t5_qg model-index: - name: t5-squad-end-to-end-qg results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-squad-end-to-end-qg This model is a fine-tuned version of [t5-base](https://huggingface.co/t5-base) on the squad_modified_for_t5_qg dataset. It achieves the following results on the evaluation set: - Loss: 1.8008 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - gradient_accumulation_steps: 16 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 2.6046 | 0.34 | 100 | 1.9520 | | 2.0323 | 0.68 | 200 | 1.8008 | ### Framework versions - Transformers 4.27.2 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
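The card omits a usage example, so here is a minimal inference sketch. The hub repo path and the input format (a plain "generate questions:" prefix over a SQuAD-style context) are assumptions, not documented behavior of this checkpoint:

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_id = "<owner>/t5-squad-end-to-end-qg"  # hypothetical repo path
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

context = "The Eiffel Tower was completed in 1889 and is located in Paris."
inputs = tokenizer("generate questions: " + context, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64, num_beams=4)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```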
ArpanZS/search_model
[ "joblib" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit --- ### ahx-beta-41b803f on Stable Diffusion This is the `<ahx-beta-41b803f>` concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the [Stable Conceptualizer](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) notebook. You can also train your own concepts and load them into the concept libraries using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb). Here is the new concept you will be able to use as a `style`: ![<ahx-beta-41b803f> 0](https://huggingface.co/sd-concepts-library/ahx-beta-41b803f/resolve/main/concept_images/5.jpeg) ![<ahx-beta-41b803f> 1](https://huggingface.co/sd-concepts-library/ahx-beta-41b803f/resolve/main/concept_images/2.jpeg) ![<ahx-beta-41b803f> 2](https://huggingface.co/sd-concepts-library/ahx-beta-41b803f/resolve/main/concept_images/0.jpeg) ![<ahx-beta-41b803f> 3](https://huggingface.co/sd-concepts-library/ahx-beta-41b803f/resolve/main/concept_images/3.jpeg) ![<ahx-beta-41b803f> 4](https://huggingface.co/sd-concepts-library/ahx-beta-41b803f/resolve/main/concept_images/4.jpeg) ![<ahx-beta-41b803f> 5](https://huggingface.co/sd-concepts-library/ahx-beta-41b803f/resolve/main/concept_images/1.jpeg)
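Besides the notebooks above, recent diffusers versions can load the concept directly; a minimal sketch, assuming a diffusers release with `load_textual_inversion` and the usual sd-concepts-library repo layout:

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
# Registers the learned <ahx-beta-41b803f> token with the tokenizer.
pipe.load_textual_inversion("sd-concepts-library/ahx-beta-41b803f")
image = pipe("a landscape in the style of <ahx-beta-41b803f>").images[0]
image.save("ahx_style.png")
```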
Arpita/opus-mt-en-ro-finetuned-syn-to-react
[ "pytorch", "tensorboard", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- tags: - image-classification - pytorch - huggingpics metrics: - accuracy model-index: - name: Fridge_Items_Model_V2 results: - task: name: Image Classification type: image-classification metrics: - name: Accuracy type: accuracy value: 0.5731573104858398 --- # Fridge_Items_Model_V2 Autogenerated by HuggingPics🤗🖼️ Create your own image classifier for **anything** by running [the demo on Google Colab](https://colab.research.google.com/github/nateraw/huggingpics/blob/main/HuggingPics.ipynb). Report any issues with the demo at the [github repo](https://github.com/nateraw/huggingpics). ## Example Images #### Apples ![Apples](images/Apples.jpg) #### Asparagus ![Asparagus](images/Asparagus.jpg) #### Avocado ![Avocado](images/Avocado.jpg) #### BBQ sauce ![BBQ sauce](images/BBQ_sauce.jpg) #### Bananas ![Bananas](images/Bananas.jpg) #### Beans ![Beans](images/Beans.jpg) #### Beef ![Beef](images/Beef.jpg) #### Beer ![Beer](images/Beer.jpg) #### Berries ![Berries](images/Berries.jpg) #### Bison ![Bison](images/Bison.jpg) #### Bread ![Bread](images/Bread.jpg) #### Broccoli ![Broccoli](images/Broccoli.jpg) #### Cauliflower ![Cauliflower](images/Cauliflower.jpg) #### Celery ![Celery](images/Celery.jpg) #### Cheese ![Cheese](images/Cheese.jpg) #### Chicken ![Chicken](images/Chicken.jpg) #### Chocolate ![Chocolate](images/Chocolate.jpg) #### Citrus fruits ![Citrus fruits](images/Citrus_fruits.jpg) #### Clams ![Clams](images/Clams.jpg) #### Cold cuts ![Cold cuts](images/Cold_cuts.jpg) #### Corn ![Corn](images/Corn.jpg) #### Cottage cheese ![Cottage cheese](images/Cottage_cheese.jpg) #### Crab ![Crab](images/Crab.jpg) #### Cream ![Cream](images/Cream.jpg) #### Cream cheese ![Cream cheese](images/Cream_cheese.jpg) #### Cucumbers ![Cucumbers](images/Cucumbers.jpg) #### Duck ![Duck](images/Duck.jpg) #### Eggs ![Eggs](images/Eggs.jpg) #### Energy drinks ![Energy drinks](images/Energy_drinks.jpg) #### Fish ![Fish](images/Fish.jpg) #### Frozen Fruits ![Frozen Fruits](images/Frozen_Fruits.jpg) #### Frozen meals ![Frozen meals](images/Frozen_meals.jpg) #### Frozen vegetables ![Frozen vegetables](images/Frozen_vegetables.jpg) #### Garlic ![Garlic](images/Garlic.jpg) #### Grapes ![Grapes](images/Grapes.jpg) #### Ground beef ![Ground beef](images/Ground_beef.jpg) #### Ground chicken ![Ground chicken](images/Ground_chicken.jpg) #### Ham ![Ham](images/Ham.jpg) #### Hot sauce ![Hot sauce](images/Hot_sauce.jpg) #### Hummus ![Hummus](images/Hummus.jpg) #### Ice cream ![Ice cream](images/Ice_cream.jpg) #### Jams ![Jams](images/Jams.jpg) #### Jerky ![Jerky](images/Jerky.jpg) #### Kiwi ![Kiwi](images/Kiwi.jpg) #### Lamb ![Lamb](images/Lamb.jpg) #### Lemons ![Lemons](images/Lemons.jpg) #### Lobster ![Lobster](images/Lobster.jpg) #### Mangoes ![Mangoes](images/Mangoes.jpg) #### Mayonnaise ![Mayonnaise](images/Mayonnaise.jpg) #### Melons ![Melons](images/Melons.jpg) #### Milk ![Milk](images/Milk.jpg) #### Mussels ![Mussels](images/Mussels.jpg) #### Mustard ![Mustard](images/Mustard.jpg) #### Nectarines ![Nectarines](images/Nectarines.jpg) #### Onions ![Onions](images/Onions.jpg) #### Oranges ![Oranges](images/Oranges.jpg) #### Peaches ![Peaches](images/Peaches.jpg) #### Peas ![Peas](images/Peas.jpg) #### Peppers ![Peppers](images/Peppers.jpg) #### Pineapple ![Pineapple](images/Pineapple.jpg) #### Pizza ![Pizza](images/Pizza.jpg) #### Plums ![Plums](images/Plums.jpg) #### Pork ![Pork](images/Pork.jpg) #### Potatoes ![Potatoes](images/Potatoes.jpg) #### Salad dressings ![Salad dressings](images/Salad_dressings.jpg) #### Salmon 
![Salmon](images/Salmon.jpg) #### Shrimp ![Shrimp](images/Shrimp.jpg) #### Sour cream ![Sour cream](images/Sour_cream.jpg) #### Soy sauce ![Soy sauce](images/Soy_sauce.jpg) #### Spinach ![Spinach](images/Spinach.jpg) #### Squash ![Squash](images/Squash.jpg) #### Steak ![Steak](images/Steak.jpg) #### Sweet potatoes ![Sweet potatoes](images/Sweet_potatoes.jpg) #### Tilapia ![Tilapia](images/Tilapia.jpg) #### Tomatoes ![Tomatoes](images/Tomatoes.jpg) #### Tuna ![Tuna](images/Tuna.jpg) #### Turkey ![Turkey](images/Turkey.jpg) #### Venison ![Venison](images/Venison.jpg) #### Water bottles ![Water bottles](images/Water_bottles.jpg) #### Wine ![Wine](images/Wine.jpg) #### Yogurt ![Yogurt](images/Yogurt.jpg) #### Zucchini ![Zucchini](images/Zucchini.jpg)
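A minimal inference sketch; the owner namespace in the repo id is an assumption, since the card only gives the model name:

```python
from transformers import pipeline

# "<owner>" is a placeholder namespace; substitute the actual repo owner.
classifier = pipeline("image-classification", model="<owner>/Fridge_Items_Model_V2")
print(classifier("fridge_photo.jpg"))  # top labels with confidence scores
```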
Arpita/opus-mt-en-ro-finetuned-synthon-to-reactant
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - en license: other tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - art - artistic - diffusers inference: false --- For info: https://civitai.com/models/23521/anime-pastel-dream
Augustvember/WokkaBot6
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: creativeml-openrail-m tags: - coreml - stable-diffusion - text-to-image - not-for-all-eyes --- # Core ML Converted Model: - This model was converted to [Core ML for use on Apple Silicon devices](https://github.com/apple/ml-stable-diffusion). Conversion instructions can be found [here](https://github.com/godly-devotion/MochiDiffusion/wiki/How-to-convert-ckpt-or-safetensors-files-to-Core-ML).<br> - Provide the model to an app such as Mochi Diffusion [Github](https://github.com/godly-devotion/MochiDiffusion) - [Discord](https://discord.gg/x2kartzxGv) to generate images.<br> - `split_einsum` version is compatible with all compute unit options including Neural Engine.<br> - `original` version is only compatible with the CPU & GPU option.<br> - Custom resolution versions are tagged accordingly.<br> - `vae` tagged files have a VAE embedded into the model.<br> - Descriptions are posted as-is from the original model source. Not all features and/or results may be available in CoreML format.<br> - This model was converted with `vae-encoder` for i2i. - Models that are 32 bit will have "fp32" in the filename. # Note: Some models do not have the [unet split into chunks](https://github.com/apple/ml-stable-diffusion#-converting-models-to-core-ml). # Project AIO: Source(s): [CivitAI](https://civitai.com/models/18428/project-aio) Project All In One (AIO) Please Read the Description This model is a merge between all of my previously released models and some unreleased models. Going forward, all future models and updates to existing models will also be added/updated on this model. Hands Hands are the Achilles heel of latent diffusion models (LDMs). This model is no exception. Due to the unorthodox methods used in the creation of this model, it often fails at generating good hands. As of right now, I have no fix for this. Given the dynamic nature of this model (being updated alongside my other models), there's a chance the issue solves itself in a later update. That being said, I also plan on dedicating time to researching possible cost-effective solutions to "the hand issue" for all LDMs. In the meantime, I encourage you to use the following two embeddings to help alleviate some of the deformed hand generations: Bad_prompt_version2 - https://huggingface.co/datasets/Nerfgun3/bad_prompt/blob/main/bad_prompt_version2.pt bad-hands-5 - https://huggingface.co/MortalSage/Strange_Dedication/blob/main/embeddings/bad-hands-5.pt NOTE: If using bad-hands-5, bear in mind that your generations will not match the sample images for this model. This is because I didn't use the bad-hands embedding when creating the sample images. How To Install Embeddings in Automatic1111 webui Place bad_prompt_version2.pt and/or bad-hands-5.pt inside your embeddings folder for webui: stable-diffusion-webui/embeddings/<place .pt embeddings here> Launch webui; under "Generate" you will see a little red button. Click it, and under the "textual inversions" tab you will see the embeddings you added. Click on them, and they will automatically be added to the end of your negative prompt. To change the weight (strength) of the embedding, use attention/emphasis. For example, (bad_prompt_version2:0.8) Recommended Settings Clipskip: 1 ENSD: 31337 Sampler: DPM++ SDE Karras, 18 - 30 steps DPM++ 2M Karras, 30 - 60 steps Heun, 20 steps, Sigma Churn = 1 Euler, 20 - 70 steps, Sigma Churn = 1 These parameters are not strictly required; experiment with other samplers and parameter values. You might find something that works better for you.
Check out my other models (also the models used in the merge) WonderMix - https://civitai.com/models/15666/wondermix Refined - https://civitai.com/models/8392/refined Experience - https://civitai.com/models/5952/experience Elegance - https://civitai.com/models/5564/elegance Clarity - https://civitai.com/models/5062/clarity VisionGen - Realism Reborn - https://civitai.com/models/4834/visiongen-realism LoRA Pant Pull Down - https://civitai.com/models/11126/pant-pull-down-lora Questions or Feedback? Visit my thread on the Unstable Diffusion Discord Server Special thanks to rocp for the witch and French maid prompt used in the sample pictures, @nutrition for copious research/testing of various sampling methods/parameters, and to everyone in the Unstable Diffusion Discord community who makes doing this a more fun and enjoyable experience.
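For diffusers users outside the webui, a rough equivalent of the embedding setup above is sketched below. This is a sketch only: it assumes a diffusers version whose `load_textual_inversion` can read A1111-format `.pt` files, local copies of both embeddings, and a hypothetical repo id for the merged checkpoint.

```python
import torch
from diffusers import StableDiffusionPipeline

# "<owner>/project-aio" is a placeholder repo id, not the real upload path.
pipe = StableDiffusionPipeline.from_pretrained(
    "<owner>/project-aio", torch_dtype=torch.float16
).to("cuda")

# Register the negative embeddings, then reference their tokens in the
# negative prompt (diffusers does not parse webui-style (token:0.8) weights).
pipe.load_textual_inversion("./bad_prompt_version2.pt", token="bad_prompt_version2")
pipe.load_textual_inversion("./bad-hands-5.pt", token="bad-hands-5")

image = pipe(
    "1girl, detailed hands, masterpiece, best quality",
    negative_prompt="bad_prompt_version2, bad-hands-5",
    num_inference_steps=25,
).images[0]
image.save("aio_sample.png")
```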
Augustvember/WokkaBot9
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 datasets: - Amalq/shared_TaskA language: - en --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Flan_t5_Large_Chat_Summary This model is a fine-tuned version of [google/flan-t5-large](https://huggingface.co/google/flan-t5-large) on the shared_TaskA dataset. ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Example Uses ```python from transformers import AutoTokenizer, AutoModelForSeq2SeqLM tokenizer_pre = AutoTokenizer.from_pretrained("Amalq/flan_t5_large_chat_summary") model_pre = AutoModelForSeq2SeqLM.from_pretrained("Amalq/flan_t5_large_chat_summary") ```
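Continuing the snippet above, a hedged generation sketch; the dialogue and decoding settings are illustrative, not taken from the card:

```python
dialogue = ("Doctor: What brings you in today?\n"
            "Patient: I've had a headache for three days.")
inputs = tokenizer_pre(dialogue, return_tensors="pt")
summary_ids = model_pre.generate(**inputs, max_new_tokens=64, num_beams=4)
print(tokenizer_pre.decode(summary_ids[0], skip_special_tokens=True))
```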
Augustvember/WokkaBotF
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - en license: other tags: - stable-diffusion - text-to-image - art - artistic - diffusers inference: false --- For info: https://civitai.com/models/23900/anylora
Augustvember/test
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- library_name: stable-baselines3 tags: - AntBulletEnv-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: AntBulletEnv-v0 type: AntBulletEnv-v0 metrics: - type: mean_reward value: 1100.57 +/- 23.70 name: mean_reward verified: false --- # **A2C** Agent playing **AntBulletEnv-v0** This is a trained model of an **A2C** agent playing **AntBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch; the repo id and checkpoint filename below are assumptions, so substitute the actual ones for this upload: ```python
from stable_baselines3 import A2C
from huggingface_sb3 import load_from_hub

# Hypothetical repo id and filename -- adjust to the actual upload.
checkpoint = load_from_hub(
    repo_id="<owner>/a2c-AntBulletEnv-v0",
    filename="a2c-AntBulletEnv-v0.zip",
)
model = A2C.load(checkpoint)
```
Augustvember/wokka
[ "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- library_name: ml-agents tags: - Pyramids - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Pyramids --- # **ppo** Agent playing **Pyramids** This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub. ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **play directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Pyramids 2. Find your model_id: sanak/ML-Agents-Pyramids 3. Select your *.nn or *.onnx file 4. Click on Watch the agent play 👀
Ayta/Haha
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-cartpole-v1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 500.00 +/- 0.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1**. To learn to use this model and train yours, check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
Ayu/Shiriro
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- datasets: - bertin-project/alpaca-spanish language: - es license: apache-2.0 --- <div style="text-align:center;width:350px;height:350px;"> <img src="https://huggingface.co/hackathon-somos-nlp-2023/salsapaca-native/resolve/main/Alpaca2.png" alt="SAlsapaca logo"> </div> # SAlsapaca: Spanish + Salsa + Alpaca-Native (WIP) ## Adapter Description This adapter was created with the [PEFT](https://github.com/huggingface/peft) library, which allowed the base model [chavinlo/alpaca-native](https://huggingface.co/chavinlo/alpaca-native) to be fine-tuned on the [Spanish Alpaca Dataset](https://huggingface.co/datasets/bertin-project/alpaca-spanish) using the *LoRA* method. ## How to use ```py
import torch
from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer

peft_model_id = "hackathon-somos-nlp-2023/salsapaca-native"
config = PeftConfig.from_pretrained(peft_model_id)
model = AutoModelForCausalLM.from_pretrained(
    config.base_model_name_or_path,
    return_dict=True,
    load_in_8bit=True,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(peft_model_id)

# Load the LoRA adapter
model = PeftModel.from_pretrained(model, peft_model_id)

def gen_conversation(text):
    text = "<SC>instruction: " + text + "\n "
    batch = tokenizer(text, return_tensors='pt')
    with torch.cuda.amp.autocast():
        output_tokens = model.generate(
            **batch,
            max_new_tokens=256,
            eos_token_id=50258,
            early_stopping=True,
            temperature=.9,
        )
    print('\n\n', tokenizer.decode(output_tokens[0], skip_special_tokens=False))

text = "hola"
gen_conversation(text)
``` ## Resources used Google Colab machine with the following specifications <div style="text-align:center;width:550px;height:550px;"> <img src="https://huggingface.co/hackathon-somos-nlp-2023/bertin-gpt-j-6B-es-finetuned-salpaca/resolve/main/resource.jpeg" alt="Resource logo"> </div> ## Citation ``` @misc{hackathon-somos-nlp-2023, author = { {Edison Bejarano, Leonardo Bolaños, Alberto Ceballos, Santiago Pineda, Nicolay Potes} }, title = { SAlsapaca }, year = 2023, url = { https://huggingface.co/hackathon-somos-nlp-2023/salsapaca-native }, publisher = { Hugging Face } } ```
Ayumi/Jovana
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: cc-by-4.0 metrics: - bleu4 - meteor - rouge-l - bertscore - moverscore language: it datasets: - lmqg/qg_itquad pipeline_tag: text2text-generation tags: - question answering widget: - text: "question: Quale batterio ha il nome del paese che colpisce di più nel suo nome?, context: Il complesso M. tubercolosi (MTBC) comprende altri quattro micobatteri causa di tubercolosi: M. bovis, M. africanum, M. canetti e M. microti. M. africanum non è molto diffuso, ma è una causa significativa di tubercolosi in alcune parti dell' Africa. M. bovis era una volta una causa comune della tubercolosi, ma l' introduzione del latte pastorizzato ha quasi completamente eliminato questo problema di salute pubblica nei paesi sviluppati. M. canetti è raro e sembra essere limitato al Corno d' Africa, anche se alcuni casi sono stati osservati negli emigranti africani. M. microti è anche raro ed è visto quasi solo in persone immunodeficienti, anche se la sua prevalenza può essere significativamente sottovalutata." example_title: "Question Answering Example 1" model-index: - name: vocabtrimmer/mt5-small-trimmed-it-90000-itquad-qa results: - task: name: Text2text Generation type: text2text-generation dataset: name: lmqg/qg_itquad type: default args: default metrics: - name: BLEU4 (Question Answering) type: bleu4_question_answering value: 11.24 - name: ROUGE-L (Question Answering) type: rouge_l_question_answering value: 33.86 - name: METEOR (Question Answering) type: meteor_question_answering value: 29.1 - name: BERTScore (Question Answering) type: bertscore_question_answering value: 90.81 - name: MoverScore (Question Answering) type: moverscore_question_answering value: 76.04 - name: AnswerF1Score (Question Answering) type: answer_f1_score__question_answering value: 55.36 - name: AnswerExactMatch (Question Answering) type: answer_exact_match_question_answering value: 39.23 --- # Model Card of `vocabtrimmer/mt5-small-trimmed-it-90000-itquad-qa` This model is a fine-tuned version of [vocabtrimmer/mt5-small-trimmed-it-90000](https://huggingface.co/vocabtrimmer/mt5-small-trimmed-it-90000) for the question answering task on the [lmqg/qg_itquad](https://huggingface.co/datasets/lmqg/qg_itquad) (dataset_name: default) via [`lmqg`](https://github.com/asahi417/lm-question-generation). ### Overview - **Language model:** [vocabtrimmer/mt5-small-trimmed-it-90000](https://huggingface.co/vocabtrimmer/mt5-small-trimmed-it-90000) - **Language:** it - **Training data:** [lmqg/qg_itquad](https://huggingface.co/datasets/lmqg/qg_itquad) (default) - **Online Demo:** [https://autoqg.net/](https://autoqg.net/) - **Repository:** [https://github.com/asahi417/lm-question-generation](https://github.com/asahi417/lm-question-generation) - **Paper:** [https://arxiv.org/abs/2210.03992](https://arxiv.org/abs/2210.03992) ### Usage - With [`lmqg`](https://github.com/asahi417/lm-question-generation#lmqg-language-model-for-question-generation-) ```python from lmqg import TransformersQG # initialize model model = TransformersQG(language="it", model="vocabtrimmer/mt5-small-trimmed-it-90000-itquad-qa") # model prediction answers = model.answer_q(list_question="Quale batterio ha il nome del paese che colpisce di più nel suo nome?", list_context=" Il complesso M. tubercolosi (MTBC) comprende altri quattro micobatteri causa di tubercolosi: M. bovis, M. africanum, M. canetti e M. microti. M. africanum non è molto diffuso, ma è una causa significativa di tubercolosi in alcune parti dell' Africa. M.
bovis era una volta una causa comune della tubercolosi, ma l' introduzione del latte pastorizzato ha quasi completamente eliminato questo problema di salute pubblica nei paesi sviluppati. M. canetti è raro e sembra essere limitato al Corno d' Africa, anche se alcuni casi sono stati osservati negli emigranti africani. M. microti è anche raro ed è visto quasi solo in persone immunodeficienti, anche se la sua prevalenza può essere significativamente sottovalutata.") ``` - With `transformers` ```python from transformers import pipeline pipe = pipeline("text2text-generation", "vocabtrimmer/mt5-small-trimmed-it-90000-itquad-qa") output = pipe("question: Quale batterio ha il nome del paese che colpisce di più nel suo nome?, context: Il complesso M. tubercolosi (MTBC) comprende altri quattro micobatteri causa di tubercolosi: M. bovis, M. africanum, M. canetti e M. microti. M. africanum non è molto diffuso, ma è una causa significativa di tubercolosi in alcune parti dell' Africa. M. bovis era una volta una causa comune della tubercolosi, ma l' introduzione del latte pastorizzato ha quasi completamente eliminato questo problema di salute pubblica nei paesi sviluppati. M. canetti è raro e sembra essere limitato al Corno d' Africa, anche se alcuni casi sono stati osservati negli emigranti africani. M. microti è anche raro ed è visto quasi solo in persone immunodeficienti, anche se la sua prevalenza può essere significativamente sottovalutata.") ``` ## Evaluation - ***Metric (Question Answering)***: [raw metric file](https://huggingface.co/vocabtrimmer/mt5-small-trimmed-it-90000-itquad-qa/raw/main/eval/metric.first.answer.paragraph_question.answer.lmqg_qg_itquad.default.json) | | Score | Type | Dataset | |:-----------------|--------:|:--------|:-----------------------------------------------------------------| | AnswerExactMatch | 39.23 | default | [lmqg/qg_itquad](https://huggingface.co/datasets/lmqg/qg_itquad) | | AnswerF1Score | 55.36 | default | [lmqg/qg_itquad](https://huggingface.co/datasets/lmqg/qg_itquad) | | BERTScore | 90.81 | default | [lmqg/qg_itquad](https://huggingface.co/datasets/lmqg/qg_itquad) | | Bleu_1 | 22.6 | default | [lmqg/qg_itquad](https://huggingface.co/datasets/lmqg/qg_itquad) | | Bleu_2 | 17.52 | default | [lmqg/qg_itquad](https://huggingface.co/datasets/lmqg/qg_itquad) | | Bleu_3 | 14.04 | default | [lmqg/qg_itquad](https://huggingface.co/datasets/lmqg/qg_itquad) | | Bleu_4 | 11.24 | default | [lmqg/qg_itquad](https://huggingface.co/datasets/lmqg/qg_itquad) | | METEOR | 29.1 | default | [lmqg/qg_itquad](https://huggingface.co/datasets/lmqg/qg_itquad) | | MoverScore | 76.04 | default | [lmqg/qg_itquad](https://huggingface.co/datasets/lmqg/qg_itquad) | | ROUGE_L | 33.86 | default | [lmqg/qg_itquad](https://huggingface.co/datasets/lmqg/qg_itquad) | ## Training hyperparameters The following hyperparameters were used during fine-tuning: - dataset_path: lmqg/qg_itquad - dataset_name: default - input_types: ['paragraph_question'] - output_types: ['answer'] - prefix_types: None - model: vocabtrimmer/mt5-small-trimmed-it-90000 - max_length: 512 - max_length_output: 32 - epoch: 14 - batch: 32 - lr: 0.0005 - fp16: False - random_seed: 1 - gradient_accumulation_steps: 2 - label_smoothing: 0.15 The full configuration can be found at [fine-tuning config file](https://huggingface.co/vocabtrimmer/mt5-small-trimmed-it-90000-itquad-qa/raw/main/trainer_config.json). 
## Citation ``` @inproceedings{ushio-etal-2022-generative, title = "{G}enerative {L}anguage {M}odels for {P}aragraph-{L}evel {Q}uestion {G}eneration", author = "Ushio, Asahi and Alva-Manchego, Fernando and Camacho-Collados, Jose", booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing", month = dec, year = "2022", address = "Abu Dhabi, U.A.E.", publisher = "Association for Computational Linguistics", } ```
AyushPJ/ai-club-inductions-21-nlp-ALBERT
[ "pytorch", "albert", "question-answering", "transformers", "generated_from_trainer", "autotrain_compatible" ]
question-answering
{ "architectures": [ "AlbertForQuestionAnswering" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- license: mit language: - th pipeline_tag: text-generation tags: - instruction-finetuning library_name: adapter-transformers datasets: - iapp_wiki_qa_squad - tatsu-lab/alpaca - wongnai_reviews - wisesight_sentiment --- # 🐃🇹🇭 Buffala-LoRa-TH Buffala-LoRA is a 7B-parameter LLaMA model finetuned to follow instructions. It is trained on the Stanford Alpaca (TH translated), Wisesight, WikiTH, Pantip and IAppQ&A datasets and makes use of the Huggingface LLaMA implementation. For more information, please visit [the project's website](https://github.com/tloen/alpaca-lora). ## Issues and what next? - The model still lacks a significant amount of world knowledge, so it is necessary to fine-tune it on larger Thai datasets > Next version: CCNet, OSCAR, thWiki - Currently, there is no translation prompt. We plan to fine-tune the model on the SCB Thai-English dataset soon. - The model works well with the LangChain Search agent (SerpAPI), which serves as a hotfix for world knowledge. > Plan for Spaces with search chain demo - Lacks chat capabilities; waiting for a LangChain implementation. - Colab demo. - Github for datasets and training notebook. ## How to use A runnable version of the example: `sys` is imported, `LOAD_8BIT` is defined, and `get_list_and_snippet` (the author's search-chain helper, not shown in this card) is stubbed out so the snippet runs without web search. ```python
import sys
import torch
from peft import PeftModel
from transformers import GenerationConfig, LlamaForCausalLM, LlamaTokenizer

LOAD_8BIT = True  # the base model below is loaded in 8-bit
device = "cuda"

tokenizer = LlamaTokenizer.from_pretrained("decapoda-research/llama-7b-hf")
model = LlamaForCausalLM.from_pretrained(
    "decapoda-research/llama-7b-hf",
    load_in_8bit=True,
    torch_dtype=torch.float16,
    device_map="auto",
)
model = PeftModel.from_pretrained(
    model,
    "Thaweewat/thai-buffala-lora-7b-v0-1",
    torch_dtype=torch.float16,
)

def get_list_and_snippet(instruction):
    # Stub for the author's SerpAPI search-chain helper (not shown in the
    # card); it returns extra context appended to the prompt input.
    return ""

def generate_prompt(instruction, input=None):
    if input:
        return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

### Instruction:
{instruction}

### Input:
{input + get_list_and_snippet(instruction)}

### Response:"""
    else:
        return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.

### Instruction:
{instruction}

### Input:
{get_list_and_snippet(instruction)}

### Response:"""

if not LOAD_8BIT:
    model.half()
model.eval()
if torch.__version__ >= "2" and sys.platform != "win32":
    model = torch.compile(model)

def evaluate(
    instruction,
    input=None,
    temperature=0.1,
    top_p=0.75,
    top_k=40,
    num_beams=4,
    max_new_tokens=128,
    **kwargs,
):
    prompt = generate_prompt(instruction, input)
    inputs = tokenizer(prompt, return_tensors="pt")
    input_ids = inputs["input_ids"].to(device)
    generation_config = GenerationConfig(
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        num_beams=num_beams,
        **kwargs,
    )
    with torch.no_grad():
        generation_output = model.generate(
            input_ids=input_ids,
            generation_config=generation_config,
            return_dict_in_generate=True,
            output_scores=True,
            max_new_tokens=max_new_tokens,
        )
    s = generation_output.sequences[0]
    output = tokenizer.decode(s)
    return output.split("### Response:")[1].strip()

evaluate(instruction="จงแก้สมการต่อไปนี้ X เท่ากับเท่าไหร่", input="X+Y=15 and Y=7")
# Expected output: X = 8
```
Azaghast/GPT2-SCP-ContainmentProcedures
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- language: - en license: creativeml-openrail-m tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers inference: true duplicated_from: andite/anything-v4.0 --- Fantasy.ai is the official and exclusive hosted AI generation platform that holds a commercial use license for Anything V4.0. You can use their service at https://Fantasy.ai/. Please report any unauthorized commercial use. ----------------- Try out my new model! - [Pastel Mix || Stylized Anime Model](https://huggingface.co/andite/pastel-mix). Thanks. I also uploaded it on CivitAI! https://civitai.com/models/5414/pastel-mix-stylized-anime-model I'd appreciate the ratings, thank you! Yes, it's a shameless plug. Examples: ![](https://huggingface.co/andite/Pastel-Mix/resolve/main/example-images/grid-0018.png) ![](https://huggingface.co/andite/pastel-mix/resolve/main/example-images/grid-reimu.png) ![](https://huggingface.co/andite/pastel-mix/resolve/main/example-images/grid-0043.png) ------- <font color="grey">Thanks to [Linaqruf](https://huggingface.co/Linaqruf) for letting me borrow his model card for reference. # Anything V4 Welcome to Anything V4 - a latent diffusion model for weebs. The newest version of Anything. This model is intended to produce high-quality, highly detailed anime-style images with just a few prompts. Like other anime-style Stable Diffusion models, it also supports danbooru tags to generate images. e.g. **_1girl, white hair, golden eyes, beautiful eyes, detail, flower meadow, cumulonimbus clouds, lighting, detailed sky, garden_** I think the V4.5 version is better though; it's in this repo. Feel free to try it. ## Yes, this model has [AbyssOrangeMix2](https://huggingface.co/WarriorMama777/OrangeMixs) in it. coz it's a very good model. check it out luls ;) # Gradio We support a [Gradio](https://github.com/gradio-app/gradio) Web UI to run anything-v4.0: [![Open In Spaces](https://camo.githubusercontent.com/00380c35e60d6b04be65d3d94a58332be5cc93779f630bcdfc18ab9a3a7d3388/68747470733a2f2f696d672e736869656c64732e696f2f62616467652f25463025394625413425393725323048756767696e67253230466163652d5370616365732d626c7565)](https://huggingface.co/spaces/akhaliq/anything-v4.0) ## 🧨 Diffusers This model can be used just like any other Stable Diffusion model. For more information, please have a look at the [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion) docs. You can also export the model to [ONNX](https://huggingface.co/docs/diffusers/optimization/onnx), [MPS](https://huggingface.co/docs/diffusers/optimization/mps) and/or [FLAX/JAX]().
```python
from diffusers import StableDiffusionPipeline
import torch

model_id = "andite/anything-v4.0"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipe = pipe.to("cuda")

prompt = "hatsune_miku"
image = pipe(prompt).images[0]
image.save("./hatsune_miku.png")
``` ## Examples Below are some examples of images generated using this model: **Anime Girl:** ![Anime Girl](https://huggingface.co/andite/anything-v4.0/resolve/main/example-1.png) ``` masterpiece, best quality, 1girl, white hair, medium hair, cat ears, closed eyes, looking at viewer, :3, cute, scarf, jacket, outdoors, streets Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7 ``` **Anime Boy:** ![Anime Boy](https://huggingface.co/andite/anything-v4.0/resolve/main/example-2.png) ``` 1boy, bishounen, casual, indoors, sitting, coffee shop, bokeh Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 7 ``` **Scenery:** ![Scenery](https://huggingface.co/andite/anything-v4.0/resolve/main/example-4.png) ``` scenery, village, outdoors, sky, clouds Steps: 50, Sampler: DPM++ 2S a Karras, CFG scale: 7 ``` ## License This model is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage. The CreativeML OpenRAIL License specifies: [Please read the full license here](https://huggingface.co/spaces/CompVis/stable-diffusion-license) ## Big Thanks to - [Linaqruf](https://huggingface.co/Linaqruf), [NoCrypt](https://huggingface.co/NoCrypt), and Fannovel16#9022 for helping me out a lot regarding my inquiries and concerns about models and other stuff.
Azaghast/GPT2-SCP-Descriptions
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- language: - en license: other tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - art - artistic - diffusers inference: false duplicated_from: Lykon/DreamShaper --- # Dream Shaper ## Official Repository Read more about this model here: https://civitai.com/models/4384/dreamshaper Also please support by giving 5 stars and a heart, which will notify you of new updates. Also consider supporting me on Patreon or BuyMeACoffee - https://www.patreon.com/Lykon275 - https://www.buymeacoffee.com/lykon You can run this model on: - https://huggingface.co/spaces/Lykon/DreamShaper-webui - https://sinkin.ai/m/4zdwGOB Be sure to check out NeverEnding Dream, another semi-realistic model that aims at being fully compatible with booru-tag LoRAs and prompts - https://huggingface.co/Lykon/NeverEnding-Dream Some sample output: ![sample 1](https://huggingface.co/Lykon/DreamShaper/resolve/main/1.png) ![sample 2](https://huggingface.co/Lykon/DreamShaper/resolve/main/2.png) ![sample 3](https://huggingface.co/Lykon/DreamShaper/resolve/main/3.png) ![sample 4](https://huggingface.co/Lykon/DreamShaper/resolve/main/4.png) ![sample 5](https://huggingface.co/Lykon/DreamShaper/resolve/main/5.png)
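The card ships diffusers weights but no loading code; a minimal sketch, assuming the repo id from the `duplicated_from` field and an illustrative prompt:

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "Lykon/DreamShaper", torch_dtype=torch.float16
).to("cuda")
image = pipe("portrait photo of a warrior woman, intricate, sharp focus").images[0]
image.save("dreamshaper_sample.png")
```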
Azaghast/GPT2-SCP-Miscellaneous
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- language: - en license: other tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - photorealistic - photoreal - diffusers inference: false duplicated_from: dreamlike-art/dreamlike-photoreal-2.0 --- # Dreamlike Photoreal 2.0 is a photorealistic model based on Stable Diffusion 1.5, made by [dreamlike.art](https://dreamlike.art/). # If you want to use dreamlike models on your website/app/etc., check the license at the bottom first! Warning: This model is horny! Add "nude, naked" to the negative prompt if you want to avoid NSFW. You can add **photo** to your prompt to make your gens look more photorealistic. Non-square aspect ratios work better for some prompts. If you want a portrait photo, try using a vertical aspect ratio. If you want a landscape photo, try using a horizontal aspect ratio. This model was trained on 768x768px images, so use 768x768px, 640x896px, 896x640px, etc. It also works pretty well with higher resolutions such as 768x1024px or 1024x768px. ### Examples <img src="https://huggingface.co/dreamlike-art/dreamlike-photoreal-2.0/resolve/main/preview1.jpg" style="max-width: 800px;" width="100%"/> <img src="https://huggingface.co/dreamlike-art/dreamlike-photoreal-2.0/resolve/main/preview2.jpg" style="max-width: 800px;" width="100%"/> <img src="https://huggingface.co/dreamlike-art/dreamlike-photoreal-2.0/resolve/main/preview3.jpg" style="max-width: 800px;" width="100%"/> ### dreamlike.art You can use this model for free on [dreamlike.art](https://dreamlike.art/)! <img src="https://huggingface.co/dreamlike-art/dreamlike-photoreal-2.0/resolve/main/dreamlike.jpg" style="max-width: 1000px;" width="100%"/> ### CKPT [Download dreamlike-photoreal-2.0.ckpt (2.13GB)](https://huggingface.co/dreamlike-art/dreamlike-photoreal-2.0/resolve/main/dreamlike-photoreal-2.0.ckpt) ### Safetensors [Download dreamlike-photoreal-2.0.safetensors (2.13GB)](https://huggingface.co/dreamlike-art/dreamlike-photoreal-2.0/resolve/main/dreamlike-photoreal-2.0.safetensors) ### 🧨 Diffusers This model can be used just like any other Stable Diffusion model. For more information, please have a look at the [Stable Diffusion Pipeline](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion). ```python
from diffusers import StableDiffusionPipeline
import torch

model_id = "dreamlike-art/dreamlike-photoreal-2.0"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipe = pipe.to("cuda")

prompt = "photo, a church in the middle of a field of crops, bright cinematic lighting, gopro, fisheye lens"
image = pipe(prompt).images[0]
image.save("./result.jpg")
``` <img src="https://huggingface.co/dreamlike-art/dreamlike-photoreal-2.0/resolve/main/church.jpg" style="max-width: 640px;" width="100%"/> # License This model is licensed under a **modified** CreativeML OpenRAIL-M license. - **You are not allowed to host, finetune, or do inference with the model or its derivatives on websites/apps/etc. If you want to, please email us at [email protected]** - **You are free to host the model card and files (Without any actual inference or finetuning) on both commercial and non-commercial websites/apps/etc.
Please state the full model name (Dreamlike Photoreal 2.0) and include the license as well as a link to the model card (https://huggingface.co/dreamlike-art/dreamlike-photoreal-2.0)** - **You are free to use the outputs (images) of the model for commercial purposes in teams of 10 or less** - You can't use the model to deliberately produce or share illegal or harmful outputs or content - The authors claim no rights on the outputs you generate; you are free to use them and are accountable for their use, which must not go against the provisions set in the license - You may re-distribute the weights. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the **modified** CreativeML OpenRAIL-M with all your users (please read the license entirely and carefully) Please read the full license here: https://huggingface.co/dreamlike-art/dreamlike-photoreal-2.0/blob/main/LICENSE.md
Azura/data
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- thumbnail: https://imgur.com/6ztDBPR.png language: - en tags: - stable-diffusion - text-to-image - image-to-image - diffusers license: creativeml-openrail-m inference: true duplicated_from: eimiss/EimisAnimeDiffusion_1.0v --- # Diffusion model This model is trained with high quality and detailed anime images. ## Gradio We support a [Gradio](https://github.com/gradio-app/gradio) Web UI to run EimisAnimeDiffusion_1.0v: [![Open In Spaces](https://camo.githubusercontent.com/00380c35e60d6b04be65d3d94a58332be5cc93779f630bcdfc18ab9a3a7d3388/68747470733a2f2f696d672e736869656c64732e696f2f62616467652f25463025394625413425393725323048756767696e67253230466163652d5370616365732d626c7565)](https://huggingface.co/spaces/akhaliq/EimisAnimeDiffusion_1.0v) # Sample generations This model works well on anime and landscape generations.<br> Anime:<br> Here are some sample generations:<br> ```
Positive: a girl, Phoenix girl, fluffy hair, war, a hell on earth, Beautiful and detailed explosion, Cold machine, Fire in eyes, burning, Metal texture, Exquisite cloth, Metal carving, volume, best quality, normal hands, Metal details, Metal scratch, Metal defects, masterpiece, best quality, best quality, illustration, highres, masterpiece, contour deepening, illustration,(beautiful detailed girl),beautiful detailed glow
Negative: lowres, bad anatomy, ((bad hands)), text, error, ((missing fingers)), cropped, jpeg artifacts, worst quality, low quality, signature, watermark, blurry, deformed, extra ears, deformed, disfigured, mutation, censored, ((multiple_girls))
Steps: 20, Sampler: DPM++ 2S a, CFG scale: 8, Seed: 4186044705/4186044707, Size: 704x896
``` <img src=https://imgur.com/2U295w3.png width=75% height=75%> <img src=https://imgur.com/2jtF376.png width=75% height=75%> ```
Positive: (1girl), cute, walking in the park, (night), full moon, north star, blue shirt, red skirt, detailed shirt, jewelry, autumn, dark blue hair, shirt hair, (magic:1.5), beautiful blue eyes
Negative: lowres, bad anatomy, ((bad hands)), text, error, ((missing fingers)), cropped, jpeg artifacts, worst quality, low quality, signature, watermark, blurry, deformed, extra ears, deformed, disfigured, mutation, censored, ((multiple_girls))
Steps: 35, Sampler: Euler a, CFG scale: 9, Seed: 296195494, Size: 768x960
``` <img src=https://imgur.com/gudKxQe.png width=75% height=75%> ```
Positive: night , ((1 girl)), alone, masterpiece, 8k wallpaper, highres, absurdres, high quality background, short hair, black hair, multicolor hair, beautiful frozen village, (full bright moon), blue dress, detailed dress, jewelry dress, (magic:1.2), blue fire, blue eyes, glowing eyes, fire, ice goddess, (blue detailed beautiful crown), electricity, blue electricity, blue light particles
Negative: lowres, bad anatomy, ((bad hands)), text, error, ((missing fingers)), cropped, jpeg artifacts, worst quality, low quality, signature, watermark, blurry, deformed, extra ears, deformed, disfigured, mutation, censored, ((multiple_girls))
Steps: 20, Sampler: DPM++ 2S a Karras, CFG scale: 9, Seed: 2118767319, Size: 768x832
``` <img src=https://imgur.com/lJL4CJL.png width=75% height=75%> Want to generate some amazing backgrounds?
No problem: ```
Positive: above clouds, mountains, (night), full moon, castle, huge forest, forest between mountains, beautiful, masterpiece
Negative: lowres, bad anatomy, ((bad hands)), text, error, ((missing fingers)), cropped, jpeg artifacts, worst quality, low quality, signature, watermark, blurry, deformed, extra ears, deformed, disfigured, mutation, censored, ((multiple_girls))
Steps: 20, Sampler: DPM++ 2S a Karras, CFG scale: 9, Seed: 83644543, Size: 896x640
``` <img src=https://imgur.com/XfxAx0S.png width=75% height=75%> ## Disclaimer Some prompts might not work perfectly (mainly colors), so add some more prompts for it to work, or try adding emphasis with (). Usually that helps. Also works well with img2img if you want to add detail. ## License This model is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage. The CreativeML OpenRAIL License specifies: 1. You can't use the model to deliberately produce or share illegal or harmful outputs or content 2. The authors claim no rights on the outputs you generate; you are free to use them and are accountable for their use, which must not go against the provisions set in the license 3. You may re-distribute the weights and use the model commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M with all your users (please read the license entirely and carefully) [Please read the full license here](https://huggingface.co/spaces/CompVis/stable-diffusion-license)
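For diffusers users, a minimal loading sketch; the repo id follows the `duplicated_from` field, and note that diffusers does not parse webui-style (( )) emphasis, so the prompts above may need adjusting:

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "eimiss/EimisAnimeDiffusion_1.0v", torch_dtype=torch.float16
).to("cuda")
image = pipe(
    "a girl, phoenix girl, fluffy hair, masterpiece, best quality, illustration",
    negative_prompt="lowres, bad anatomy, bad hands, missing fingers",
    num_inference_steps=20,
).images[0]
image.save("eimis_sample.png")
```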
Azuris/DialoGPT-medium-senorita
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
null
--- language: - en thumbnail: >- https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion/resolve/main/img/thumbnail.png tags: - cyberpunk - anime - waifu-diffusion - stable-diffusion - aiart - text-to-image license: creativeml-openrail-m duplicated_from: DGSpitzer/Cyberpunk-Anime-Diffusion --- <center><img src="https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion/resolve/main/img/5.jpg" width="512" height="512"/></center> ![visitors](https://visitor-badge.glitch.me/badge?page_id=Cyberpunk_Anime_Diffusion) # Cyberpunk Anime Diffusion An AI model that generates cyberpunk anime characters!~ Based on a finetuned Waifu Diffusion V1.3 model with the Stable Diffusion V1.5 new VAE, trained with DreamBooth by [DGSpitzer](https://www.youtube.com/channel/UCzzsYBF4qwtMwJaPJZ5SuPg) ### 🧨 Diffusers This repo contains both .ckpt and Diffusers model files. It can be used like any other Stable Diffusion model, using the standard [Stable Diffusion Pipelines](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion). You can convert this model to [ONNX](https://huggingface.co/docs/diffusers/optimization/onnx), [MPS](https://huggingface.co/docs/diffusers/optimization/mps) and/or [FLAX/JAX](https://huggingface.co/blog/stable_diffusion_jax). ```python
# Example for loading the Diffusers model
#!pip install diffusers transformers scipy torch
from diffusers import StableDiffusionPipeline
import torch

model_id = "DGSpitzer/Cyberpunk-Anime-Diffusion"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipe = pipe.to("cuda")

prompt = "a beautiful perfect face girl in dgs illustration style, Anime fine details portrait of school girl in front of modern tokyo city landscape on the background deep bokeh, anime masterpiece, 8k, sharp high quality anime"
image = pipe(prompt).images[0]
image.save("./cyberpunk_girl.png")
``` # Online Demo You can try the Online Web UI demo built with [Gradio](https://github.com/gradio-app/gradio), or use the Colab Notebook here: *My Online Space Demo* [![Open In Spaces](https://camo.githubusercontent.com/00380c35e60d6b04be65d3d94a58332be5cc93779f630bcdfc18ab9a3a7d3388/68747470733a2f2f696d672e736869656c64732e696f2f62616467652f25463025394625413425393725323048756767696e67253230466163652d5370616365732d626c7565)](https://huggingface.co/spaces/DGSpitzer/DGS-Diffusion-Space) *Finetuned Diffusion WebUI Demo by anzorq* [![Use Finetuned_Diffusion WebUI](https://camo.githubusercontent.com/00380c35e60d6b04be65d3d94a58332be5cc93779f630bcdfc18ab9a3a7d3388/68747470733a2f2f696d672e736869656c64732e696f2f62616467652f25463025394625413425393725323048756767696e67253230466163652d5370616365732d626c7565)](https://huggingface.co/spaces/anzorq/finetuned_diffusion) *Colab Notebook* [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/HelixNGC7293/cyberpunk-anime-diffusion/blob/main/cyberpunk_anime_diffusion.ipynb)[![GitHub](https://badgen.net/badge/icon/Github?icon=github&label)](https://github.com/HelixNGC7293/cyberpunk-anime-diffusion) *Buy me a coffee if you like this project ;P ♥* [![Buy me a coffee](https://badgen.net/badge/icon/Buy%20Me%20A%20Coffee?icon=buymeacoffee&label)](https://www.buymeacoffee.com/dgspitzer) <center><img src="https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion/resolve/main/img/1.jpg" width="512" height="512"/></center> # **👇Model👇** AI Model Weights available at huggingface: https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion <center><img
src="https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion/resolve/main/img/2.jpg" width="512" height="512"/></center> # Usage After model loaded, use keyword **dgs** in your prompt, with **illustration style** to get even better results. For sampler, use **Euler A** for the best result (**DDIM** kinda works too), CFG Scale 7, steps 20 should be fine **Example 1:** ``` portrait of a girl in dgs illustration style, Anime girl, female soldier working in a cyberpunk city, cleavage, ((perfect femine face)), intricate, 8k, highly detailed, shy, digital painting, intense, sharp focus ``` For cyber robot male character, you can add **muscular male** to improve the output. **Example 2:** ``` a photo of muscular beard soldier male in dgs illustration style, half-body, holding robot arms, strong chest ``` **Example 3 (with Stable Diffusion WebUI):** If using [AUTOMATIC1111's Stable Diffusion WebUI](https://github.com/AUTOMATIC1111/stable-diffusion-webui) You can simply use this as **prompt** with **Euler A** Sampler, CFG Scale 7, steps 20, 704 x 704px output res: ``` an anime girl in dgs illustration style ``` And set the **negative prompt** as this to get cleaner face: ``` out of focus, scary, creepy, evil, disfigured, missing limbs, ugly, gross, missing fingers ``` This will give you the exactly same style as the sample images above. <center><img src="https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion/resolve/main/img/ReadmeAddon.jpg" width="256" height="353"/></center> --- **NOTE: usage of this model implies accpetance of stable diffusion's [CreativeML Open RAIL-M license](LICENSE)** --- <center><img src="https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion/resolve/main/img/4.jpg" width="700" height="700"/></center> <center><img src="https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion/resolve/main/img/6.jpg" width="700" height="700"/></center>
BSC-LT/roberta-base-bne-capitel-ner
[ "pytorch", "roberta", "token-classification", "es", "dataset:bne", "dataset:capitel", "arxiv:1907.11692", "arxiv:2107.07253", "transformers", "national library of spain", "spanish", "bne", "capitel", "ner", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "RobertaForTokenClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- license: artistic-2.0 language: - es - en - ja tags: - legal - biology - music - art - climate - medical ---
BSC-LT/roberta-base-ca
[ "pytorch", "roberta", "fill-mask", "ca", "transformers", "masked-lm", "BERTa", "catalan", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
18
null
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 567.50 +/- 159.83 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib Install the RL Zoo (with SB3 and SB3-Contrib): ```bash pip install rl_zoo3 ``` ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga sivan22 -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga sivan22 -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python -m rl_zoo3.train --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga sivan22 ``` ## Hyperparameters ```python OrderedDict([('batch_size', 32), ('buffer_size', 100000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.0001), ('learning_starts', 100000), ('n_timesteps', 10000000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ```
BSC-LT/roberta-large-bne-capitel-ner
[ "pytorch", "roberta", "token-classification", "es", "dataset:bne", "dataset:capitel", "arxiv:1907.11692", "arxiv:2107.07253", "transformers", "national library of spain", "spanish", "bne", "capitel", "ner", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "RobertaForTokenClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
#TODO card. A mix of (GPT-J-6B-Shinen + PPO_HH_GPT-J) + Pygmalion-6b V8P4, at a ratio of: GPT-J-6B-Shinen - 20%, PPO_HH_GPT-J - 20%, Pygmalion-6b V8P4 - 60%.
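The card only gives the ratios. Below is a minimal sketch of how such a weighted parameter mix could be computed, assuming all three checkpoints share the GPT-J-6B architecture with identical parameter shapes; the repo ids are illustrative placeholders, not confirmed sources.

```python
# Minimal sketch of the weighted mix described above; repo ids are placeholders.
from transformers import AutoModelForCausalLM

RECIPE = [
    ("KoboldAI/GPT-J-6B-Shinen", 0.2),   # placeholder repo id
    ("reciprocate/ppo_hh_gpt-j", 0.2),   # placeholder repo id
    ("PygmalionAI/pygmalion-6b", 0.6),   # placeholder repo id
]

merged = None
for repo_id, ratio in RECIPE:
    # Loads each 6B model in fp32, so this needs a lot of RAM
    state_dict = AutoModelForCausalLM.from_pretrained(repo_id).state_dict()
    if merged is None:
        merged = {k: v.float() * ratio for k, v in state_dict.items()}
    else:
        for k, v in state_dict.items():
            merged[k] += v.float() * ratio

base = AutoModelForCausalLM.from_pretrained(RECIPE[-1][0])
base.load_state_dict(merged)
base.save_pretrained("./shinen-ppo-pygmalion-mix")
```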
BaptisteDoyen/camembert-base-xnli
[ "pytorch", "tf", "camembert", "text-classification", "fr", "dataset:xnli", "transformers", "zero-shot-classification", "xnli", "nli", "license:mit", "has_space" ]
zero-shot-classification
{ "architectures": [ "CamembertForSequenceClassification" ], "model_type": "camembert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
405,474
null
--- library_name: rl-algo-impls tags: - MicrortsRandomEnemyShapedReward3-v1 - ppo - deep-reinforcement-learning - reinforcement-learning model-index: - name: ppo results: - metrics: - type: mean_reward value: 5.29 +/- 0.38 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: MicrortsRandomEnemyShapedReward3-v1 type: MicrortsRandomEnemyShapedReward3-v1 --- # **PPO** Agent playing **MicrortsRandomEnemyShapedReward3-v1** This is a trained model of a **PPO** agent playing **MicrortsRandomEnemyShapedReward3-v1** using the [/sgoodfriend/rl-algo-impls](https://github.com/sgoodfriend/rl-algo-impls) repo. All models trained at this commit can be found at https://api.wandb.ai/links/sgoodfriend/z3kioih3. ## Training Results This model was trained from 3 trainings of **PPO** agents using different initial seeds. These agents were trained by checking out [fb34ab8](https://github.com/sgoodfriend/rl-algo-impls/tree/fb34ab86707f5e2db85e821ff7dbdc624072d640). The best and last models were kept from each training. This submission loads the best model from each training, reevaluates it, and selects the best model from these latest evaluations (mean - std). | algo | env | seed | reward_mean | reward_std | eval_episodes | best | wandb_url | |:-------|:------------------------------------|-------:|--------------:|-------------:|----------------:|:-------|:-----------------------------------------------------------------------------| | ppo | MicrortsRandomEnemyShapedReward3-v1 | 1 | 5.11875 | 0.304094 | 16 | | [wandb](https://wandb.ai/sgoodfriend/rl-algo-impls-benchmarks/runs/4xb7o6x6) | | ppo | MicrortsRandomEnemyShapedReward3-v1 | 2 | 1.57281 | 9.87922 | 16 | | [wandb](https://wandb.ai/sgoodfriend/rl-algo-impls-benchmarks/runs/nyzpkp1k) | | ppo | MicrortsRandomEnemyShapedReward3-v1 | 3 | 5.29124 | 0.381996 | 16 | * | [wandb](https://wandb.ai/sgoodfriend/rl-algo-impls-benchmarks/runs/71qt3jb7) | ### Prerequisites: Weights & Biases (WandB) Training and benchmarking assume you have a Weights & Biases project to upload runs to. By default, training goes to an rl-algo-impls project while benchmarks go to rl-algo-impls-benchmarks. During training and benchmarking runs, videos of the best models and the model weights are uploaded to WandB. Before doing anything below, you'll need to create a wandb account and run `wandb login`. ## Usage /sgoodfriend/rl-algo-impls: https://github.com/sgoodfriend/rl-algo-impls Note: While the model state dictionary and hyperparameters are saved, the latest implementation could be sufficiently different to not be able to reproduce similar results. You might need to check out the commit the agent was trained on: [fb34ab8](https://github.com/sgoodfriend/rl-algo-impls/tree/fb34ab86707f5e2db85e821ff7dbdc624072d640). ``` # Downloads the model, sets hyperparameters, and runs agent for 3 episodes python enjoy.py --wandb-run-path=sgoodfriend/rl-algo-impls-benchmarks/71qt3jb7 ``` Setup hasn't been completely worked out yet, so you might be best served by using Google Colab starting from the [colab_enjoy.ipynb](https://github.com/sgoodfriend/rl-algo-impls/blob/main/colab_enjoy.ipynb) notebook. ## Training If you want the highest chance of reproducing these results, you'll want to check out the commit the agent was trained on: [fb34ab8](https://github.com/sgoodfriend/rl-algo-impls/tree/fb34ab86707f5e2db85e821ff7dbdc624072d640). While training is deterministic, different hardware will give different results.
``` python train.py --algo ppo --env MicrortsRandomEnemyShapedReward3-v1 --seed 3 ``` Setup hasn't been completely worked out yet, so you might be best served by using Google Colab starting from the [colab_train.ipynb](https://github.com/sgoodfriend/rl-algo-impls/blob/main/colab_train.ipynb) notebook. ## Benchmarking (with Lambda Labs instance) This and other models from https://api.wandb.ai/links/sgoodfriend/z3kioih3 were generated by running a script on a Lambda Labs instance. In a Lambda Labs instance terminal: ``` git clone [email protected]:sgoodfriend/rl-algo-impls.git cd rl-algo-impls bash ./lambda_labs/setup.sh wandb login bash ./lambda_labs/benchmark.sh [-a {"ppo a2c dqn vpg"}] [-e ENVS] [-j {6}] [-p {rl-algo-impls-benchmarks}] [-s {"1 2 3"}] ``` ### Alternative: Google Colab Pro+ As an alternative, [colab_benchmark.ipynb](https://github.com/sgoodfriend/rl-algo-impls/tree/main/benchmarks#:~:text=colab_benchmark.ipynb), can be used. However, this requires a Google Colab Pro+ subscription and running across 4 separate instances because otherwise running all jobs will exceed the 24-hour limit. ## Hyperparameters This isn't exactly the format of hyperparams in hyperparams/ppo.yml, but instead the Wandb Run Config. However, it's very close and has some additional data: ``` algo: ppo algo_hyperparams: batch_size: 256 clip_range: 0.1 clip_range_decay: none clip_range_vf: 0.1 ent_coef: 0.01 learning_rate: 0.00025 learning_rate_decay: linear n_epochs: 4 n_steps: 128 vf_coef: 0.5 device: auto env: MicrortsRandomEnemyShapedReward3-v1-NoMask env_hyperparams: mask_actions: false n_envs: 8 vec_env_class: sync env_id: MicrortsRandomEnemyShapedReward3-v1 eval_params: deterministic: false n_timesteps: 2000000 policy_hyperparams: activation_fn: relu cnn_feature_dim: 128 cnn_style: microrts seed: 3 use_deterministic_algorithms: true wandb_entity: null wandb_group: null wandb_project_name: rl-algo-impls-benchmarks wandb_tags: - benchmark_fb34ab8 - host_150-230-34-91 ```
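The "mean - std" selection rule described in the Training Results section is simple enough to sketch; the numbers below just re-encode the table:

```python
# The "mean - std" selection rule, re-encoding the reevaluation table above.
runs = [
    {"seed": 1, "reward_mean": 5.11875, "reward_std": 0.304094},
    {"seed": 2, "reward_mean": 1.57281, "reward_std": 9.87922},
    {"seed": 3, "reward_mean": 5.29124, "reward_std": 0.381996},
]

best = max(runs, key=lambda r: r["reward_mean"] - r["reward_std"])
print(best["seed"])  # -> 3, matching the starred row
```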
Baybars/wav2vec2-xls-r-300m-cv8-turkish
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "tr", "dataset:common_voice", "transformers", "common_voice", "generated_from_trainer", "hf-asr-leaderboard", "robust-speech-event", "license:apache-2.0" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
2023-03-23T07:02:06Z
--- tags: - conversational --- # Kurisu DialoGPT Model
BeIR/query-gen-msmarco-t5-base-v1
[ "pytorch", "jax", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
1,816
null
## Alpaca Scrolls This LoRA was trained on a combination of Elder Scrolls books and the Alpaca dataset. The Elder Scrolls books were reformatted to fit the Alpaca formatting, so the LoRA should work with anything designed for Alpaca while also having an understanding of the Elder Scrolls universe. For reference, the standard Alpaca prompt template is sketched below.
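The card refers to the Alpaca formatting without showing it. This is the standard Alpaca prompt template; exactly how the Elder Scrolls passages were slotted into it is an assumption based on the card's description, and the example values are invented.

```python
# The standard Alpaca prompt template; the example instruction/input below
# are hypothetical illustrations, not actual training rows.
ALPACA_TEMPLATE = (
    "Below is an instruction that describes a task, paired with an input "
    "that provides further context. Write a response that appropriately "
    "completes the request.\n\n"
    "### Instruction:\n{instruction}\n\n"
    "### Input:\n{input}\n\n"
    "### Response:\n{response}"
)

print(ALPACA_TEMPLATE.format(
    instruction="Summarize the following Elder Scrolls book passage.",
    input="The Dragon War raged across the skies of ancient Skyrim...",
    response="",  # left empty at inference time; the model fills it in
))
```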
BeIR/query-gen-msmarco-t5-large-v1
[ "pytorch", "jax", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
1,225
null
Access to model MKP99/autotrain-turbine-chinese-43066109786 is restricted and you are not in the authorized list. Visit https://huggingface.co/MKP99/autotrain-turbine-chinese-43066109786 to ask for access.
BeIR/sparta-msmarco-distilbert-base-v1
[ "pytorch", "distilbert", "feature-extraction", "arxiv:2009.13013", "arxiv:2104.08663", "transformers" ]
feature-extraction
{ "architectures": [ "DistilBertModel" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
106
2023-03-23T07:08:49Z
--- library_name: stable-baselines3 tags: - AntBulletEnv-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: AntBulletEnv-v0 type: AntBulletEnv-v0 metrics: - type: mean_reward value: 1495.90 +/- 315.48 name: mean_reward verified: false --- # **A2C** Agent playing **AntBulletEnv-v0** This is a trained model of an **A2C** agent playing **AntBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
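Since the usage section above is still a TODO, here is a minimal sketch of what loading and running an SB3 A2C checkpoint on AntBulletEnv-v0 typically looks like. The repo id and filename are hypothetical placeholders, since the card does not say where the checkpoint lives, and the snippet assumes the older Gym API.

```python
# Hedged sketch: repo id and filename below are hypothetical placeholders.
import gym
import pybullet_envs  # noqa: F401  (registers AntBulletEnv-v0 with gym)
from huggingface_sb3 import load_from_hub
from stable_baselines3 import A2C

checkpoint = load_from_hub(
    repo_id="user/a2c-AntBulletEnv-v0",   # placeholder
    filename="a2c-AntBulletEnv-v0.zip",   # placeholder
)
model = A2C.load(checkpoint)

env = gym.make("AntBulletEnv-v0")
obs = env.reset()  # older Gym API: reset() returns only the observation
for _ in range(1000):
    action, _states = model.predict(obs, deterministic=True)
    obs, reward, done, info = env.step(action)
    if done:
        obs = env.reset()
```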
Benicio/t5-small-finetuned-en-to-ru
[ "pytorch", "tensorboard", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
50
2023-03-23T07:35:39Z
--- language: en thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1627139892135489536/8AugtGFn_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Minx Marple</div> <div style="text-align: center; font-size: 14px;">@minxmarple</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Minx Marple. | Data | Minx Marple | | --- | --- | | Tweets downloaded | 3227 | | Retweets | 159 | | Short tweets | 470 | | Tweets kept | 2598 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/s328dc4u/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @minxmarple's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/p8idc6q8) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/p8idc6q8/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/minxmarple') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
BertChristiaens/EmojiPredictor
[ "pytorch", "distilbert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "DistilBertForTokenClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional image generation of cute 🦋. ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('Fgenerberry/sd-class-butterflies-32') image = pipeline().images[0] image ```
Bharathdamu/wav2vec2-large-xls-r-300m-hindi3-colab
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-23T07:53:36Z
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: my_awesome_model results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # my_awesome_model This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.8609 - Accuracy: 0.5706 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 2.013 | 1.0 | 2190 | 1.9610 | 0.5562 | | 1.7891 | 2.0 | 4380 | 1.8609 | 0.5706 | ### Framework versions - Transformers 4.26.1 - Pytorch 2.1.0.dev20230319 - Datasets 2.10.1 - Tokenizers 0.13.2
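For reference, a sketch of a training setup matching the hyperparameters listed above. The dataset is unnamed ("None dataset"), so the data and label count below are placeholders.

```python
# Sketch matching the listed hyperparameters; dataset and label count are
# placeholders, since the card only says "None dataset".
from datasets import Dataset
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          DataCollatorWithPadding, Trainer, TrainingArguments)

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model = AutoModelForSequenceClassification.from_pretrained(
    "distilbert-base-uncased", num_labels=2)  # placeholder label count

# Placeholder data standing in for the unnamed dataset
raw = Dataset.from_dict({"text": ["great movie", "terrible movie"],
                         "label": [1, 0]})
tokenized = raw.map(lambda x: tokenizer(x["text"], truncation=True),
                    batched=True)

args = TrainingArguments(
    output_dir="my_awesome_model",
    learning_rate=2e-5,          # matches the card
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    num_train_epochs=2,
    seed=42,
    lr_scheduler_type="linear",  # Adam betas/epsilon above are the defaults
    evaluation_strategy="epoch",
)

trainer = Trainer(model=model, args=args, train_dataset=tokenized,
                  eval_dataset=tokenized,
                  data_collator=DataCollatorWithPadding(tokenizer))
trainer.train()
```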
Bhuvana/t5-base-spellchecker
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
93
null
Access to model RuiZhang4041/baseline_bart_spin is restricted and you are not in the authorized list. Visit https://huggingface.co/RuiZhang4041/baseline_bart_spin to ask for access.
BigSalmon/Flowberta
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
2023-03-23T08:14:57Z
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### lcbanner Dreambooth model trained by nan2 with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb) Sample pictures of this concept: ![0](https://huggingface.co/nan2/lcbanner/resolve/main/sample_images/licai_(12).png) ![1](https://huggingface.co/nan2/lcbanner/resolve/main/sample_images/licai_(17).png) ![2](https://huggingface.co/nan2/lcbanner/resolve/main/sample_images/licai_(14).png) ![3](https://huggingface.co/nan2/lcbanner/resolve/main/sample_images/licai_(19).png) ![4](https://huggingface.co/nan2/lcbanner/resolve/main/sample_images/licai_(22).png) ![5](https://huggingface.co/nan2/lcbanner/resolve/main/sample_images/licai_(15).png) ![6](https://huggingface.co/nan2/lcbanner/resolve/main/sample_images/licai_(10).png) ![7](https://huggingface.co/nan2/lcbanner/resolve/main/sample_images/licai_(13).png) ![8](https://huggingface.co/nan2/lcbanner/resolve/main/sample_images/licai_(11).png) ![9](https://huggingface.co/nan2/lcbanner/resolve/main/sample_images/licai_(21).png) ![10](https://huggingface.co/nan2/lcbanner/resolve/main/sample_images/licai_(18).png) ![11](https://huggingface.co/nan2/lcbanner/resolve/main/sample_images/licai_(16).png) ![12](https://huggingface.co/nan2/lcbanner/resolve/main/sample_images/licai_(20).png)
BigSalmon/MrLincoln5
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
2023-03-23T09:47:49Z
--- language: en thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1580630744292024321/W25DMwS__400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Hebjaaa</div> <div style="text-align: center; font-size: 14px;">@hebja_</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Hebjaaa. | Data | Hebjaaa | | --- | --- | | Tweets downloaded | 499 | | Retweets | 90 | | Short tweets | 126 | | Tweets kept | 283 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/ddopgfxf/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @hebja_'s tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/vrbm2ocg) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/vrbm2ocg/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/hebja_') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
BigSalmon/MrLincoln6
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # {MODEL_NAME} This is a [sentence-transformers](https://www.SBERT.net) model: it maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling operation on top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}') model = AutoModel.from_pretrained('{MODEL_NAME}') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 50 with parameters: ``` {'batch_size': 16, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss` Parameters of the fit()-Method: ``` { "epochs": 1, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": 50, "warmup_steps": 5, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: XLMRobertaModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
BigSalmon/MrLincoln7
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-23T09:48:57Z
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python import gym # load_from_hub is the pickle-loading helper defined in the Deep RL Course notebook model = load_from_hub(repo_id="MohammedEltoum/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
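Continuing from the snippet above: in the Deep RL Course convention the pickled model is a dict holding the Q-table, so a greedy rollout can be sketched as below. That the dict exposes a `qtable` key is an assumption based on the course notebooks, and the snippet uses the older Gym API.

```python
# Greedy rollout with the downloaded Q-table. Assumes the pickled dict
# exposes "qtable" and "env_id" keys, as in the course notebooks.
import gym
import numpy as np

env = gym.make(model["env_id"], is_slippery=False)
qtable = np.array(model["qtable"])

state = env.reset()  # older Gym API: reset() returns only the observation
done = False
total_reward = 0.0
while not done:
    action = int(np.argmax(qtable[state]))  # greedy action selection
    state, reward, done, info = env.step(action)
    total_reward += reward
print(total_reward)  # 1.0 expected on the non-slippery 4x4 map
```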
BigSalmon/Points2
[ "pytorch", "gpt2", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- license: cc-by-sa-4.0 language: - ru - qm pipeline_tag: translation library_name: transformers tags: - qarachay-malqar --- Authors: Bogdan Tewnalany, Ali Berberov Github: https://github.com/TBSj/Qarachay_Malqar_translator_python As a base we took the mBART-50 model and trained it on 27,235 parallel sentences from Russian to the Qarachay-Malqar language. This is not enough for good predictions; nevertheless, there is some result. We are currently collecting more sentences to improve the results, and we hope this will eventually let us avoid pre-trained models and implement the translator in R instead of Python. Dataset: https://huggingface.co/datasets/TSjB/qm_ru_parallel Where to use: https://huggingface.co/spaces/TSjB/QM_RU_translator https://tsjb-qm-ru-translator.hf.space/ Telegram: https://t.me/QMKochBot
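No usage snippet is given; below is a sketch of running the model as a plain mBART-50 checkpoint. Both the repo id and the target language token are placeholders: mBART-50 has no built-in Qarachay-Malqar code, so whichever token the authors repurposed for it is unknown.

```python
# Sketch of inference with a plain mBART-50 checkpoint; the repo id and the
# target language code below are placeholders, not the authors' actual values.
from transformers import MBart50TokenizerFast, MBartForConditionalGeneration

repo_id = "TSjB/qm-ru-mbart50"  # placeholder repo id
tokenizer = MBart50TokenizerFast.from_pretrained(repo_id, src_lang="ru_RU")
model = MBartForConditionalGeneration.from_pretrained(repo_id)

inputs = tokenizer("Добрый день!", return_tensors="pt")
generated = model.generate(
    **inputs,
    # placeholder target code; the actual repurposed token is model-specific
    forced_bos_token_id=tokenizer.lang_code_to_id["tr_TR"],
)
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])
```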
BigTooth/DialoGPT-small-tohru
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
2023-03-23T10:17:41Z
--- language: - en tags: - audio - automatic-speech-recognition license: mit library_name: ctranslate2 --- # Whisper tiny.en model for CTranslate2 This repository contains the conversion of [openai/whisper-tiny.en](https://huggingface.co/openai/whisper-tiny.en) to the [CTranslate2](https://github.com/OpenNMT/CTranslate2) model format. This model can be used in CTranslate2 or projects based on CTranslate2 such as [faster-whisper](https://github.com/guillaumekln/faster-whisper). ## Example ```python from faster_whisper import WhisperModel model = WhisperModel("tiny.en") segments, info = model.transcribe("audio.mp3") for segment in segments: print("[%.2fs -> %.2fs] %s" % (segment.start, segment.end, segment.text)) ``` ## Conversion details The original model was converted with the following command: ``` ct2-transformers-converter --model openai/whisper-tiny.en --output_dir faster-whisper-tiny.en \ --copy_files tokenizer.json --quantization float16 ``` Note that the model weights are saved in FP16. This type can be changed when the model is loaded using the [`compute_type` option in CTranslate2](https://opennmt.net/CTranslate2/quantization.html). ## More information **For more information about the original model, see its [model card](https://huggingface.co/openai/whisper-tiny.en).**
BigTooth/Megumin-v0.2
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- language: - en tags: - audio - automatic-speech-recognition license: mit library_name: ctranslate2 --- # Whisper base.en model for CTranslate2 This repository contains the conversion of [openai/whisper-base.en](https://huggingface.co/openai/whisper-base.en) to the [CTranslate2](https://github.com/OpenNMT/CTranslate2) model format. This model can be used in CTranslate2 or projects based on CTranslate2 such as [faster-whisper](https://github.com/guillaumekln/faster-whisper). ## Example ```python from faster_whisper import WhisperModel model = WhisperModel("base.en") segments, info = model.transcribe("audio.mp3") for segment in segments: print("[%.2fs -> %.2fs] %s" % (segment.start, segment.end, segment.text)) ``` ## Conversion details The original model was converted with the following command: ``` ct2-transformers-converter --model openai/whisper-base.en --output_dir faster-whisper-base.en \ --copy_files tokenizer.json --quantization float16 ``` Note that the model weights are saved in FP16. This type can be changed when the model is loaded using the [`compute_type` option in CTranslate2](https://opennmt.net/CTranslate2/quantization.html). ## More information **For more information about the original model, see its [model card](https://huggingface.co/openai/whisper-base.en).**
BillelBenoudjit/jplu-wikiann
[ "fr", "dataset:wikiann", "model-index" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - en tags: - audio - automatic-speech-recognition license: mit library_name: ctranslate2 --- # Whisper small.en model for CTranslate2 This repository contains the conversion of [openai/whisper-small.en](https://huggingface.co/openai/whisper-small.en) to the [CTranslate2](https://github.com/OpenNMT/CTranslate2) model format. This model can be used in CTranslate2 or projects based on CTranslate2 such as [faster-whisper](https://github.com/guillaumekln/faster-whisper). ## Example ```python from faster_whisper import WhisperModel model = WhisperModel("small.en") segments, info = model.transcribe("audio.mp3") for segment in segments: print("[%.2fs -> %.2fs] %s" % (segment.start, segment.end, segment.text)) ``` ## Conversion details The original model was converted with the following command: ``` ct2-transformers-converter --model openai/whisper-small.en --output_dir faster-whisper-small.en \ --copy_files tokenizer.json --quantization float16 ``` Note that the model weights are saved in FP16. This type can be changed when the model is loaded using the [`compute_type` option in CTranslate2](https://opennmt.net/CTranslate2/quantization.html). ## More information **For more information about the original model, see its [model card](https://huggingface.co/openai/whisper-small.en).**
Bilz/DialoGPT-small-harrypotter
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-23T10:21:29Z
--- language: - en - zh - de - es - ru - ko - fr - ja - pt - tr - pl - ca - nl - ar - sv - it - id - hi - fi - vi - he - uk - el - ms - cs - ro - da - hu - ta - 'no' - th - ur - hr - bg - lt - la - mi - ml - cy - sk - te - fa - lv - bn - sr - az - sl - kn - et - mk - br - eu - is - hy - ne - mn - bs - kk - sq - sw - gl - mr - pa - si - km - sn - yo - so - af - oc - ka - be - tg - sd - gu - am - yi - lo - uz - fo - ht - ps - tk - nn - mt - sa - lb - my - bo - tl - mg - as - tt - haw - ln - ha - ba - jw - su tags: - audio - automatic-speech-recognition license: mit library_name: ctranslate2 --- # Whisper small model for CTranslate2 This repository contains the conversion of [openai/whisper-small](https://huggingface.co/openai/whisper-small) to the [CTranslate2](https://github.com/OpenNMT/CTranslate2) model format. This model can be used in CTranslate2 or projects based on CTranslate2 such as [faster-whisper](https://github.com/guillaumekln/faster-whisper). ## Example ```python from faster_whisper import WhisperModel model = WhisperModel("small") segments, info = model.transcribe("audio.mp3") for segment in segments: print("[%.2fs -> %.2fs] %s" % (segment.start, segment.end, segment.text)) ``` ## Conversion details The original model was converted with the following command: ``` ct2-transformers-converter --model openai/whisper-small --output_dir faster-whisper-small \ --copy_files tokenizer.json --quantization float16 ``` Note that the model weights are saved in FP16. This type can be changed when the model is loaded using the [`compute_type` option in CTranslate2](https://opennmt.net/CTranslate2/quantization.html). ## More information **For more information about the original model, see its [model card](https://huggingface.co/openai/whisper-small).**
Bimal/my_bot_model
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- language: - en tags: - audio - automatic-speech-recognition license: mit library_name: ctranslate2 --- # Whisper medium.en model for CTranslate2 This repository contains the conversion of [openai/whisper-medium.en](https://huggingface.co/openai/whisper-medium.en) to the [CTranslate2](https://github.com/OpenNMT/CTranslate2) model format. This model can be used in CTranslate2 or projects based on CTranslate2 such as [faster-whisper](https://github.com/guillaumekln/faster-whisper). ## Example ```python from faster_whisper import WhisperModel model = WhisperModel("medium.en") segments, info = model.transcribe("audio.mp3") for segment in segments: print("[%.2fs -> %.2fs] %s" % (segment.start, segment.end, segment.text)) ``` ## Conversion details The original model was converted with the following command: ``` ct2-transformers-converter --model openai/whisper-medium.en --output_dir faster-whisper-medium.en \ --copy_files tokenizer.json --quantization float16 ``` Note that the model weights are saved in FP16. This type can be changed when the model is loaded using the [`compute_type` option in CTranslate2](https://opennmt.net/CTranslate2/quantization.html). ## More information **For more information about the original model, see its [model card](https://huggingface.co/openai/whisper-medium.en).**
Biniam/en_ti_translate
[ "pytorch", "marian", "text2text-generation", "transformers", "translation", "autotrain_compatible" ]
translation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
null
--- language: - en - zh - de - es - ru - ko - fr - ja - pt - tr - pl - ca - nl - ar - sv - it - id - hi - fi - vi - he - uk - el - ms - cs - ro - da - hu - ta - 'no' - th - ur - hr - bg - lt - la - mi - ml - cy - sk - te - fa - lv - bn - sr - az - sl - kn - et - mk - br - eu - is - hy - ne - mn - bs - kk - sq - sw - gl - mr - pa - si - km - sn - yo - so - af - oc - ka - be - tg - sd - gu - am - yi - lo - uz - fo - ht - ps - tk - nn - mt - sa - lb - my - bo - tl - mg - as - tt - haw - ln - ha - ba - jw - su tags: - audio - automatic-speech-recognition license: mit library_name: ctranslate2 --- # Whisper medium model for CTranslate2 This repository contains the conversion of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) to the [CTranslate2](https://github.com/OpenNMT/CTranslate2) model format. This model can be used in CTranslate2 or projects based on CTranslate2 such as [faster-whisper](https://github.com/guillaumekln/faster-whisper). ## Example ```python from faster_whisper import WhisperModel model = WhisperModel("medium") segments, info = model.transcribe("audio.mp3") for segment in segments: print("[%.2fs -> %.2fs] %s" % (segment.start, segment.end, segment.text)) ``` ## Conversion details The original model was converted with the following command: ``` ct2-transformers-converter --model openai/whisper-medium --output_dir faster-whisper-medium \ --copy_files tokenizer.json --quantization float16 ``` Note that the model weights are saved in FP16. This type can be changed when the model is loaded using the [`compute_type` option in CTranslate2](https://opennmt.net/CTranslate2/quantization.html). ## More information **For more information about the original model, see its [model card](https://huggingface.co/openai/whisper-medium).**
BinksSachary/ShaxxBot
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
Access to model dreeven/page_classification is restricted and you are not in the authorized list. Visit https://huggingface.co/dreeven/page_classification to ask for access.
BitanBiswas/mbert-bengali-ner-finetuned-ner
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
2023-03-23T10:36:07Z
--- language: - en - zh - de - es - ru - ko - fr - ja - pt - tr - pl - ca - nl - ar - sv - it - id - hi - fi - vi - he - uk - el - ms - cs - ro - da - hu - ta - 'no' - th - ur - hr - bg - lt - la - mi - ml - cy - sk - te - fa - lv - bn - sr - az - sl - kn - et - mk - br - eu - is - hy - ne - mn - bs - kk - sq - sw - gl - mr - pa - si - km - sn - yo - so - af - oc - ka - be - tg - sd - gu - am - yi - lo - uz - fo - ht - ps - tk - nn - mt - sa - lb - my - bo - tl - mg - as - tt - haw - ln - ha - ba - jw - su tags: - audio - automatic-speech-recognition license: mit library_name: ctranslate2 --- # Whisper large-v2 model for CTranslate2 This repository contains the conversion of [openai/whisper-large-v2](https://huggingface.co/openai/whisper-large-v2) to the [CTranslate2](https://github.com/OpenNMT/CTranslate2) model format. This model can be used in CTranslate2 or projects based on CTranslate2 such as [faster-whisper](https://github.com/guillaumekln/faster-whisper). ## Example ```python from faster_whisper import WhisperModel model = WhisperModel("large-v2") segments, info = model.transcribe("audio.mp3") for segment in segments: print("[%.2fs -> %.2fs] %s" % (segment.start, segment.end, segment.text)) ``` ## Conversion details The original model was converted with the following command: ``` ct2-transformers-converter --model openai/whisper-large-v2 --output_dir faster-whisper-large-v2 \ --copy_files tokenizer.json --quantization float16 ``` Note that the model weights are saved in FP16. This type can be changed when the model is loaded using the [`compute_type` option in CTranslate2](https://opennmt.net/CTranslate2/quantization.html). ## More information **For more information about the original model, see its [model card](https://huggingface.co/openai/whisper-large-v2).**
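Building on the example above, the `info` value returned by `transcribe` also carries the language-detection result. A short sketch using attribute names from the faster-whisper README:

```python
from faster_whisper import WhisperModel

model = WhisperModel("large-v2")
segments, info = model.transcribe("audio.mp3", beam_size=5)

# info exposes the detected language and its probability.
print("Detected language '%s' with probability %f" % (info.language, info.language_probability))
```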
Blackmist786/DialoGPt-small-transformers4
[ "pytorch" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
---
tags:
- FrozenLake-v1-4x4-no_slippery
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-FrozenLake-v1-4x4-noSlippery
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: FrozenLake-v1-4x4-no_slippery
      type: FrozenLake-v1-4x4-no_slippery
    metrics:
    - type: mean_reward
      value: 1.00 +/- 0.00
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **FrozenLake-v1**

This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**.

## Usage

```python
import gym

# load_from_hub is the helper defined in the Hugging Face Deep RL Course notebook.
model = load_from_hub(repo_id="marimurta/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
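A minimal sketch of acting greedily with the downloaded table, continuing from the snippet above. It assumes the pickled dict stores the table under a "qtable" key (as the Deep RL Course notebooks save it) and the gym>=0.26 five-tuple `step` API; adjust for your versions:

```python
import numpy as np

state, info = env.reset()
terminated = truncated = False
while not (terminated or truncated):
    action = np.argmax(model["qtable"][state])  # greedy action from the Q-table
    state, reward, terminated, truncated, info = env.step(action)
```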
Blazeolmo/Scrabunzi
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
license: creativeml-openrail-m
tags:
- stable-diffusion
- text-to-image
---

-----------------

# NSFW Models trained on iStripper photos from pichunter.com

Ally Breelsen - 64 photos, 19200 steps, 60% vanilla 40% best model realist, trigger "albr"

Ava Dalush - 62 photos, 18600 steps, 60% vanilla 40% best model realist, trigger "avda"

v1.0 - extracted using vanilla base
v1.1 - extracted using matching base

-----------------

# Be Careful!

These models are not intended for commercial use. If you use them commercially, you may be infringing copyright and breaking the law. Please use them responsibly.

-----------------

civitai.com/user/Powidl43
Bloodwarrior/Chikfalay
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
library_name: sample-factory
tags:
- deep-reinforcement-learning
- reinforcement-learning
- sample-factory
model-index:
- name: APPO
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: doom_health_gathering_supreme
      type: doom_health_gathering_supreme
    metrics:
    - type: mean_reward
      value: 18.04 +/- 4.66
      name: mean_reward
      verified: false
---

A(n) **APPO** model trained on the **doom_health_gathering_supreme** environment.

This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory.
Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/

## Downloading the model

After installing Sample-Factory, download the model with:
```
python -m sample_factory.huggingface.load_from_hub -r arrandi/rl_course_vizdoom_health_gathering_supreme
```

## Using the model

To run the model after download, use the `enjoy` script corresponding to this environment (for VizDoom this is the `sf_examples.vizdoom.enjoy_vizdoom` module):
```
python -m sf_examples.vizdoom.enjoy_vizdoom --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme
```

You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag.
See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details; a sketch of the push invocation is given at the end of this card.

## Training with this model

To continue training with this model, use the `train` script corresponding to this environment:
```
python -m sf_examples.vizdoom.train_vizdoom --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=10000000000
```

Note that you may have to adjust `--train_for_env_steps` to a suitably high number, as the experiment will resume from the number of steps at which it previously stopped.
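For the upload mentioned above, a sketch of the push invocation — `--hf_repository` and `--max_num_episodes` are flag names from the Sample-Factory docs, but verify them against your installed version, and replace the username placeholder:
```
python -m sf_examples.vizdoom.enjoy_vizdoom --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --max_num_episodes=10 --push_to_hub --hf_repository=<your_hf_username>/rl_course_vizdoom_health_gathering_supreme
```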
BlueGamerBeast/DialoGPT-small-Morgana
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- tags: - LunarLander-v2 - ppo - deep-reinforcement-learning - reinforcement-learning - custom-implementation - deep-rl-course model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: -69.74 +/- 103.80 name: mean_reward verified: false --- # PPO Agent Playing LunarLander-v2 This is a trained model of a PPO agent playing LunarLander-v2. # Hyperparameters ```python {'exp_name': 'ppo' 'seed': 1 'torch_deterministic': True 'cuda': True 'track': False 'wandb_project_name': 'cleanRL' 'wandb_entity': None 'capture_video': False 'env_id': 'LunarLander-v2' 'total_timesteps': 300000 'learning_rate': 0.0001 'num_envs': 4 'num_steps': 128 'anneal_lr': True 'gae': True 'gamma': 0.99 'gae_lambda': 0.95 'num_minibatches': 4 'update_epochs': 4 'norm_adv': True 'clip_coef': 0.4 'clip_vloss': True 'ent_coef': 0.01 'vf_coef': 0.5 'max_grad_norm': 0.5 'target_kl': None 'repo_id': 'gauthamk28/LunarLander-v2' 'batch_size': 512 'minibatch_size': 128} ```
BogdanKuloren/continual-learning-paper-embeddings-model
[ "pytorch", "mpnet", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "MPNetModel" ], "model_type": "mpnet", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: jfforero/distilbert-base-uncased-finetuned-poemas-corto results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # jfforero/distilbert-base-uncased-finetuned-poemas-corto This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 5.1475 - Validation Loss: 5.1458 - Epoch: 0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'WarmUp', 'config': {'initial_learning_rate': 2e-05, 'decay_schedule_fn': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': -998, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}, '__passive_serialization__': True}, 'warmup_steps': 1000, 'power': 1.0, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: mixed_float16 ### Training results | Train Loss | Validation Loss | Epoch | |:----------:|:---------------:|:-----:| | 5.1475 | 5.1458 | 0 | ### Framework versions - Transformers 4.27.2 - TensorFlow 2.11.0 - Datasets 2.10.1 - Tokenizers 0.13.2
Branex/gpt-neo-2.7B
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
license: creativeml-openrail-m
tags:
- stable-diffusion
- text-to-image
---

-----------------

# Autumn Skye

Beautiful Women

autumnskyeart.com

v1.0 - 22 images, 2200 steps / 60% vanilla, 40% nai base / nai extraction
v1.1 - 60% vanilla, 40% nai base / 60% vanilla, 40% nai extraction
v2.0 - 48% vanilla, 32% nai, 12% camelliamix nsfw v1.1, 8% camelliamix v2 base

triggers "beautiful woman", "ausk", "talented artist"

-----------------

# Be Careful!

These models are not intended for commercial use. If you use them commercially, you may be infringing copyright and breaking the law. Please use them responsibly.

-----------------

civitai.com/user/Powidl43
Brendan/cse244b-hw2-roberta
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
28
null
---
tags:
- Taxi-v3
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-Taxi-v3
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: Taxi-v3
      type: Taxi-v3
    metrics:
    - type: mean_reward
      value: 7.56 +/- 2.71
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **Taxi-v3**

This is a trained model of a **Q-Learning** agent playing **Taxi-v3**.

## Usage

```python
import gym

# load_from_hub is the helper defined in the Hugging Face Deep RL Course notebook.
model = load_from_hub(repo_id="marimurta/q-Taxi-v3", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
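To watch the learned policy drive one episode in the console, a short sketch continuing from the snippet above — it assumes the same "qtable" key as the course notebooks and the gymnasium-style render API:

```python
import numpy as np

env = gym.make(model["env_id"], render_mode="ansi")  # Taxi-v3 supports text rendering
state, info = env.reset()
terminated = truncated = False
while not (terminated or truncated):
    print(env.render())  # prints the current grid as text
    action = np.argmax(model["qtable"][state])
    state, reward, terminated, truncated, info = env.step(action)
```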
Broadus20/DialoGPT-small-harrypotter
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
---
license: apache-2.0
tags:
- generated_from_keras_callback
model-index:
- name: praveenseb/product_review_generator
  results: []
datasets:
- amazon_us_reviews
pipeline_tag: text-generation
---

<!-- This model card has been generated automatically according to the information Keras had access to. You
should probably proofread and complete it, then remove this comment. -->

# praveenseb/product_review_generator

This model is a fine-tuned version of [distilgpt2](https://huggingface.co/distilgpt2) on a sample of the [amazon_us_reviews](https://huggingface.co/datasets/amazon_us_reviews) dataset. The sample was drawn from the 'Apparel_v1_00' subset.

## Model description

This model can auto-generate review text for apparel products given a product title, a review rating (1-5), and a review headline as an input prompt. The input prompt should be in the format <|BOS|>product_title<|SEP|>product_rating<|SEP|>review_title<|SEP|>. For example, <|BOS|>Columbia Women's Benton Springs Full-Zip Fleece Jacket<|SEP|>5<|SEP|>Awesome jacket!<|SEP|>. You can find the complete code in my [GitHub repository](https://github.com/praveenseb/product-review-generator).

## Intended uses & limitations

This model is only intended to demonstrate the text generation capabilities of transformer-based models. Do not use it commercially or for any real-life purpose. The model is trained specifically on the 'Apparel_v1_00' dataset, so using non-apparel product titles in the input prompt may yield inconsistent results.

## Training procedure

The code used for training can be found in my [GitHub repository](https://github.com/praveenseb/product-review-generator).

### Training hyperparameters

The following hyperparameters were used during training:
- optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'ExponentialDecay', 'config': {'initial_learning_rate': 0.0002, 'decay_steps': 1000, 'decay_rate': 0.95, 'staircase': True, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False, 'weight_decay_rate': 0.0}
- training_precision: float32

### Training results

| Train Loss | Epoch |
|:----------:|:-----:|
| 0.7579     | 0     |
| 0.6720     | 1     |

### Framework versions

- Transformers 4.27.3
- TensorFlow 2.11.0
- Datasets 2.10.1
- Tokenizers 0.13.2
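Given the prompt format described above, inference can be sketched with the standard text-generation pipeline. The sampling settings are illustrative rather than the author's, and `framework="tf"` is used because the checkpoint was trained in Keras/TensorFlow:

```python
from transformers import pipeline

generator = pipeline("text-generation", model="praveenseb/product_review_generator", framework="tf")

prompt = "<|BOS|>Columbia Women's Benton Springs Full-Zip Fleece Jacket<|SEP|>5<|SEP|>Awesome jacket!<|SEP|>"
# do_sample/top_p are illustrative defaults; tune them to taste.
print(generator(prompt, max_new_tokens=80, do_sample=True, top_p=0.95)[0]["generated_text"])
```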
Bryson575x/riceboi
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - spacy - token-classification language: - en model-index: - name: en_pipeline results: - task: name: NER type: token-classification metrics: - name: NER Precision type: precision value: 0.898989899 - name: NER Recall type: recall value: 0.8682926829 - name: NER F Score type: f_score value: 0.8833746898 --- | Feature | Description | | --- | --- | | **Name** | `en_pipeline` | | **Version** | `0.0.0` | | **spaCy** | `>=3.5.1,<3.6.0` | | **Default Pipeline** | `tok2vec`, `ner` | | **Components** | `tok2vec`, `ner` | | **Vectors** | 0 keys, 0 unique vectors (0 dimensions) | | **Sources** | n/a | | **License** | n/a | | **Author** | [n/a]() | ### Label Scheme <details> <summary>View label scheme (7 labels for 1 components)</summary> | Component | Labels | | --- | --- | | **`ner`** | `DURATION`, `LOC`, `MISC`, `MONEY`, `ORG`, `PER`, `REASON` | </details> ### Accuracy | Type | Score | | --- | --- | | `ENTS_F` | 88.34 | | `ENTS_P` | 89.90 | | `ENTS_R` | 86.83 | | `TOK2VEC_LOSS` | 322.84 | | `NER_LOSS` | 4541.55 |
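A minimal usage sketch, assuming the pipeline has been installed as a package (e.g. `pip install` on the packaged wheel) so that `spacy.load` can resolve it by name; the example sentence is made up:

```python
import spacy

nlp = spacy.load("en_pipeline")
doc = nlp("Acme Corp paid $1,200 to John Smith in Berlin over three months.")
for ent in doc.ents:
    # Labels come from the scheme above, e.g. ORG, MONEY, PER, LOC, DURATION.
    print(ent.text, ent.label_)
```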
BumBelDumBel/ZORK-AI-TEST
[ "pytorch", "tensorboard", "gpt2", "text-generation", "transformers", "generated_from_trainer", "license:mit" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
---
datasets:
- glue
---

This model is a fine-tuned version of "bert-fined-tunned-model".
BumBelDumBel/ZORK_AI_FANTASY
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imagefolder model-index: - name: vit-base-patch16-224-finetuned-flower results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-patch16-224-finetuned-flower This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on the imagefolder dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results ### Framework versions - Transformers 4.24.0 - Pytorch 1.13.1+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
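As a sketch of inference with this kind of checkpoint — the repo id below is a hypothetical placeholder, since the card does not state the full id; substitute the actual one:

```python
from transformers import pipeline

# "<username>/vit-base-patch16-224-finetuned-flower" is a hypothetical repo id.
classifier = pipeline("image-classification", model="<username>/vit-base-patch16-224-finetuned-flower")
print(classifier("flower.jpg"))
```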
Buntan/bert-finetuned-ner
[ "pytorch", "tensorboard", "bert", "token-classification", "dataset:conll2003", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
---
tags:
- Taxi-v3
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q_learning_taxi_v3
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: Taxi-v3
      type: Taxi-v3
    metrics:
    - type: mean_reward
      value: 7.56 +/- 2.71
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **Taxi-v3**

This is a trained model of a **Q-Learning** agent playing **Taxi-v3**.

## Usage

```python
import gym

# load_from_hub is the helper defined in the Hugging Face Deep RL Course notebook.
model = load_from_hub(repo_id="makaveli10/q_learning_taxi_v3", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
Buntan/xlm-roberta-base-finetuned-marc-en
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
tags:
- FrozenLake-v1-4x4-no_slippery
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-FrozenLake-v1-4x4-noSlippery
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: FrozenLake-v1-4x4-no_slippery
      type: FrozenLake-v1-4x4-no_slippery
    metrics:
    - type: mean_reward
      value: 1.00 +/- 0.00
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **FrozenLake-v1**

This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**.

## Usage

```python
import gym

# load_from_hub is the helper defined in the Hugging Face Deep RL Course notebook.
model = load_from_hub(repo_id="SpookyWooky5/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
Bwehfuk/Ron
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-23T11:56:29Z
---
tags:
- Taxi-v3
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-Taxi-v3
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: Taxi-v3
      type: Taxi-v3
    metrics:
    - type: mean_reward
      value: 7.48 +/- 2.74
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **Taxi-v3**

This is a trained model of a **Q-Learning** agent playing **Taxi-v3**.

## Usage

```python
import gym

# load_from_hub is the helper defined in the Hugging Face Deep RL Course notebook.
model = load_from_hub(repo_id="SpookyWooky5/q-Taxi-v3", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
CAMeL-Lab/bert-base-arabic-camelbert-ca-pos-egy
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
16,451
null
--- language: ["ru"] tags: - russian - classification - sentiment - multiclass widget: - text: "Мне очень жаль" --- ## Sentiment model based on rubert-base-cased-conversational This model was initialized with [rubert-base-cased-conversational](https://huggingface.co/DeepPavlov/rubert-base-cased-conversational) weights and trained on a batch of datasets collected by [Smetanin](https://duckduckgo.com), using the same training sampling presented in [this wonderful work](https://huggingface.co/cointegrated/rubert-tiny-sentiment-balanced). This approach allows for a uniform distribution among different datasets and three classes of sentiment labels: negative, neutral, and positive. Datasets were prepared by David Dale and are hosted [here](https://drive.google.com/file/d/1dir_lixYfReDXxRS5oGGljH8T_f7vVqm/view). I chose rubert-base-cased-conversational weights because, according to Smetanin's work, this model ranks first among all other multilingual and popular Russian language models with BERT base architecture. ### Training and Testing Details This model was trained and tested using the code and hyperparameters from the [rubert-tiny-sentiment-balanced](https://huggingface.co/cointegrated/rubert-tiny-sentiment-balanced) work. ### Labels There are only three labels: negative - 0, neutral - 1, positive - 2 ## Results It outperforms rubert-tiny-sentiment-balanced on four datasets, underperforms on one (linis), and has the same performance on mokoron and rureviews. See [this](https://huggingface.co/cointegrated/rubert-tiny-sentiment-balanced) for the comparison. | Source | Macro F1 | | ----------- | ----------- | | SentiRuEval2016_banks | 0.88 | | SentiRuEval2016_tele | 0.79 | | kaggle_news | 0.73 | | linis | 0.46 | | mokoron | 0.98 | | rureviews | 0.77 | | rusentiment | 0.74 |
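A sketch of scoring text against the three labels above. The repo id is a hypothetical placeholder, since the card does not state it:

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_id = "<username>/<this-model>"  # hypothetical; replace with the actual repo id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)

inputs = tokenizer("Мне очень жаль", return_tensors="pt")
probs = torch.softmax(model(**inputs).logits, dim=-1)[0]
print({label: round(p.item(), 3) for label, p in zip(["negative", "neutral", "positive"], probs)})
```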
CAMeL-Lab/bert-base-arabic-camelbert-ca-pos-glf
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
18
null
---
license: creativeml-openrail-m
tags:
- text-to-image
---

### arki-20230323-12-analog-cnst-5000-steps on Stable Diffusion via Dreambooth

#### model by NickKolok

This is the Stable Diffusion model fine-tuned on the arki-20230323-12-analog-cnst-5000-steps concept taught to Stable Diffusion with Dreambooth.
It can be used by modifying the `instance_prompt`: **arki**

You can also train your own concepts and upload them to the library by using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb).
And you can run your new concept via `diffusers`: [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb), [Spaces with the Public Concepts loaded](https://huggingface.co/spaces/sd-dreambooth-library/stable-diffusion-dreambooth-concepts)
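A sketch of the `diffusers` usage the card points to. The repo id is an assumption assembled from the author and concept names above; verify it before use:

```python
import torch
from diffusers import StableDiffusionPipeline

# Assumed repo id based on the author/concept names; adjust if it differs.
pipe = StableDiffusionPipeline.from_pretrained(
    "NickKolok/arki-20230323-12-analog-cnst-5000-steps", torch_dtype=torch.float16
).to("cuda")

image = pipe("a portrait photo of arki").images[0]  # "arki" is the instance prompt
image.save("arki.png")
```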
CAMeL-Lab/bert-base-arabic-camelbert-ca-pos-msa
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
71
null
--- license: mit tags: - generated_from_trainer metrics: - accuracy model-index: - name: ColD-Fusion-bert-base-uncased-itr23-seed0-finetuned-convincingness-IBM results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # ColD-Fusion-bert-base-uncased-itr23-seed0-finetuned-convincingness-IBM This model is a fine-tuned version of [ibm/ColD-Fusion-bert-base-uncased-itr23-seed0](https://huggingface.co/ibm/ColD-Fusion-bert-base-uncased-itr23-seed0) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.0132 - Accuracy: 0.7417 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | No log | 1.0 | 270 | 0.5642 | 0.7104 | | 0.5032 | 2.0 | 540 | 0.5624 | 0.7279 | | 0.5032 | 3.0 | 810 | 0.6700 | 0.7344 | | 0.2247 | 4.0 | 1080 | 0.9194 | 0.7373 | | 0.2247 | 5.0 | 1350 | 1.0132 | 0.7417 | ### Framework versions - Transformers 4.27.2 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
CAMeL-Lab/bert-base-arabic-camelbert-da-ner
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
42
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: xinyixiuxiu/albert-xlarge-v2-SST2-finetuned results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # xinyixiuxiu/albert-xlarge-v2-SST2-finetuned This model is a fine-tuned version of [albert-xlarge-v2](https://huggingface.co/albert-xlarge-v2) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.0995 - Train Accuracy: 0.9666 - Validation Loss: 0.2012 - Validation Accuracy: 0.9381 - Epoch: 2 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'learning_rate': 3e-06, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Train Accuracy | Validation Loss | Validation Accuracy | Epoch | |:----------:|:--------------:|:---------------:|:-------------------:|:-----:| | 0.2577 | 0.8879 | 0.1899 | 0.9278 | 0 | | 0.1339 | 0.9530 | 0.1549 | 0.9507 | 1 | | 0.0995 | 0.9666 | 0.2012 | 0.9381 | 2 | ### Framework versions - Transformers 4.21.1 - TensorFlow 2.7.0 - Datasets 2.10.1 - Tokenizers 0.12.1
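A Keras-side inference sketch, assuming the checkpoint keeps the standard SST-2 label order (index 0 = negative, index 1 = positive):

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification

model_id = "xinyixiuxiu/albert-xlarge-v2-SST2-finetuned"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = TFAutoModelForSequenceClassification.from_pretrained(model_id)

inputs = tokenizer("A warm, funny, engaging film.", return_tensors="tf")
probs = tf.nn.softmax(model(**inputs).logits, axis=-1)[0]
print({"negative": float(probs[0]), "positive": float(probs[1])})
```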
CAMeL-Lab/bert-base-arabic-camelbert-da-poetry
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:1905.05700", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
37
null
---
library_name: ml-agents
tags:
- Pyramids
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-Pyramids
---

# **ppo** Agent playing **Pyramids**

This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)

The Documentation: https://github.com/huggingface/ml-agents#get-started

We wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub.

### Resume the training

```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your Agent play

You can watch your agent **playing directly in your browser**:

1. Go to https://huggingface.co/spaces/unity/ML-Agents-Pyramids
2. Find your model_id: MarcosMunoz95/Pyramidsss
3. Select your *.nn /*.onnx file
4. Click on Watch the agent play 👀