Columns: modelId (string, 4–81 chars) · tags (list) · pipeline_tag (string, 17 classes) · config (dict) · downloads (int64, 0–59.7M) · first_commit (timestamp[ns, tz=UTC]) · card (string, 51–438k chars)
Augustvember/WokkaBot3
[ "conversational" ]
conversational
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-Taxi-v3 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.52 +/- 2.74 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python model = load_from_hub(repo_id="djib2011/q-Taxi-v3", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
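The `load_from_hub` helper used above comes from the Deep RL course utilities rather than a published package. A minimal sketch of it, assuming (as the snippet implies) that the pickle holds a dict with an `env_id` key and, additionally, a `qtable` key, and assuming the classic `gym` reset/step API:

```python
import pickle

import gym
import numpy as np
from huggingface_hub import hf_hub_download


def load_from_hub(repo_id: str, filename: str) -> dict:
    """Download a pickled Q-learning bundle from the Hub and unpickle it."""
    local_path = hf_hub_download(repo_id=repo_id, filename=filename)
    with open(local_path, "rb") as f:
        return pickle.load(f)


model = load_from_hub(repo_id="djib2011/q-Taxi-v3", filename="q-learning.pkl")
env = gym.make(model["env_id"])

# Greedy rollout: always take the action with the highest Q-value.
# "qtable" is an assumed key name, not confirmed by the card.
state = env.reset()
done, total_reward = False, 0.0
while not done:
    action = int(np.argmax(model["qtable"][state]))
    state, reward, done, _ = env.step(action)
    total_reward += reward
print(total_reward)
```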
Augustvember/WokkaBot7
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - metrics: - type: mean_reward value: 1295.00 +/- 492.10 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga bitcloud2 -f logs/ python enjoy.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga bitcloud2 -f logs/ rl_zoo3 enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python train.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga bitcloud2 ``` ## Hyperparameters ```python OrderedDict([('batch_size', 32), ('buffer_size', 400000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gamma', 0.99), ('gradient_steps', 1), ('learning_rate', 0.0001), ('learning_starts', 200000), ('n_timesteps', 10000000.0), ('normalize', False), ('optimize_memory_usage', True), ('policy', 'CnnPolicy'), ('replay_buffer_kwargs', {'handle_timeout_termination': False}), ('target_update_interval', 30000), ('train_freq', 4)]) ```
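Beyond the RL Zoo CLI above, the downloaded checkpoint can also be inspected from Python with the plain stable-baselines3 API. A minimal sketch — the zip path follows the RL Zoo `logs/` layout and may differ on your machine:

```python
from stable_baselines3 import DQN

# Path assumes the rl_zoo3.load_from_hub download shown above; adjust to your logs/ tree.
model = DQN.load("logs/dqn/SpaceInvadersNoFrameskip-v4_1/SpaceInvadersNoFrameskip-v4.zip")

# Sample a dummy observation just to exercise the loaded policy.
obs = model.observation_space.sample()
action, _ = model.predict(obs, deterministic=True)
print(action)
```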
Augustvember/WokkaBot8
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - fr pipeline_tag: token-classification widget: - text: "La # pyrale du # buis à l'air très friande du # tournesol # semences." example_title: "1 ravageur" - text: "Quelles tactiques le producteur de la # SaskAg utilise-t-il pour protéger ses 13 800 acres de la cécidomyie du blé , de la fausse-teigne des crucifères , des vers-gris et des pucerons." example_title: "4 ravageurs" - text: "puceron cendré sur # colza à surveiller à l ’ automne - symptômes classiques de déformation et décoloration de feuilles ( ici en Normandie ) virus transmis : mosaïque du chou-fleur et / ou du navet ( rare )." example_title: "Ravageur & maladie" - text: "Traitement juste après le triage , un traitement contre la fusariose et contre la mouche grise sur cette variété car elle sera semé après betteraves." example_title: "Maladie & ravageur" - text: "Nous voulons des coquelicots ! Le coquelicot héberge notamment les virus de la jaunisse grave , jaunisse modérée et occidentale de la betterave , virus latent italien de l'artichaut , virus de la mosaïque du navet , virus X de la pomme de terre et le virus du flétrissement de la fève." example_title: "5 maladies" - text: "Plus j’ai du recul sur ma situation d’ancien taupin plus je me dis qu’il faut vraiment cramer les prepas et les écoles d’ingé/de commerce." example_title: "Taupin - prépa" - text: "Vous savez Taupin et ses problèmes de gonades mal hydratées ?Bah c'est aussi sec, la Loire." example_title: "Taupin - sec" - text: "#MercrediCestPermis je vous présente taupin et scuti. Un fléau qui va grandir avec l'arrêt des neonicotinoide. Deux ravageurs de racines qui sont friands de blé maïs pomme de terre et autres cultures Peut provoquer la perte totale. #agriculture #FrAgTW" example_title: "Taupin - ravageur" - text: "Thon juste saisi , crème de betterave , une petite rouille dont j'ignore la constitution , légumes de saison Ça va comme ça ? " example_title: "Rouille - sauce" - text: "Rouille de la # betterave sucrière causée par # Uromyces betae # urédospores # phytopathologie" example_title: "Rouille - maladie" - text: "Colzas qui rougissaient précocement , avec de l ’ oidium , dégâts de campagnols , mouche du chou . . ." example_title: "Mouche - ravageur" - text: "Vacances de Noël : les touristes français visitent Paris à bord d’un bateau mouche." example_title: "Mouche - bateau" --- ### How to use You can use this model directly with a pipeline for token classification: ```python >>> from transformers import pipeline >>> pipe = pipeline(model="ChouBERT/ChouBERT-32-plant-health-ner", aggregation_strategy="simple") >>> pipe(" Attaque de rouille brune en Dordogne sur du blé tendre variété Oregrain !") [] >>> pipe("Soupe de poisson toute prête de carrefour avec fromage râpé, croûtons à l'ail et rouille #TeamFeignasse.") [{'entity_group': 'Maladie', 'score': 0.80249035, 'word': '', 'start': 11, 'end': 12}, {'entity_group': 'Maladie', 'score': 0.80133665, 'word': 'rouille brune', 'start': 12, 'end': 25}] ```
Augustvember/WokkaBot9
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - vi license: apache-2.0 tags: - hf-asr-leaderboard - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 model-index: - name: HuyenNguyen results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # HuyenNguyen This model is a fine-tuned version of [Huyen2310/FPT-S15000](https://huggingface.co/Huyen2310/FPT-S15000) on the Common Voice 11.0 dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 450 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0+cu116 - Datasets 2.8.0 - Tokenizers 0.13.2
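The card stops short of showing inference. A minimal sketch with the transformers ASR pipeline — the repo id below is a hypothetical placeholder, since the card does not state where this fine-tune was pushed:

```python
from transformers import pipeline

# Hypothetical repo id; substitute the actual Hub path of this fine-tune.
asr = pipeline("automatic-speech-recognition", model="Huyen2310/HuyenNguyen")
print(asr("sample.wav")["text"])  # expects a local audio file
```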
Augustvember/WokkaBot99
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-Taxi-v3 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.56 +/- 2.71 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python model = load_from_hub(repo_id="renee127/q-Taxi-v3", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
Augustvember/test
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Example Fine-Tuned Model for Unit 2 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) Describe your model here ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('Arch4ngel/ddpm-celebahq-finetuned-butterflies-2epochs') image = pipeline().images[0] image ```
Augustvember/wokka
[ "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- license: creativeml-openrail-m tags: - text-to-image - safetensors - stable-diffusion --- ### Wriggly's-Extra-Gum-artstyle-ponydiffusion Dreambooth model trained by Kagerage with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb) Or you can run your new concept via `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb) As the name suggests, this model was trained on that weird hand-drawn-looking artwork that you'd find on the inside of a pack of Wriggly's Extra gum, on top of Pony-diffusion-v2. PROMPTING ADVICE: Best results come from using a trigger phrase like "gvgtgm traditional print lineart drawing", or "gvgtgm traditional print badly drawn lineart drawing" (Oof...) for an image closer to the training data. Prompt for either "anthro/feral male/female/whatever pony/whatever" or "anthro/feral male/female/whatever pony/whatever with tail/whatever"; anything else should be added at the end, so a prompt would look like "gvgtgm traditional print lineart drawing, solo anthro female pony with tail, sweater and jeans, snow, park, trees" ![0](https://huggingface.co/Kagerage/wriggly-s-extra-gum-artstyle-ponydiffusion/resolve/main/sample_images/00445-3775115334-gvgtgm%20traditional%20print%20lineart%20drawing%2C%20solo%20anthro%20female%20pony%20with%20tail%2C%20sweater%20and%20jeans%2C%20snow%2C%20park%2C%20trees.png) Negative prompt: "ugly, red, green, blue, clipped highlights, contrast", or for solo images "ugly, multiple, red, green, blue, clipped highlights, contrast", or exclude "ugly" for a (possibly) more training-data-faithful image. Recommended CFG is 7. Non-anthro ponies are seemingly very difficult to get, and I'm not sure how to make this model generate them. Sample pictures of this concept: ![1](https://huggingface.co/Kagerage/wriggly-s-extra-gum-artstyle-ponydiffusion/resolve/main/sample_images/00451-250025809-gvgtgm%20traditional%20print%20lineart%20drawing%2C%20solo%20anthro%20female%20unicorn%20pony%20with%20wings%2C%20sweater%20and%20jeans%2C%20city%2C%20night.png) ![2](https://huggingface.co/Kagerage/wriggly-s-extra-gum-artstyle-ponydiffusion/resolve/main/sample_images/00446-646835085-gvgtgm%20traditional%20print%20lineart%20drawing%2C%20solo%20anthro%20female%20pony%20with%20tail%2C%20sweater%20and%20jeans%2C%20field%2C%20dusk.png) ![3](https://huggingface.co/Kagerage/wriggly-s-extra-gum-artstyle-ponydiffusion/resolve/main/sample_images/00551-426350136-gvgtgm%20traditional%20print%20lineart%20drawing%2C%20anthro%20male%20pony%2C%20sweater%20and%20jeans%2C%20in%20crowded%20movie%20theater.png)
Augustvember/wokka2
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- language: - en license: unknown tags: - stable-diffusion - text-to-image --- A reupload of 2 models from a 🧲 link that I found interesting. That 🧲 contains several safetensors models, 200 GB in total.
Aurora/community.afpglobal
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Huggy library_name: ml-agents --- # **ppo** Agent playing **Huggy** This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub. ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy 2. Write your model_id: dracero/ppo-Huggy 3. Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
Ayham/albert_gpt2_summarization_xsum
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:xsum", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
Fusion-in-Decoder (FiD) is a model described in the following paper: > Izacard, Gautier, and Édouard Grave. [Leveraging Passage Retrieval with Generative Models for Open Domain Question Answering](https://aclanthology.org/2021.eacl-main.74/). _Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume_. 2021. We have replicated FiD training with our Wikipedia corpus variants and incorporated the model into our [PyGaggle](https://github.com/castorini/pygaggle) neural text ranking library. Our own efforts are described in the paper entitled: > Pre-Processing Matters! Improved Wikipedia Corpora for Open-Domain Question Answering. This is a FiD-large reader model for the wiki-all-6-3 corpus variant trained on the Natural Questions dataset.
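The fusion mechanism itself is compact even though the released reader is meant to be used through PyGaggle: each (question, passage) pair is encoded independently, and a single decoder cross-attends over the concatenation of all encoder outputs. A rough sketch of that idea on top of a stock T5 — illustrative only, not this checkpoint's actual loading code:

```python
import torch
from transformers import T5ForConditionalGeneration, T5Tokenizer
from transformers.modeling_outputs import BaseModelOutput

tok = T5Tokenizer.from_pretrained("t5-base")
model = T5ForConditionalGeneration.from_pretrained("t5-base")

question = "who wrote the iliad?"
passages = [
    "The Iliad is an ancient Greek epic traditionally attributed to Homer.",
    "Homer was a Greek poet credited with the Iliad and the Odyssey.",
]

# Encode each (question, passage) pair independently.
enc = tok([f"question: {question} context: {p}" for p in passages],
          return_tensors="pt", padding=True)
hidden = model.encoder(input_ids=enc.input_ids,
                       attention_mask=enc.attention_mask).last_hidden_state

# Fuse: concatenate all encoder states into one long sequence
# that the single decoder cross-attends over.
fused = hidden.reshape(1, -1, hidden.size(-1))
mask = enc.attention_mask.reshape(1, -1)

out = model.generate(encoder_outputs=BaseModelOutput(last_hidden_state=fused),
                     attention_mask=mask, max_new_tokens=8)
print(tok.decode(out[0], skip_special_tokens=True))
```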
Ayham/albert_roberta_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- license: cc0-1.0 --- I created this embedding for SD 2.x 768x768 models; it turns everything into your favorite Christmas-classic Animagic stop-motion style, as popularized by Rudolph the Red-Nosed Reindeer and Santa Claus is Coming to Town, among several others produced by the same studio! The Unreleased Christmas Stop Motion Mario Kart Movie! ![messages_0 (9).png](https://s3.amazonaws.com/moonup/production/uploads/1671856643956-632177f5b8fc9e78c2ff68d9.png) Prompt: mario kart toy, (rnknbss16 :1.3), highly textured, figurine Negative prompt: cgi, 3d render, videogame Steps: 34, Sampler: Euler a, CFG scale: 7, Seed: 2737353293, Size: 768x768, Model: SD 2.0_Standard_v2-1_768-ema-pruned, Denoising strength: 0.79, Mask blur: 3, aesthetic_score: 4.9 The Upcoming Stop Action Pikachu Movie! ![05329-459369051-pikachu in the style of rnknbss16.png](https://s3.amazonaws.com/moonup/production/uploads/1671856736417-632177f5b8fc9e78c2ff68d9.png) Prompt: pikachu in the style of rnknbss16 Steps: 30, Sampler: Euler a, CFG scale: 7, Seed: 459369051, Size: 768x768, Model: SD 2.0_Standard_v2-1_768-ema-pruned, aesthetic_score: 5.2 ![05330-4076512951-pikachu in the style of rnknbss16-100.png](https://s3.amazonaws.com/moonup/production/uploads/1671856739194-632177f5b8fc9e78c2ff68d9.png) Prompt: pikachu in the style of rnknbss16-100 Steps: 30, Sampler: Euler a, CFG scale: 7, Seed: 4076512951, Size: 768x768, Model: SD 2.0_Standard_v2-1_768-ema-pruned, aesthetic_score: 5.2 Some 2022 Holiday Ads for the Latest Celebs! Donald Trump ![05356-1397465632-a close up of (donald trump_1.) in the style of (rnknbss16 _1.0).png](https://s3.amazonaws.com/moonup/production/uploads/1671856809340-632177f5b8fc9e78c2ff68d9.png) Prompt: a close up of (donald trump:1.) in the style of (rnknbss16 :1.0) Negative prompt: blurry, text, words Steps: 29, Sampler: Euler a, CFG scale: 7, Seed: 1397465632, Size: 768x768, Model: SD 2.0_Standard_v2-1_768-ema-pruned, aesthetic_score: 5.4 Morgan Freeman ![05372-1868403973-morgan freeman in the style of (rnknbss16 _1.0).png](https://s3.amazonaws.com/moonup/production/uploads/1671856831368-632177f5b8fc9e78c2ff68d9.png) Prompt: morgan freeman in the style of (rnknbss16 :1.0) Steps: 29, Sampler: DPM++ 2S a Karras, CFG scale: 7, Seed: 1868403973, Size: 768x768, Model: SD 2.0_Standard_v2-1_768-ema-pruned, aesthetic_score: 5.7 Barack Obama ![05801-3661737292-barack obama in the style of rnknbss16v2-775.png](https://s3.amazonaws.com/moonup/production/uploads/1671857079900-632177f5b8fc9e78c2ff68d9.png) Prompt: barack obama in the style of rnknbss16v2-775 Steps: 47, Sampler: Euler a, CFG scale: 7, Seed: 3661737292, Size: 768x768, Model: SD 2.0_Standard_v2-1_768-ema-pruned And Lastly, The Remake of A Lifetime, Hogwarts Castle From the New Harry Potter Series ![02340-2909664084-Hogwarts school of witchcraft and wizardry in the style of (rnknbss16 _1.0), highly detailed, intricate.png](https://s3.amazonaws.com/moonup/production/uploads/1671857189186-632177f5b8fc9e78c2ff68d9.png) Prompt: Hogwarts school of witchcraft and wizardry in the style of (rnknbss16 :1.0), highly detailed, intricate Negative prompt: blurry Steps: 60, Sampler: Euler a, CFG scale: 7, Seed: 2909664084, Size: 768x768, Model: SD 2.0_Standard_512-depth-ema, Denoising strength: 0.66, Mask blur: 3, aesthetic_score: 6.2 Notes on the use of these: So I didn't really get a chance to fine-tune them as well as I would have liked, but I wanted to get them out there for people to enjoy, so I've included the best of what I have.
All of these were trained with 90-ish upscaled screen grabs from high-quality DVDs of just the 2 movies mentioned above. I did use some of the letters, postcards, and packages from the opening credits scenes in hopes of being able to reproduce those or something similar (haven't tried), so you will probably want to include the usual "words, text, letters, logos, watermarks..." in your negative prompts to try to weed those out. I also included some of the limited 2d artwork found in those movies, again in hopes of being able to generate that style as well, but that hasn't seemed to affect much except possibly when generating things that have a lot of 2d variations (e.g. comic book characters), so specifying 3d, or that you want a doll, model, or toy of the thing, might help a lot with prompting. Otherwise, just saying "thing in the style of rnknbss16" should do the trick! The Models: They're all 16 vectors. rnknbss16: pretty good, but was trained too far and/or fast and tends to make hybrid elf/Santa creatures out of everything, and it's hard to get it to do anything else, although if your concept is strong or present enough in the model it can do pretty well (e.g. Cinderella's castle, which is on EVERYTHING Disney). Models rnknbss16-100 through rnknbss16-150 do much better; however, they do less well with people and faces and are better suited for things, creatures, animals, scenery, places, etc. rnknbss16v2: pretty sure this one is overtrained by a good deal, but you might have success with it. rnknbss16v2-750 and rnknbss16v2-775 are the sweet spot for people and characters with this v2 model; it also tends to give clearer outputs without looking as "fuzzy" or "blurry", at almost the same quality as the VintageHelper embedding. Which brings me to mixing this with things: Using VintageHelper tends to enhance the "old school" vibes and film-grain look as well as thematic props and other elements that may appear in the scene, and the PhotoHelper embedding tends to create more "clay" models out of things, like with the Hogwarts castle, where it made a wide-angle clay diorama model of sorts, which was cool and unexpected (see below). ![05345-3448665914-Hogwarts castle in the style of (rnknbss16 _1.2), highly detailed, very textured, intricate, shallow depth of field, photohelper.png](https://s3.amazonaws.com/moonup/production/uploads/1671860648087-632177f5b8fc9e78c2ff68d9.png) Prompt: Hogwarts castle in the style of (rnknbss16 :1.2), highly detailed, very textured, intricate, shallow depth of field, photohelper Negative prompt: blurry, text, words Steps: 50, Sampler: Euler a, CFG scale: 7, Seed: 3448665914, Size: 768x768, Model: SD 2.0_Standard_v2-1_768-ema-pruned, aesthetic_score: 5.6
Ayham/bert_bert_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
Fusion-in-Decoder (FiD) is a model described in the following paper: > Izacard, Gautier, and Édouard Grave. [Leveraging Passage Retrieval with Generative Models for Open Domain Question Answering](https://aclanthology.org/2021.eacl-main.74/). _Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume_. 2021. We have replicated FiD training with our Wikipedia corpus variants and incorporated the model into our [PyGaggle](https://github.com/castorini/pygaggle) neural text ranking library. Our own efforts are described in the paper entitled: > Pre-Processing Matters! Improved Wikipedia Corpora for Open-Domain Question Answering. This is a FiD-large reader model for the wiki-all-6-3 corpus variant trained on the TriviaQA dataset.
Ayham/bert_gpt2_summarization_xsum
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:xsum", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
2022-12-24T05:01:19Z
--- license: creativeml-openrail-m tags: - conversational --- This is a mirror of [ConvoGPT-1.3B](https://huggingface.co/hakurei/convogpt-staging/tree/main/1.3b-uft), kept for archiving purposes.
Ayham/bertgpt2_cnn
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- language: - vi license: apache-2.0 tags: - hf-asr-leaderboard - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 model-index: - name: Hieu Dam Model Shuffle results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Hieu Dam Model Shuffle This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the HieuDam dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 450 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0+cu116 - Datasets 2.8.0 - Tokenizers 0.13.2
Ayham/distilbert_distilgpt2_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 648.00 +/- 151.10 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga dfm794 -f logs/ python enjoy.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga dfm794 -f logs/ rl_zoo3 enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python train.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga dfm794 ``` ## Hyperparameters ```python OrderedDict([('batch_size', 128), ('buffer_size', 100000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.0001), ('learning_starts', 100000), ('n_timesteps', 10000000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ```
Ayham/distilbert_gpt2_summarization_xsum
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:xsum", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- language: - zh tags: - unilm license: apache-2.0 --- # unilm-base-chinese-news-sum ```sh pip install git+https://github.com/Liadrinz/transformers-unilm # install the HuggingFace-compatible UniLM model code ``` ```py from unilm import UniLMTokenizer, UniLMForConditionalGeneration news_article = ( "12月23日,河北石家庄。8岁哥哥轻车熟路哄睡弟弟,姿势标准动作熟练。" "妈妈杨女士表示:哥哥很喜欢弟弟,因为心思比较细,自己平时带孩子的习惯他都会跟着学习," "哄睡孩子也都会争着来,技巧很娴熟,两人在一块很有爱,自己感到很幸福,平时帮了自己很大的忙,感恩有这么乖的宝宝。" ) tokenizer = UniLMTokenizer.from_pretrained("Yuang/unilm-base-chinese-news-sum") model = UniLMForConditionalGeneration.from_pretrained("Yuang/unilm-base-chinese-news-sum") inputs = tokenizer(news_article, return_tensors="pt") output_ids = model.generate(**inputs, max_new_tokens=16) output_text = tokenizer.decode(output_ids[0]) print(output_text) # "[CLS] <news_article> [SEP] <news_summary> [SEP]" news_summary = output_text.split("[SEP]")[1].strip() print(news_summary) ```
Ayham/distilbert_roberta_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
null
--- language: - ja license: other tags: - whisper-event - generated_from_trainer datasets: - Elite35P-Server/EliteVoiceProject metrics: - wer model-index: - name: Whisper Base Japanese Elite results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Elite35P-Server/EliteVoiceProject twitter type: Elite35P-Server/EliteVoiceProject config: twitter split: test args: twitter metrics: - name: Wer type: wer value: 17.073170731707318 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Base Japanese Elite This model is a fine-tuned version of [openai/whisper-base](https://huggingface.co/openai/whisper-base) on the Elite35P-Server/EliteVoiceProject twitter dataset. It achieves the following results on the evaluation set: - Loss: 0.4385 - Wer: 17.0732 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: constant_with_warmup - lr_scheduler_warmup_steps: 200 - training_steps: 10000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:------:|:-----:|:---------------:|:-------:| | 0.0002 | 111.0 | 1000 | 0.2155 | 9.7561 | | 0.0001 | 222.0 | 2000 | 0.2448 | 12.1951 | | 0.0 | 333.0 | 3000 | 0.2674 | 13.4146 | | 0.0 | 444.0 | 4000 | 0.2943 | 15.8537 | | 0.0 | 555.0 | 5000 | 0.3182 | 17.0732 | | 0.0 | 666.0 | 6000 | 0.3501 | 18.9024 | | 0.0 | 777.0 | 7000 | 0.3732 | 16.4634 | | 0.0 | 888.0 | 8000 | 0.4025 | 17.0732 | | 0.0 | 999.0 | 9000 | 0.4178 | 20.1220 | | 0.0 | 1111.0 | 10000 | 0.4385 | 17.0732 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.1+cu117 - Datasets 2.8.1.dev0 - Tokenizers 0.13.2
Ayham/ernie_gpt2_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Example Fine-Tuned Model for Unit 2 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) Describe your model here ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('zhow/ddpm-celebahq-finetuned-butterflies-2epochs') image = pipeline().images[0] image ```
Ayham/roberta_gpt2_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
31
null
--- license: cc-by-nc-sa-4.0 language: - en thumbnail: "https://huggingface.co/GeneralAwareness/Ppgra/resolve/main/photograph of johnny depp - ppgra.png" tags: - stable-diffusion - v2 - text-to-image - image-to-image - Embedding --- Textual Inversion embedding by General Awareness, for SD 2.x, trained on 768x768 images from various sources. Install by downloading the .pt embedding and putting it in the \embeddings folder. A pen, pencil, graphite, etc. embedding (depending on how it is invoked and on your negative prompt) that was created with 16 vectors. Use keyword: ppgra Image in ppgra style ![Single Samples](https://huggingface.co/GeneralAwareness/Ppgra/resolve/main/tmpij1nl1l_-imagein.png) ![Single_Samples](https://huggingface.co/GeneralAwareness/Ppgra/resolve/main/tmpwficiu2c-imagein.png) In the style of ppgra ![Single Samples](https://huggingface.co/GeneralAwareness/Ppgra/resolve/main/tmpvlbc0h6s-inthestyleof.png) ![Single_Samples](https://huggingface.co/GeneralAwareness/Ppgra/resolve/main/tmpx3sim0jf-instyleof.png) Photograph of xxxx, ppgra ![Single_Samples](https://huggingface.co/GeneralAwareness/Ppgra/resolve/main/photograph%20of%20johnny%20depp%20-%20ppgra.png)
Ayham/roberta_roberta_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Huggy library_name: ml-agents --- # **ppo** Agent playing **Huggy** This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub. ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy 2. Write your model_id: hasarinduperera/ppo-Huggy 3. Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
Ayham/robertagpt2_xsum
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- license: cc-by-4.0 --- # Description: A tokenizer trained on Korean mathematics-education texts. - Training-data copyright: Doosan Dong-A, Mirae-N, Visang Edu, Jihaksa, and the Korea Institute for Curriculum and Evaluation - Model checkpoint: skt/kogpt2-base-v2 - This tokenizer is under development and is used for personal research purposes. Since the training-data copyright does not belong to me, please refrain from unauthorized use. # What to do - I am developing a language model usable for various tasks in mathematics education. # How to use ```python >>> from transformers import AutoTokenizer >>> old_tokenizer = AutoTokenizer.from_pretrained('skt/kogpt2-base-v2') >>> tokenizer = AutoTokenizer.from_pretrained('jnsulee/ko_math_tokenizer') example = "다항식의 덧셈은 동류항끼리 모아서 정리한다.이때 두 다항식의 차 A-B는 A에 B의 각 항의 부호를 바꾼 -B를 더한 것과 같다. 즉, A-B=A+(-B)이다." old_tokenizer.tokenize(example) #['▁다', '항', '식의', '▁덧', '셈', '은', '▁동', '류', '항', '끼리', '▁모아서', '▁정리한', '다.', '이', '때', '▁두', '▁다', '항', '식의', '▁차', '▁A', '-B', '는', '▁A', '에', '▁B', '의', '▁각', '▁항의', '▁부', '호를', '▁바꾼', '▁-', 'B', '를', '▁더한', '▁것과', '▁같다.', '▁\n', '즉', ',', '▁A', '-B', '=', 'A', '+', '(', '-B', ')이다.'] tokenizer.tokenize(example) #['▁다항식', '의', '▁덧셈', '은', '▁동류항', '끼리', '▁모아서', '▁정리', '한다', '.', '이', '때', '▁두', '▁다항식', '의', '▁차', '▁A', '-', 'B', '는', '▁A', '에', '▁B', '의', '▁각', '▁항의', '▁부호', '를', '▁바꾼', '▁-', 'B', '를', '▁더한', '▁것', '과', '▁같', '다', '.', '▁\n즉', ',', '▁A', '-', 'B', '=', 'A', '+', '(', '-', 'B', ')', '이다', '.'] ```
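For reference, a domain tokenizer like this can be produced from the base checkpoint with the fast tokenizers' `train_new_from_iterator`. A minimal sketch — the toy corpus stands in for the copyrighted training texts, and it assumes the base checkpoint loads as a fast tokenizer:

```python
from transformers import AutoTokenizer

old_tokenizer = AutoTokenizer.from_pretrained("skt/kogpt2-base-v2")

# Toy stand-in corpus; the real tokenizer was trained on math-education texts.
corpus = ["다항식의 덧셈은 동류항끼리 모아서 정리한다."] * 100

# 51200 matches the base checkpoint's vocabulary size.
new_tokenizer = old_tokenizer.train_new_from_iterator(corpus, vocab_size=51200)
print(new_tokenizer.tokenize("다항식의 덧셈"))
```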
Ayham/xlmroberta_gpt2_summarization_xsum
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:xsum", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
2022-12-24T07:02:02Z
--- license: openrail library_name: diffusers tags: - TPU - JAX - Flax - stable-diffusion - text-to-image language: - en thumbnail: https://huggingface.co/camenduru/plushies/resolve/main/samples.jpg datasets: - camenduru/plushies inference: false --- Maybe this is the first ever model trained on TPUs and converted to 🧨 PyTorch 🎊🎉 <br/> Trained on Google Cloud TPUs. ``` Runtime: 3h 26m 44s Steps: 18000 Precision: bf16 Learning Rate: 1e-6 ``` ![plushies](https://huggingface.co/camenduru/plushies/resolve/main/samples.jpg)
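Since the card declares `library_name: diffusers`, loading should follow the standard pipeline pattern. A minimal sketch — the prompt and dtype are my own choices, not from the card:

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("camenduru/plushies",
                                               torch_dtype=torch.float16)
pipe = pipe.to("cuda")

image = pipe("a cute dragon plushie on a bookshelf").images[0]
image.save("plushie.png")
```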
Ayham/xlmroberta_large_gpt2_summarization_cnndm
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- license: mit --- ## GPT-2 Tokenizer with unmerged digits A fork of the GPT-2 tokenizer, which **removes multi-digit tokens**: ```python from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained('cyrilzhang/gpt2-numfix') gpt2_tokenizer = AutoTokenizer.from_pretrained('gpt2') tokenizer('123.45') # [16, 17, 18, 13, 19, 20] gpt2_tokenizer('123.45') # [10163, 13, 2231] ``` Backward-compatible: ```python tokenizer.decode([10163, 46387]) # '<unused123> pigeon' gpt2_tokenizer.decode([10163, 46387]) # '123 pigeon' ``` - This is for my investigations into the arithmetic capabilities of large language models. There is no model here, only a tokenizer. - [PaLM](https://arxiv.org/abs/2204.02311) does this. I think it's very reasonable. - Many models (most famously, [GPT-3](https://arxiv.org/abs/2005.14165)) don't do this, because they use the GPT-2 tokenizer.
Ayham/xlnet_gpt2_summarization_xsum
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:xsum", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 551.50 +/- 169.38 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga jxiao -f logs/ python enjoy.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga jxiao -f logs/ rl_zoo3 enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python train.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga jxiao ``` ## Hyperparameters ```python OrderedDict([('batch_size', 32), ('buffer_size', 100000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.0001), ('learning_starts', 100000), ('n_timesteps', 3000000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ```
Ayham/xlnet_roberta_new_summarization_cnn_dailymail
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 685.00 +/- 174.94 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga SatCat -f logs/ python enjoy.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga SatCat -f logs/ rl_zoo3 enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python train.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga SatCat ``` ## Hyperparameters ```python OrderedDict([('batch_size', 32), ('buffer_size', 80000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.05), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 9e-05), ('learning_starts', 100000), ('n_timesteps', 1000000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ```
Ayjayo/DialoGPT-medium-AyjayoAI
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
2022-12-24T07:52:19Z
---
language:
- vi
license: apache-2.0
tags:
- hf-asr-leaderboard
- generated_from_trainer
datasets:
- mozilla-foundation/common_voice_11_0
model-index:
- name: HuyenNguyen
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to.
You should probably proofread and complete it, then remove this comment. -->

# HuyenNguyen

This model is a fine-tuned version of [Huyen2310/FPT-S15000](https://huggingface.co/Huyen2310/FPT-S15000) on the Common Voice 11.0 dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- training_steps: 450
- mixed_precision_training: Native AMP

### Training results

### Framework versions

- Transformers 4.26.0.dev0
- Pytorch 1.13.0+cu116
- Datasets 2.8.0
- Tokenizers 0.13.2
Aymene/opus-mt-en-ro-finetuned-en-to-ro
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
tags:
- FrozenLake-v1-4x4-no_slippery
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-FrozenLake-v1-4x4-noSlippery-h
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: FrozenLake-v1-4x4-no_slippery
      type: FrozenLake-v1-4x4-no_slippery
    metrics:
    - type: mean_reward
      value: 1.00 +/- 0.00
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **FrozenLake-v1**
This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**.

## Usage

```python
model = load_from_hub(repo_id="Hawk91/q-FrozenLake-v1-4x4-noSlippery-h", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
Ayran/DialoGPT-medium-harry-1
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
license: other
widget:
- text: "Christmas"
  example_title: "Christmas"
- text: "Lego"
  example_title: "Lego"
inference:
  parameters:
    temperature: 1.25
    repetition_penalty: 1.25
    min_length: 96
    max_length: 128
---

This is the model trained for this short video: https://www.youtube.com/shorts/hpIUKRTopAY

This AI generates Christmas gift ideas. This model was trained on a small dataset web-scraped from the Toys-R-Us website. This dataset consisted of search terms and the names of the best-selling items corresponding to said search terms. In total, 31 term-list pair training examples were used to train this model.
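The widget settings above double as generation parameters. A minimal inference sketch, assuming the checkpoint lives under a placeholder Hub id (`your-username/christmas-gift-ideas` is not the real path):

```python
from transformers import pipeline

# Placeholder repo id -- substitute the actual Hub path of this checkpoint.
generator = pipeline("text-generation", model="your-username/christmas-gift-ideas")

# Mirror the inference parameters declared in the card's widget section
ideas = generator(
    "Christmas",
    do_sample=True,          # temperature only takes effect when sampling
    temperature=1.25,
    repetition_penalty=1.25,
    min_length=96,
    max_length=128,
)
print(ideas[0]["generated_text"])
```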
Azaghast/GPT2-SCP-Miscellaneous
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
2022-12-24T09:44:16Z
---
library_name: stable-baselines3
tags:
- SpaceInvadersNoFrameskip-v4
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: DQN
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: SpaceInvadersNoFrameskip-v4
      type: SpaceInvadersNoFrameskip-v4
    metrics:
    - type: mean_reward
      value: 544.50 +/- 73.26
      name: mean_reward
      verified: false
---

# **DQN** Agent playing **SpaceInvadersNoFrameskip-v4**
This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4**
using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3)
and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo).

The RL Zoo is a training framework for Stable Baselines3
reinforcement learning agents,
with hyperparameter optimization and pre-trained agents included.

## Usage (with SB3 RL Zoo)

RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/>
SB3: https://github.com/DLR-RM/stable-baselines3<br/>
SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib

Install the RL Zoo (with SB3 and SB3-Contrib):
```bash
pip install rl_zoo3
```

```
# Download model and save it into the logs/ folder
python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga javiervela -f logs/
python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/
```

If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do:
```
python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga javiervela -f logs/
python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/
```

## Training (with the RL Zoo)
```
python -m rl_zoo3.train --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/
# Upload the model and generate video (when possible)
python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga javiervela
```

## Hyperparameters
```python
OrderedDict([('batch_size', 32),
             ('buffer_size', 100000),
             ('env_wrapper',
              ['stable_baselines3.common.atari_wrappers.AtariWrapper']),
             ('exploration_final_eps', 0.01),
             ('exploration_fraction', 0.1),
             ('frame_stack', 4),
             ('gradient_steps', 1),
             ('learning_rate', 0.0001),
             ('learning_starts', 100000),
             ('n_timesteps', 1000000.0),
             ('optimize_memory_usage', False),
             ('policy', 'CnnPolicy'),
             ('target_update_interval', 1000),
             ('train_freq', 4),
             ('normalize', False)])
```
BJTK2/model_name
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
tags:
- FrozenLake-v1-4x4-no_slippery
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-FrozenLake-v1-4x4-noSlippery
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: FrozenLake-v1-4x4-no_slippery
      type: FrozenLake-v1-4x4-no_slippery
    metrics:
    - type: mean_reward
      value: 1.00 +/- 0.00
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **FrozenLake-v1**
This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**.

## Usage

```python
model = load_from_hub(repo_id="Utkarsh-Verma/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
BSC-LT/roberta-base-bne-capitel-ner
[ "pytorch", "roberta", "token-classification", "es", "dataset:bne", "dataset:capitel", "arxiv:1907.11692", "arxiv:2107.07253", "transformers", "national library of spain", "spanish", "bne", "capitel", "ner", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "RobertaForTokenClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
---
tags:
- FrozenLake-v1-4x4-no_slippery
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-FrozenLake-v1-4x4-noSlippery
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: FrozenLake-v1-4x4-no_slippery
      type: FrozenLake-v1-4x4-no_slippery
    metrics:
    - type: mean_reward
      value: 1.00 +/- 0.00
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **FrozenLake-v1**
This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**.

## Usage

```python
model = load_from_hub(repo_id="anuoluwa/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
BSC-LT/roberta-base-ca
[ "pytorch", "roberta", "fill-mask", "ca", "transformers", "masked-lm", "BERTa", "catalan", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
18
null
---
language:
- vi
license: apache-2.0
tags:
- hf-asr-leaderboard
- generated_from_trainer
datasets:
- mozilla-foundation/common_voice_11_0
model-index:
- name: HuyenNguyen
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to.
You should probably proofread and complete it, then remove this comment. -->

# HuyenNguyen

This model is a fine-tuned version of [Huyen2310/FPT-S15000](https://huggingface.co/Huyen2310/FPT-S15000) on the Common Voice 11.0 dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- training_steps: 450
- mixed_precision_training: Native AMP

### Training results

### Framework versions

- Transformers 4.26.0.dev0
- Pytorch 1.13.0+cu116
- Datasets 2.8.0
- Tokenizers 0.13.2
BSC-LT/roberta-large-bne-sqac
[ "pytorch", "roberta", "question-answering", "es", "dataset:BSC-TeMU/SQAC", "arxiv:1907.11692", "arxiv:2107.07253", "transformers", "national library of spain", "spanish", "bne", "qa", "question answering", "license:apache-2.0", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
15
null
---
license: creativeml-openrail-m
language:
- en
tags:
- text-to-image
- stable-diffusion
#datasets:
#- Name/name
inference: true
widget:
- text: disc demon
- text: disc demon
---
Babelscape/wikineural-multilingual-ner
[ "pytorch", "tensorboard", "safetensors", "bert", "token-classification", "de", "en", "es", "fr", "it", "nl", "pl", "pt", "ru", "multilingual", "dataset:Babelscape/wikineural", "transformers", "named-entity-recognition", "sequence-tagger-model", "license:cc-by-nc-sa-4.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
41608
null
---
language:
- uz
license: apache-2.0
tags:
- automatic-speech-recognition
- mozilla-foundation/common_voice_10_0
- AIRI_UZ
- generated_from_trainer
datasets:
- common_voice_10_0
model-index:
- name: uzbek_stt
  results: []
---

## The best version of the STT built by the members of the Oyqiz team!

### Foziljon To'lqinov, Shaxboz Zohidov, Abduraxim Jabborov, Yahyoxon Rahimov, Mahmud Jumanazarov

This model was trained for 100 epochs on [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) with the MOZILLA-FOUNDATION/COMMON_VOICE_10_0 - UZ dataset.

Training results:
- Loss: 0.1963
- Word error rate: 0.2102

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.00003
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- distributed_type: multi-GPU
- num_devices: 2
- total_train_batch_size: 16
- total_eval_batch_size: 16
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- num_epochs: 100.0
- mixed_precision_training: Native AMP
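A minimal transcription sketch with the `transformers` pipeline; the repo id below is a placeholder, since the card only gives the model name `uzbek_stt`:

```python
from transformers import pipeline

# Placeholder repo id -- substitute the actual Hub path of this checkpoint.
asr = pipeline("automatic-speech-recognition", model="your-username/uzbek_stt")

# wav2vec2-style models expect 16 kHz mono audio
print(asr("uzbek_sample.wav")["text"])
```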
Bagus/wav2vec2-xlsr-japanese-speech-emotion-recognition
[ "pytorch", "wav2vec2", "audio-classification", "ja", "dataset:jtes", "transformers", "audio", "speech", "speech-emotion-recognition", "has_space" ]
audio-classification
{ "architectures": [ "HubertForSequenceClassification" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
26
null
---
tags:
- Taxi-v3
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-Taxi-v3
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: Taxi-v3
      type: Taxi-v3
    metrics:
    - type: mean_reward
      value: 7.56 +/- 2.71
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **Taxi-v3**
This is a trained model of a **Q-Learning** agent playing **Taxi-v3**.

## Usage

```python
model = load_from_hub(repo_id="anuoluwa/q-Taxi-v3", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
Bakkes/BakkesModWiki
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
license: creativeml-openrail-m
tags:
- pytorch
- diffusers
- stable-diffusion
- text-to-image
- diffusion-models-class
- dreambooth-hackathon
- science
widget:
- text: 3d diagram of the solar system in the style of pprotein
---

# DreamBooth model for the pprotein concept trained by jonathang on the jonathang/dreambooth-hackathon-images-protein3 dataset.

This is a Stable Diffusion model fine-tuned on the pprotein concept with DreamBooth. It can be used by modifying the `instance_prompt`: **a 3d model of pprotein**

This model was created as part of the DreamBooth Hackathon 🔥. Visit the [organisation page](https://huggingface.co/dreambooth-hackathon) for instructions on how to take part!

## Examples

<table>
  <tr>
    <td>Generated Image of "a 3d diagram of pprotein in the style of "<br>"Kandinsky"</td>
    <td>Generated Image of "a 3d diagram of pprotein in the style of "<br>"Van Gogh"</td>
    <td>Generated Image of "a 3d diagram of pprotein in the style of "<br>"Warhol"</td>
  </tr>
  <tr>
    <td align="center"><img src="https://imgur.com/lhDA041.png" style="height:200px"></td>
    <td align="center"><img src="https://imgur.com/iug4k7D.png" style="height:200px"></td>
    <td align="center"><img src="https://imgur.com/eIMiTVG.png" style="height:200px"></td>
  </tr>
  <tr>
    <td>Generated Image of "a 3d diagram of pprotein in the style of "<br>"Leonardo da Vinci"</td>
    <td>Generated Image of "a 3d diagram of pprotein in the style of "<br>"Frida Kahlo"</td>
    <td>Generated Image of "a 3d diagram of pprotein in the style of "<br>"Salvador Dalí"</td>
  </tr>
  <tr>
    <td align="center"><img src="https://imgur.com/hzKGWC2.png" style="height:200px"></td>
    <td align="center"><img src="https://imgur.com/loc8rLa.png" style="height:200px"></td>
    <td align="center"><img src="https://imgur.com/8nK81TA.png" style="height:200px"></td>
  </tr>
  <tr>
    <td>Generated Image of "Tree in the style of"<br>"3d diagram of pprotein"</td>
    <td>Generated Image of "Soda Can in the style of"<br>"3d diagram of pprotein"</td>
    <td>Generated Image of "Vase in the style of"<br>"3d diagram of pprotein"</td>
  </tr>
  <tr>
    <td align="center"><img src="https://imgur.com/czOlY11.png" style="height:200px"></td>
    <td align="center"><img src="https://imgur.com/uhwueGs.png" style="height:200px"></td>
    <td align="center"><img src="https://imgur.com/gSIrHAh.png" style="height:200px"></td>
  </tr>
</table>

## Description

This is a Stable Diffusion model fine-tuned on `thing` images for the science theme.

## Usage

```python
from diffusers import StableDiffusionPipeline

pipeline = StableDiffusionPipeline.from_pretrained('jonathang/pprotein-thing')
image = pipeline().images[0]
image
```
Balgow/prod_desc
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
library_name: stable-baselines3
tags:
- SpaceInvadersNoFrameskip-v4
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: DQN
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: SpaceInvadersNoFrameskip-v4
      type: SpaceInvadersNoFrameskip-v4
    metrics:
    - type: mean_reward
      value: 599.50 +/- 152.72
      name: mean_reward
      verified: false
---

# **DQN** Agent playing **SpaceInvadersNoFrameskip-v4**
This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4**
using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3)
and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo).

The RL Zoo is a training framework for Stable Baselines3
reinforcement learning agents,
with hyperparameter optimization and pre-trained agents included.

## Usage (with SB3 RL Zoo)

RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/>
SB3: https://github.com/DLR-RM/stable-baselines3<br/>
SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib

```
# Download model and save it into the logs/ folder
python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga renee127 -f logs/
python enjoy.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/
```

If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do:
```
python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga renee127 -f logs/
rl_zoo3 enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/
```

## Training (with the RL Zoo)
```
python train.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/
# Upload the model and generate video (when possible)
python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga renee127
```

## Hyperparameters
```python
OrderedDict([('batch_size', 32),
             ('buffer_size', 100000),
             ('env_wrapper',
              ['stable_baselines3.common.atari_wrappers.AtariWrapper']),
             ('exploration_final_eps', 0.01),
             ('exploration_fraction', 0.1),
             ('frame_stack', 4),
             ('gradient_steps', 1),
             ('learning_rate', 0.0001),
             ('learning_starts', 100000),
             ('n_timesteps', 1000000.0),
             ('optimize_memory_usage', False),
             ('policy', 'CnnPolicy'),
             ('target_update_interval', 1000),
             ('train_freq', 4),
             ('normalize', False)])
```
BaptisteDoyen/camembert-base-xnli
[ "pytorch", "tf", "camembert", "text-classification", "fr", "dataset:xnli", "transformers", "zero-shot-classification", "xnli", "nli", "license:mit", "has_space" ]
zero-shot-classification
{ "architectures": [ "CamembertForSequenceClassification" ], "model_type": "camembert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
405474
null
---
license: creativeml-openrail-m
---

This is a fine-tuned version of the [Stable Diffusion model available from KerasCV](https://github.com/keras-team/keras-cv/tree/master/keras_cv/models/stable_diffusion). Fine-tuning code is available [here](https://github.com/sayakpaul/stabe-diffusion-keras-ft).

The weights provided in this repository are **only for research purposes**. To know more about the potential use cases and implications, refer to [this resource](https://huggingface.co/CompVis/stable-diffusion-v1-4).
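A minimal usage sketch with KerasCV, assuming the fine-tuned weights are downloaded as an `.h5` file (the filename is a placeholder, and the exact loading path may differ from what the fine-tuning repo produces):

```python
import keras_cv

# Build the KerasCV Stable Diffusion model, then swap in the fine-tuned
# diffusion-model weights (the .h5 filename below is a placeholder).
model = keras_cv.models.StableDiffusion(img_width=512, img_height=512)
model.diffusion_model.load_weights("finetuned_diffusion_model.h5")

# Generate one image from a text prompt
images = model.text_to_image("a photograph of a dog in a field", batch_size=1)
```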
Barbarameerr/Barbara
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
license: mit
tags:
- pytorch
- diffusers
- unconditional-image-generation
- diffusion-models-class
---

# Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class)

This model is a diffusion model for unconditional image generation of cute 🦋.

## Usage

```python
from diffusers import DDPMPipeline

pipeline = DDPMPipeline.from_pretrained('rafaelg/sd-class-butterflies-32')
image = pipeline().images[0]
image
```
Begimay/Task
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
tags:
- FrozenLake-v1-4x4-no_slippery
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-FrozenLake-v1-4x4-noSlippery
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: FrozenLake-v1-4x4-no_slippery
      type: FrozenLake-v1-4x4-no_slippery
    metrics:
    - type: mean_reward
      value: 1.00 +/- 0.00
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **FrozenLake-v1**
This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**.

## Usage

```python
model = load_from_hub(repo_id="greedypiggy/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
BertChristiaens/EmojiPredictor
[ "pytorch", "distilbert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "DistilBertForTokenClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
2022-12-24T16:55:42Z
Explain Humanity

Muhammad Naeem
Can you explain humanity

Humanity refers to the human race, which includes all people on Earth. It is a term that encompasses the shared characteristics and qualities that define what it means to be human, such as the capacity for reason, emotion, creativity, and culture.

As a species, humans have a long and complex history that stretches back over millions of years. Over time, humans have evolved and developed a wide range of cultures, languages, and societies, and have made significant advances in many fields, including science, technology, and the arts.

Humanity is also characterized by its capacity for empathy, compassion, and altruism, as well as its capacity for conflict and aggression. These traits, along with many others, make up the complex and diverse nature of humanity.
BhanuSama/gpt2-finetuned-xsum
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
library_name: stable-baselines3
tags:
- SpaceInvadersNoFrameskip-v4
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: DQN
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: SpaceInvadersNoFrameskip-v4
      type: SpaceInvadersNoFrameskip-v4
    metrics:
    - type: mean_reward
      value: 637.00 +/- 222.97
      name: mean_reward
      verified: false
---

# **DQN** Agent playing **SpaceInvadersNoFrameskip-v4**
This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4**
using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3)
and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo).

The RL Zoo is a training framework for Stable Baselines3
reinforcement learning agents,
with hyperparameter optimization and pre-trained agents included.

## Usage (with SB3 RL Zoo)

RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/>
SB3: https://github.com/DLR-RM/stable-baselines3<br/>
SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib

```
# Download model and save it into the logs/ folder
python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga ataunal -f logs/
python enjoy.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/
```

If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do:
```
python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga ataunal -f logs/
rl_zoo3 enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/
```

## Training (with the RL Zoo)
```
python train.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/
# Upload the model and generate video (when possible)
python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga ataunal
```

## Hyperparameters
```python
OrderedDict([('batch_size', 32),
             ('buffer_size', 100000),
             ('env_wrapper',
              ['stable_baselines3.common.atari_wrappers.AtariWrapper']),
             ('exploration_final_eps', 0.01),
             ('exploration_fraction', 0.1),
             ('frame_stack', 4),
             ('gradient_steps', 1),
             ('learning_rate', 0.0001),
             ('learning_starts', 100000),
             ('n_timesteps', 1000000.0),
             ('optimize_memory_usage', False),
             ('policy', 'CnnPolicy'),
             ('target_update_interval', 1000),
             ('train_freq', 4),
             ('normalize', False)])
```
Bharathdamu/wav2vec2-large-xls-r-300m-hindi3-colab
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
license: creativeml-openrail-m
tags:
- text-to-image
- stable-diffusion
---

Model trained by (https://linktr.ee/kukuhtw) using fast-dreambooth (https://github.com/TheLastBen/fast-stable-diffusion)

<b>Use keyword prompt</b>: 👉👉👉 <b><i>ganjarpranowo</i></b> 👈👈👈

<b>Sample prompts:</b>

<i>ganjarpranowo digital painting, highly detailed, intricate, 3d model, unreal engine realistic render, 8 k, micro detail, intricate, elegant, highly detailed, centered, digital painting, artstation, smooth, sharp focus, illustration, artgerm, tomasz alen kopera, wlop</i>

<i>ganjarpranowo 3d model, unreal engine realistic render, 8 k, micro detail, intricate, elegant, highly detailed, centered, digital painting, artstation, smooth, sharp focus, illustration, artgerm, tomasz alen kopera, wlop</i>

<i>portrait of ganjarpranowo headshot by mucha, sharp focus, elegant, render, octane, detailed, award winning photography, masterpiece, trending artstation</i>

<i>portrait of ganjarpranowo wearing white uniform, trending artstation</i>

<i>portrait of smiling male ganjarpranowo, GTA cover, photo realistic, highly detailed, perfect face, art by artgerm</i>

<i>portrait of ganjarpranowo wearing casual shirt, digital painting</i>

<i>portrait of ganjarpranowo style pencil sketch</i>

<i>ganjarpranowo, ultra detailed, brush strokes, digital painting, cinematic, wlop artstation, closeup, pixiv, intense, joyful glare, photorealistic, overpowering, makoto shinkai, rossdraws, andy warhol</i>

<i>portrait of a happy ganjarpranowo gentleman, male, cheerful, detailed face, 21th century, highly detailed, cinematic lighting, digital art painting by greg rutkowski</i>

<i>ganjarpranowo as a character from pixar, au naturel, PS2, PS1, hyper detailed, digital art, trending in artstation, cinematic lighting, studio quality, smooth render, unreal engine 5 rendered, octane rendered, art style by klimt and nixeu and ian sprigger and wlop and krenz cushart.</i>

Sample result pictures of this concept:

![0](https://huggingface.co/kukuhtw/ganjarpranowo/resolve/main/ea95d4e5-a82f-45dc-8b3e-5775c4e60450.jpeg)
![1](https://huggingface.co/kukuhtw/ganjarpranowo/resolve/main/f1897a80-6f31-4e92-9679-12cb3de95517.jpeg)
![2](https://huggingface.co/kukuhtw/ganjarpranowo/resolve/main/7d591163-2c61-4af0-b030-4fa32a09241b.jpeg)
![3](https://huggingface.co/kukuhtw/ganjarpranowo/resolve/main/23290256-65cd-4ef5-be85-c45bf17bf92d.jpeg)
![4](https://huggingface.co/kukuhtw/ganjarpranowo/resolve/main/bee4af5b-bd57-4d37-8ce4-eb465858ce67.jpeg)
![5](https://huggingface.co/kukuhtw/ganjarpranowo/resolve/main/5f7341d7-552c-4b61-bb64-248ae9cc161d.jpeg)
![6](https://huggingface.co/kukuhtw/ganjarpranowo/resolve/main/33f9ffd1-a5f2-4917-86bd-dfd663fd2a7f.jpeg)
![7](https://huggingface.co/kukuhtw/ganjarpranowo/resolve/main/c1f511dc-17b7-44d7-8955-7361d1f38021.jpeg)
![8](https://huggingface.co/kukuhtw/ganjarpranowo/resolve/main/d9ed787f-122f-4e55-a48e-9ea627e53a9c.jpeg)
![9](https://huggingface.co/kukuhtw/ganjarpranowo/resolve/main/d9a8420c-30c4-434c-b27a-9ff522cd58be.jpeg)
![10](https://huggingface.co/kukuhtw/ganjarpranowo/resolve/main/a4a45ff8-a4ee-4212-a92e-c0c4fe65d93e.jpeg)

Note: The image that was created may not be satisfactory, so repeat the process until you get a satisfactory result. You can also customize the text prompt according to your creativity.

https://ratakan.com/product/clip-art-ganjar-pranowo-EAA
https://kukuhtw.gumroad.com/l/oxyuv
Bia18/Beatriz
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
library_name: stable-baselines3
tags:
- SpaceInvadersNoFrameskip-v4
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: DQN
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: SpaceInvadersNoFrameskip-v4
      type: SpaceInvadersNoFrameskip-v4
    metrics:
    - type: mean_reward
      value: 877.00 +/- 321.75
      name: mean_reward
      verified: false
---

# **DQN** Agent playing **SpaceInvadersNoFrameskip-v4**
This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4**
using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3)
and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo).

The RL Zoo is a training framework for Stable Baselines3
reinforcement learning agents,
with hyperparameter optimization and pre-trained agents included.

## Usage (with SB3 RL Zoo)

RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/>
SB3: https://github.com/DLR-RM/stable-baselines3<br/>
SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib

```
# Download model and save it into the logs/ folder
python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga chavicoski -f logs/
python enjoy.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/
```

If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do:
```
python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga chavicoski -f logs/
rl_zoo3 enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/
```

## Training (with the RL Zoo)
```
python train.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/
# Upload the model and generate video (when possible)
python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga chavicoski
```

## Hyperparameters
```python
OrderedDict([('batch_size', 32),
             ('buffer_size', 100000),
             ('env_wrapper',
              ['stable_baselines3.common.atari_wrappers.AtariWrapper']),
             ('exploration_final_eps', 0.01),
             ('exploration_fraction', 0.1),
             ('frame_stack', 4),
             ('gradient_steps', 1),
             ('learning_rate', 0.0001),
             ('learning_starts', 100000),
             ('n_timesteps', 10000000.0),
             ('optimize_memory_usage', False),
             ('policy', 'CnnPolicy'),
             ('target_update_interval', 1000),
             ('train_freq', 4),
             ('normalize', False)])
```
Biasface/DDDC
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
null
---
tags:
- autotrain
- stable-diffusion
- text-to-image
datasets:
- clem/autotrain-data-friedeberg-0OIYU5UZXE
co2_eq_emissions:
  emissions: 23.966933198902527
---

# Model Trained Using AutoTrain

- Problem type: Dreambooth
- Model ID: 2603979056
- CO2 Emissions (in grams): 23.9669
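AutoTrain DreamBooth checkpoints are ordinarily loaded with `diffusers`. A minimal sketch; the repo id and instance prompt below are placeholders, since the card only lists the numeric model ID:

```python
import torch
from diffusers import StableDiffusionPipeline

# Placeholder repo id -- the card only gives the AutoTrain model ID 2603979056.
pipe = StableDiffusionPipeline.from_pretrained(
    "clem/autotrain-friedeberg-2603979056", torch_dtype=torch.float16
).to("cuda")

# Placeholder instance prompt -- DreamBooth models respond to their trained concept token.
image = pipe("a photo of friedeberg").images[0]
image.save("friedeberg.png")
```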
BigSalmon/FormalRobertaa
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
---
tags:
- autotrain
- stable-diffusion
- text-to-image
datasets:
- clem/autotrain-data-maxdekdt-AER2SE090K
co2_eq_emissions:
  emissions: 86.12664300406121
license: openrail
language:
- en
---

# Model Trained Using AutoTrain

- Problem type: Dreambooth
- Model ID: 2604679068
- CO2 Emissions (in grams): 86.1266
BigSalmon/FormalRobertaaa
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- conll2003
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: bert-finetuned-ner
  results:
  - task:
      name: Token Classification
      type: token-classification
    dataset:
      name: conll2003
      type: conll2003
      config: conll2003
      split: train
      args: conll2003
    metrics:
    - name: Precision
      type: precision
      value: 0.937448149991704
    - name: Recall
      type: recall
      value: 0.9508582968697409
    - name: F1
      type: f1
      value: 0.9441056061492189
    - name: Accuracy
      type: accuracy
      value: 0.9864308000235474
---

<!-- This model card has been generated automatically according to the information the Trainer had access to.
You should probably proofread and complete it, then remove this comment. -->

# bert-finetuned-ner

This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the conll2003 dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0614
- Precision: 0.9374
- Recall: 0.9509
- F1: 0.9441
- Accuracy: 0.9864

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1     | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| 0.089         | 1.0   | 1756 | 0.0749          | 0.9104    | 0.9280 | 0.9191 | 0.9812   |
| 0.0331        | 2.0   | 3512 | 0.0614          | 0.9299    | 0.9470 | 0.9384 | 0.9858   |
| 0.0169        | 3.0   | 5268 | 0.0614          | 0.9374    | 0.9509 | 0.9441 | 0.9864   |

### Framework versions

- Transformers 4.25.1
- Pytorch 1.13.0+cu116
- Datasets 2.8.0
- Tokenizers 0.13.2
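To try the checkpoint, the `transformers` token-classification pipeline works out of the box. A minimal sketch, assuming a placeholder Hub id:

```python
from transformers import pipeline

# Placeholder repo id -- substitute the actual Hub path of this checkpoint.
ner = pipeline(
    "token-classification",
    model="your-username/bert-finetuned-ner",
    aggregation_strategy="simple",  # merge wordpieces into whole entity spans
)

print(ner("Hugging Face is based in New York City."))
```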
BigSalmon/GPT2HardArticleEasyArticle
[ "pytorch", "jax", "tensorboard", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
---
license: mit
tags:
- pytorch
- diffusers
- unconditional-image-generation
- diffusion-models-class
---

# Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class)

This model is a diffusion model for unconditional image generation of cute 🦋.

## Usage

```python
from diffusers import DDPMPipeline

pipeline = DDPMPipeline.from_pretrained('KatrinaGal/sd-class-butterflies-32')
image = pipeline().images[0]
image
```
BigSalmon/GPTHeHe
[ "pytorch", "gpt2", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- clinc_oos
metrics:
- accuracy
model-index:
- name: distilbert-base-uncased-distilled-clinc
  results:
  - task:
      name: Text Classification
      type: text-classification
    dataset:
      name: clinc_oos
      type: clinc_oos
      config: plus
      split: train
      args: plus
    metrics:
    - name: Accuracy
      type: accuracy
      value: 0.9467741935483871
---

<!-- This model card has been generated automatically according to the information the Trainer had access to.
You should probably proofread and complete it, then remove this comment. -->

# distilbert-base-uncased-distilled-clinc

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the clinc_oos dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2082
- Accuracy: 0.9468

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 48
- eval_batch_size: 48
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 9

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| No log        | 1.0   | 318  | 1.1524          | 0.7265   |
| 1.4545        | 2.0   | 636  | 0.5695          | 0.8639   |
| 1.4545        | 3.0   | 954  | 0.3365          | 0.9148   |
| 0.5287        | 4.0   | 1272 | 0.2569          | 0.9326   |
| 0.2676        | 5.0   | 1590 | 0.2310          | 0.9397   |
| 0.2676        | 6.0   | 1908 | 0.2184          | 0.9458   |
| 0.2079        | 7.0   | 2226 | 0.2121          | 0.9448   |
| 0.1885        | 8.0   | 2544 | 0.2094          | 0.9461   |
| 0.1885        | 9.0   | 2862 | 0.2082          | 0.9468   |

### Framework versions

- Transformers 4.25.1
- Pytorch 1.13.0+cu116
- Datasets 2.8.0
- Tokenizers 0.13.2
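An intent-prediction sketch using the raw model classes rather than the pipeline, again with a placeholder Hub id:

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Placeholder repo id -- substitute the actual Hub path of this checkpoint.
model_id = "your-username/distilbert-base-uncased-distilled-clinc"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)

inputs = tokenizer("how do I reset my online banking password?", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(dim=-1).item()])  # predicted intent label
```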
BigSalmon/GPTNeo350MInformalToFormalLincoln
[ "pytorch", "gpt_neo", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPTNeoForCausalLM" ], "model_type": "gpt_neo", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- conll2003
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: bert-finetuned-ner
  results:
  - task:
      name: Token Classification
      type: token-classification
    dataset:
      name: conll2003
      type: conll2003
      config: conll2003
      split: train
      args: conll2003
    metrics:
    - name: Precision
      type: precision
      value: 0.9339513325608343
    - name: Recall
      type: recall
      value: 0.9495119488387749
    - name: F1
      type: f1
      value: 0.9416673620963031
    - name: Accuracy
      type: accuracy
      value: 0.9859745687878966
---

<!-- This model card has been generated automatically according to the information the Trainer had access to.
You should probably proofread and complete it, then remove this comment. -->

# bert-finetuned-ner

This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the conll2003 dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0641
- Precision: 0.9340
- Recall: 0.9495
- F1: 0.9417
- Accuracy: 0.9860

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1     | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| 0.0887        | 1.0   | 1756 | 0.0753          | 0.9149    | 0.9318 | 0.9233 | 0.9816   |
| 0.033         | 2.0   | 3512 | 0.0647          | 0.9304    | 0.9493 | 0.9398 | 0.9858   |
| 0.0176        | 3.0   | 5268 | 0.0641          | 0.9340    | 0.9495 | 0.9417 | 0.9860   |

### Framework versions

- Transformers 4.25.1
- Pytorch 1.13.0+cu116
- Datasets 2.8.0
- Tokenizers 0.13.2
BigSalmon/GPTNeo350MInformalToFormalLincoln3
[ "pytorch", "gpt_neo", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPTNeoForCausalLM" ], "model_type": "gpt_neo", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
---
tags:
- Taxi-v3
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: Taxi-v3
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: Taxi-v3
      type: Taxi-v3
    metrics:
    - type: mean_reward
      value: 7.56 +/- 2.71
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **Taxi-v3**
This is a trained model of a **Q-Learning** agent playing **Taxi-v3**.

## Usage

```python
model = load_from_hub(repo_id="Convolution/Taxi-v3", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
BigSalmon/GPTNeo350MInformalToFormalLincoln5
[ "pytorch", "gpt_neo", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPTNeoForCausalLM" ], "model_type": "gpt_neo", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
null
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- accuracy
- f1
model-index:
- name: uzroberta-sentiment-analysis
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to.
You should probably proofread and complete it, then remove this comment. -->

# uzroberta-sentiment-analysis

This model is a fine-tuned version of [rifkat/uztext-3Gb-BPE-Roberta](https://huggingface.co/rifkat/uztext-3Gb-BPE-Roberta) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 1.0473
- Accuracy: 0.8257
- F1: 0.8287

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 6

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1     |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|
| 0.3411        | 1.0   | 1156 | 0.4061          | 0.8276   | 0.8303 |
| 0.2319        | 2.0   | 2312 | 0.4074          | 0.8336   | 0.8359 |
| 0.1629        | 3.0   | 3468 | 0.5250          | 0.8394   | 0.8416 |
| 0.1082        | 4.0   | 4624 | 0.8780          | 0.8180   | 0.8219 |
| 0.0712        | 5.0   | 5780 | 0.9741          | 0.8321   | 0.8349 |
| 0.0537        | 6.0   | 6936 | 1.0473          | 0.8257   | 0.8287 |

### Framework versions

- Transformers 4.27.3
- Pytorch 2.0.0+cu117
- Datasets 2.11.0
- Tokenizers 0.12.1
BigSalmon/InfillFormalLincoln
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: LunarLander-v2
      type: LunarLander-v2
    metrics:
    - type: mean_reward
      value: 279.08 +/- 16.88
      name: mean_reward
      verified: false
---

# **PPO** Agent playing **LunarLander-v2**

This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

## Usage (with Stable-baselines3)

A minimal loading sketch (the repo id and filename below are placeholders, since the card does not state them):

```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# Placeholders -- substitute this model's actual repo id and checkpoint filename.
checkpoint = load_from_hub(repo_id="<user>/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)
```
BigSalmon/InformalToFormalLincoln15
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
null
--- license: openrail --- # ⚠️ Type of model/library unknown. # Feel free to open a Pull request # for integration of the huggingface model hub # into the corresponding library =)
BigSalmon/InformalToFormalLincoln16
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
---
tags:
- Taxi-v3
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-Taxi-v3
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: Taxi-v3
      type: Taxi-v3
    metrics:
    - type: mean_reward
      value: 7.56 +/- 2.71
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **Taxi-v3**

This is a trained model of a **Q-Learning** agent playing **Taxi-v3**.

## Usage

```python
import gym

# load_from_hub is the helper defined in the Deep RL course notebook.
model = load_from_hub(repo_id="mjschock/q-Taxi-v3", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
BigSalmon/InformalToFormalLincoln21
[ "pytorch", "gpt2", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
---
tags:
- unity-ml-agents
- ml-agents
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-Huggy
library_name: ml-agents
---

# **ppo** Agent playing **Huggy**

This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)

The Documentation: https://github.com/huggingface/ml-agents#get-started

We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub:

### Resume the training

```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your Agent play

You can watch your agent **playing directly in your browser**:

1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy
2. Step 1: Write your model_id: RegisGraptin/ppo-Huggy
3. Step 2: Select your *.nn / *.onnx file
4. Click on Watch the agent play 👀
BigSalmon/InformalToFormalLincoln23
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### ANYTHING-MIDJOURNEY-V-4.1 Dreambooth model trained by Joeythemonster with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb) Or you can run your new concept via `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb) Sample pictures of this concept:
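The card points to notebooks but includes no inline code. A minimal `diffusers` sketch, assuming the Dreambooth checkpoint is published on the Hub (the repo id and prompt wording below are placeholders/assumptions, not taken from the card):

```python
import torch
from diffusers import StableDiffusionPipeline

# Placeholder repo id -- substitute the actual Dreambooth checkpoint.
pipe = StableDiffusionPipeline.from_pretrained(
    "<user>/anything-midjourney-v-4-1", torch_dtype=torch.float16
).to("cuda")

image = pipe("a portrait of a knight, anything-midjourney style").images[0]
image.save("sample.png")
```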
BigSalmon/InformalToFormalLincoln25
[ "pytorch", "gpt2", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy - f1 model-index: - name: geocoder_model results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # geocoder_model This model is a fine-tuned version of [bert-base-multilingual-cased](https://huggingface.co/bert-base-multilingual-cased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.2632 - Accuracy: {'accuracy': 0.9005447386872337} - F1: {'f1': 0.8323636363636362} ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------------------------------:|:--------------------------:| | 0.26 | 1.0 | 4636 | 0.2405 | {'accuracy': 0.8972547327544361} | {'f1': 0.827866630523177} | | 0.2069 | 2.0 | 9272 | 0.2632 | {'accuracy': 0.9005447386872337} | {'f1': 0.8323636363636362} | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Tokenizers 0.13.2
BigSalmon/MrLincoln12
[ "pytorch", "gpt2", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: ppo
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: LunarLander-v2
      type: LunarLander-v2
    metrics:
    - type: mean_reward
      value: 286.28 +/- 15.81
      name: mean_reward
      verified: false
---

# **ppo** Agent playing **LunarLander-v2**

This is a trained model of a **ppo** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

## Usage (with Stable-baselines3)

A minimal loading sketch (the repo id and filename below are placeholders, since the card does not state them):

```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# Placeholders -- substitute this model's actual repo id and checkpoint filename.
checkpoint = load_from_hub(repo_id="<user>/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)
```
BigSalmon/MrLincoln13
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- tags: - spacy - token-classification language: - en model-index: - name: en_engagement_spl_RoBERTa_acad results: - task: name: NER type: token-classification metrics: - name: NER Precision type: precision value: 0.0 - name: NER Recall type: recall value: 0.0 - name: NER F Score type: f_score value: 0.0 - task: name: TAG type: token-classification metrics: - name: TAG (XPOS) Accuracy type: accuracy value: 0.0 - task: name: LEMMA type: token-classification metrics: - name: Lemma Accuracy type: accuracy value: 0.0 - task: name: UNLABELED_DEPENDENCIES type: token-classification metrics: - name: Unlabeled Attachment Score (UAS) type: f_score value: 0.0 - task: name: LABELED_DEPENDENCIES type: token-classification metrics: - name: Labeled Attachment Score (LAS) type: f_score value: 0.0 - task: name: SENTS type: token-classification metrics: - name: Sentences F-Score type: f_score value: 0.8508399109 --- | Feature | Description | | --- | --- | | **Name** | `en_engagement_spl_RoBERTa_acad` | | **Version** | `0.6.0` | | **spaCy** | `>=3.4.4,<3.5.0` | | **Default Pipeline** | `transformer`, `parser`, `tagger`, `ner`, `attribute_ruler`, `lemmatizer`, `trainable_transformer`, `spancat` | | **Components** | `transformer`, `parser`, `tagger`, `ner`, `attribute_ruler`, `lemmatizer`, `trainable_transformer`, `spancat` | | **Vectors** | 0 keys, 0 unique vectors (0 dimensions) | | **Sources** | n/a | | **License** | n/a | | **Author** | [n/a]() | ### Label Scheme <details> <summary>View label scheme (124 labels for 4 components)</summary> | Component | Labels | | --- | --- | | **`parser`** | `ROOT`, `acl`, `acomp`, `advcl`, `advmod`, `agent`, `amod`, `appos`, `attr`, `aux`, `auxpass`, `case`, `cc`, `ccomp`, `compound`, `conj`, `csubj`, `csubjpass`, `dative`, `dep`, `det`, `dobj`, `expl`, `intj`, `mark`, `meta`, `neg`, `nmod`, `npadvmod`, `nsubj`, `nsubjpass`, `nummod`, `oprd`, `parataxis`, `pcomp`, `pobj`, `poss`, `preconj`, `predet`, `prep`, `prt`, `punct`, `quantmod`, `relcl`, `xcomp` | | **`tagger`** | `$`, `''`, `,`, `-LRB-`, `-RRB-`, `.`, `:`, `ADD`, `AFX`, `CC`, `CD`, `DT`, `EX`, `FW`, `HYPH`, `IN`, `JJ`, `JJR`, `JJS`, `LS`, `MD`, `NFP`, `NN`, `NNP`, `NNPS`, `NNS`, `PDT`, `POS`, `PRP`, `PRP$`, `RB`, `RBR`, `RBS`, `RP`, `SYM`, `TO`, `UH`, `VB`, `VBD`, `VBG`, `VBN`, `VBP`, `VBZ`, `WDT`, `WP`, `WP$`, `WRB`, `XX`, ```` | | **`ner`** | `CARDINAL`, `DATE`, `EVENT`, `FAC`, `GPE`, `LANGUAGE`, `LAW`, `LOC`, `MONEY`, `NORP`, `ORDINAL`, `ORG`, `PERCENT`, `PERSON`, `PRODUCT`, `QUANTITY`, `TIME`, `WORK_OF_ART` | | **`spancat`** | `MONOGLOSS`, `ENDORSE`, `ENDOPHORIC`, `ENTERTAIN`, `PRONOUNCE`, `DENY`, `COUNTER`, `JUSTIFYING`, `ATTRIBUTE`, `SOURCES`, `CITATION`, `CONCUR` | </details> ### Accuracy | Type | Score | | --- | --- | | `DEP_UAS` | 0.00 | | `DEP_LAS` | 0.00 | | `DEP_LAS_PER_TYPE` | 0.00 | | `SENTS_P` | 82.24 | | `SENTS_R` | 88.13 | | `SENTS_F` | 85.08 | | `TAG_ACC` | 0.00 | | `ENTS_F` | 0.00 | | `ENTS_P` | 0.00 | | `ENTS_R` | 0.00 | | `LEMMA_ACC` | 0.00 | | `SPANS_SC_F` | 72.96 | | `SPANS_SC_P` | 75.47 | | `SPANS_SC_R` | 70.61 | | `TRAINABLE_TRANSFORMER_LOSS` | 651.38 | | `SPANCAT_LOSS` | 93570.83 |
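The card documents the pipeline components and label scheme but gives no usage example. A minimal sketch, assuming the package is installed locally and that the span categorizer writes to spaCy's default `sc` spans key (an assumption; the card does not name the key):

```python
import spacy

# Assumes the en_engagement_spl_RoBERTa_acad package is installed.
nlp = spacy.load("en_engagement_spl_RoBERTa_acad")

doc = nlp("This finding is arguably consistent with earlier studies.")
spans = doc.spans["sc"] if "sc" in doc.spans else []
for span in spans:
    print(span.text, span.label_)  # e.g. ENTERTAIN, ATTRIBUTE, ...
```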
BigSalmon/MrLincoln2
[ "pytorch", "tensorboard", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: bert-large-uncased-finetuned-bert-large-uncase-p2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-large-uncased-finetuned-bert-large-uncase-p2 This model is a fine-tuned version of [bert-large-uncased](https://huggingface.co/bert-large-uncased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.0980 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 0.1081 | 1.0 | 5696 | 0.0977 | | 0.1008 | 2.0 | 11392 | 0.0980 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.8.0 - Tokenizers 0.13.2
BigSalmon/MrLincoln7
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
pipeline_tag: text-classification
language: en
license: apache-2.0
tags:
- sentiment-analysis
widget:
- text: I am very happy.
  example_title: English
---

Enter a comment and find out its sentiment.
BigSalmon/MrLincoln8
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- license: other tags: - generated_from_trainer model-index: - name: opt-350m results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # opt-350m This model is a fine-tuned version of [facebook/opt-350m](https://huggingface.co/facebook/opt-350m) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Tokenizers 0.13.2
BigSalmon/ParaphraseParentheses
[ "pytorch", "tensorboard", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: LunarLander-v2
      type: LunarLander-v2
    metrics:
    - type: mean_reward
      value: 288.67 +/- 17.67
      name: mean_reward
      verified: false
---

# **PPO** Agent playing **LunarLander-v2**

This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

## Usage (with Stable-baselines3)

A minimal loading sketch (the repo id and filename below are placeholders, since the card does not state them):

```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# Placeholders -- substitute this model's actual repo id and checkpoint filename.
checkpoint = load_from_hub(repo_id="<user>/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)
```
BigSalmon/PhraseBerta
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- license: mit tags: - generated_from_trainer model-index: - name: roberta-retrained-350k results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-retrained-350k This model is a fine-tuned version of [bitsanlp/roberta-retrained_100k](https://huggingface.co/bitsanlp/roberta-retrained_100k) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 12 ### Training results ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.8.0 - Tokenizers 0.13.2
BigSalmon/Points
[ "pytorch", "tensorboard", "gpt2", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
---
tags:
- unity-ml-agents
- ml-agents
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-Huggy
library_name: ml-agents
---

# **ppo** Agent playing **Huggy**

This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)

The Documentation: https://github.com/huggingface/ml-agents#get-started

We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub:

### Resume the training

```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your Agent play

You can watch your agent **playing directly in your browser**:

1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy
2. Step 1: Write your model_id: Musha-the-Yusha/ppo-Huggy
3. Step 2: Select your *.nn / *.onnx file
4. Click on Watch the agent play 👀
BigSalmon/T52
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
8
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: ohss117/bert-finetuned-ner_v3 results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # ohss117/bert-finetuned-ner_v3 This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.0106 - Validation Loss: 0.0562 - Epoch: 4 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 4390, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: mixed_float16 ### Training results | Train Loss | Validation Loss | Epoch | |:----------:|:---------------:|:-----:| | 0.1762 | 0.0682 | 0 | | 0.0449 | 0.0563 | 1 | | 0.0263 | 0.0554 | 2 | | 0.0160 | 0.0547 | 3 | | 0.0106 | 0.0562 | 4 | ### Framework versions - Transformers 4.25.1 - TensorFlow 2.10.1 - Datasets 2.8.0 - Tokenizers 0.13.2
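A minimal TensorFlow inference sketch for the checkpoint named in the card (assuming the weights and tokenizer are available under that repo id, which the card implies but does not guarantee):

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForTokenClassification

repo_id = "ohss117/bert-finetuned-ner_v3"  # taken from the card title
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = TFAutoModelForTokenClassification.from_pretrained(repo_id)

inputs = tokenizer("Hugging Face is based in New York City.", return_tensors="tf")
pred_ids = tf.argmax(model(**inputs).logits, axis=-1)[0].numpy()

tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
for token, pred in zip(tokens, pred_ids):
    print(token, model.config.id2label[int(pred)])
```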
BigSalmon/TS3
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible", "has_space" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
---
language:
- en
tags:
- webgpt
- regression
- reward-model
license: "apache-2.0"
datasets:
- openai/webgpt_comparisons
metrics:
- accuracy
---

# Reward Model pretrained on openai/webgpt_comparisons

Reward model finetuned from an existing pretrained model.

Things that align with the original papers:

* Overfits easily using rank loss
* Small learning rate

Differences from the papers:

* Small models perform badly due to lack of world knowledge: the validation accuracy doesn't even reach 60%. OpenAI's RM had 6B parameters.
* Trained using an 80-20 train-validation split with torch AMP settings

Other models I tried:

* bloomz-560m: the embedding size isn't worth the training, since this dataset only contains English prompts
* gpt2-large: not stable
* gpt2-base: not stable

# Performance on validation split

| model | val acc | val loss (rank loss) |
|---|---|---|
| [roberta-base](https://huggingface.co/theblackcat102/roberta-base-webgpt-rm) | 56.21 | 0.71 |
| [roberta-large](https://huggingface.co/theblackcat102/roberta-large-webgpt-rm) | 57.89 | 0.67 |
| [electra-base](https://huggingface.co/theblackcat102/electra-base-webgpt-rm) | 57.02 | 0.70 |
| [electra-large](https://huggingface.co/theblackcat102/electra-large-webgpt-rm) | 58.75 | 0.69 |

Tensorboard logs are located under runs/

# Note:

* You will have to reweight this model's output so that the mean reward equals 0
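A minimal scoring sketch using one of the checkpoints from the table above, assuming the reward model is a single-logit sequence-classification head and that question and answer are packed as a sentence pair (both assumptions; the card does not specify the input format):

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo_id = "theblackcat102/electra-large-webgpt-rm"
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSequenceClassification.from_pretrained(repo_id)
model.eval()


def reward(question: str, answer: str) -> float:
    inputs = tokenizer(question, answer, return_tensors="pt", truncation=True)
    with torch.no_grad():
        return model(**inputs).logits[0].item()


# A higher score should indicate the preferred answer.
print(reward("What causes tides?", "Mainly the gravitational pull of the Moon."))
```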
Bilz/DialoGPT-small-harrypotter
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
language:
- en
license: creativeml-openrail-m
tags:
- stable-diffusion
- stable-diffusion-diffusers
- text-to-image
- diffusers
inference: true
---

# Smelon.com backup
# Anything backup copy

# Anything V3

Welcome to Anything V3 - a latent diffusion model for weebs. This model is intended to produce high-quality, highly detailed anime style with just a few prompts. Like other anime-style Stable Diffusion models, it also supports danbooru tags to generate images.

e.g. **_1girl, white hair, golden eyes, beautiful eyes, detail, flower meadow, cumulonimbus clouds, lighting, detailed sky, garden_**

## 🧨 Diffusers

This model can be used just like any other Stable Diffusion model. For more information, please have a look at [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion).

You can also export the model to [ONNX](https://huggingface.co/docs/diffusers/optimization/onnx), [MPS](https://huggingface.co/docs/diffusers/optimization/mps) or [FLAX/JAX]().

```python
from diffusers import StableDiffusionPipeline
import torch

model_id = "Linaqruf/anything-v3.0"
branch_name = "diffusers"

pipe = StableDiffusionPipeline.from_pretrained(model_id, revision=branch_name, torch_dtype=torch.float16)
pipe = pipe.to("cuda")

prompt = "pikachu"
image = pipe(prompt).images[0]
image.save("./pikachu.png")
```

## Examples

Below are some examples of images generated using this model:

**Anime Girl:**
![Anime Girl](https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/1girl.png)
```
1girl, brown hair, green eyes, colorful, autumn, cumulonimbus clouds, lighting, blue sky, falling leaves, garden
Steps: 50, Sampler: DDIM, CFG scale: 12
```
**Anime Boy:**
![Anime Boy](https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/1boy.png)
```
1boy, medium hair, blonde hair, blue eyes, bishounen, colorful, autumn, cumulonimbus clouds, lighting, blue sky, falling leaves, garden
Steps: 50, Sampler: DDIM, CFG scale: 12
```
**Scenery:**
![Scenery](https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/scenery.png)
```
scenery, shibuya tokyo, post-apocalypse, ruins, rust, sky, skyscraper, abandoned, blue sky, broken window, building, cloud, crane machine, outdoors, overgrown, pillar, sunset
Steps: 50, Sampler: DDIM, CFG scale: 12
```

## License

This model is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage. The CreativeML OpenRAIL License specifies:

1. You can't use the model to deliberately produce or share illegal or harmful outputs or content
2. The authors claim no rights on the outputs you generate; you are free to use them and are accountable for their use, which must not go against the provisions set in the license
3. You may re-distribute the weights and use the model commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M with all your users (please read the license entirely and carefully)

[Please read the full license here](https://huggingface.co/spaces/CompVis/stable-diffusion-license)
BinksSachary/DialoGPT-small-shaxx
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
---
license: creativeml-openrail-m
tags:
- pytorch
- diffusers
- stable-diffusion
- text-to-image
- diffusion-models-class
- dreambooth-hackathon
- food
widget:
- text: A photo of cmexsks drink with ice cream on top
---

# DreamBooth model for the cmexsks concept trained by shahp7575 on the shahp7575/chemex dataset.

This is a Stable Diffusion model fine-tuned on the cmexsks concept with DreamBooth. It can be used by modifying the `instance_prompt`: **a photo of cmexsks drink**

This model was created as part of the DreamBooth Hackathon 🔥. Visit the [organisation page](https://huggingface.co/dreambooth-hackathon) for instructions on how to take part!

## Description

This is a Stable Diffusion model fine-tuned on `drink` images for the food theme.

## Usage

```python
from diffusers import StableDiffusionPipeline

pipeline = StableDiffusionPipeline.from_pretrained('shahp7575/chemex-coffee')
# The pipeline requires a prompt; use the instance prompt from this card.
image = pipeline("a photo of cmexsks drink").images[0]
image
```
BlindMan820/Sarcastic-News-Headlines
[ "pytorch", "distilbert", "text-classification", "English", "dataset:Kaggle Dataset", "transformers", "Text", "Sequence-Classification", "Sarcasm", "DistilBert" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
28
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: 20NG_BERT_1E results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # 20NG_BERT_1E This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.1 - Datasets 2.8.0 - Tokenizers 0.13.2
Bloodwarrior/Chikfalay
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 268.86 +/- 11.55 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
BobBraico/distilbert-base-uncased-finetuned-imdb-accelerate
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - vi license: apache-2.0 tags: - hf-asr-leaderboard - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 model-index: - name: HuyenNguyen results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # HuyenNguyen This model is a fine-tuned version of [Huyen2310/FPT-S15000](https://huggingface.co/Huyen2310/FPT-S15000) on the Common Voice 11.0 dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 450 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0+cu116 - Datasets 2.8.0 - Tokenizers 0.13.2
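The card gives no inference snippet. A minimal sketch using the generic ASR pipeline (the repo id below is a placeholder, since the card does not say where this fine-tune was published; a local audio file and ffmpeg are assumed):

```python
from transformers import pipeline

# Placeholder repo id -- substitute the actual published checkpoint.
asr = pipeline("automatic-speech-recognition", model="<user>/HuyenNguyen")

# Path to a local Vietnamese audio file (assumed to exist).
print(asr("sample.wav")["text"])
```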
Brendan/cse244b-hw2-roberta
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
28
null
---
tags:
- FrozenLake-v1-4x4-no_slippery
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-FrozenLake-v1-4x4-noSlippery
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: FrozenLake-v1-4x4-no_slippery
      type: FrozenLake-v1-4x4-no_slippery
    metrics:
    - type: mean_reward
      value: 1.00 +/- 0.00
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **FrozenLake-v1**

This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**.

## Usage

```python
import gym

# load_from_hub is the helper defined in the Deep RL course notebook.
model = load_from_hub(repo_id="Paul123321/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
Broadus20/DialoGPT-small-harrypotter
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
---
tags:
- FrozenLake-v1-4x4-no_slippery
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-FrozenLake-v1-4x4-noSlippery
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: FrozenLake-v1-4x4-no_slippery
      type: FrozenLake-v1-4x4-no_slippery
    metrics:
    - type: mean_reward
      value: 1.00 +/- 0.00
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **FrozenLake-v1**

This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**.

## Usage

```python
import gym

# load_from_hub is the helper defined in the Deep RL course notebook.
model = load_from_hub(repo_id="hungchiayu/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
Brokette/projetCS
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "transformers" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: 20NG_ELECTRA_5E results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # 20NG_ELECTRA_5E This model is a fine-tuned version of [google/electra-small-discriminator](https://huggingface.co/google/electra-small-discriminator) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 2.8534 - Accuracy: 0.1733 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 2.9938 | 0.07 | 50 | 2.9881 | 0.1067 | | 2.9879 | 0.14 | 100 | 2.9852 | 0.1133 | | 2.9842 | 0.21 | 150 | 2.9796 | 0.1333 | | 2.9722 | 0.28 | 200 | 2.9653 | 0.1733 | | 2.9559 | 0.35 | 250 | 2.9458 | 0.18 | | 2.9368 | 0.42 | 300 | 2.9257 | 0.1667 | | 2.9234 | 0.49 | 350 | 2.9101 | 0.18 | | 2.9094 | 0.56 | 400 | 2.8926 | 0.1733 | | 2.898 | 0.64 | 450 | 2.8810 | 0.18 | | 2.8941 | 0.71 | 500 | 2.8715 | 0.18 | | 2.8742 | 0.78 | 550 | 2.8646 | 0.1533 | | 2.8765 | 0.85 | 600 | 2.8597 | 0.1867 | | 2.8738 | 0.92 | 650 | 2.8555 | 0.1733 | | 2.866 | 0.99 | 700 | 2.8534 | 0.1733 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.1 - Datasets 2.8.0 - Tokenizers 0.13.2
Brona/poc_de
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: 20NG_ALBERT_5E results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # 20NG_ALBERT_5E This model is a fine-tuned version of [albert-base-v2](https://huggingface.co/albert-base-v2) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.2209 - Accuracy: 0.6067 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 2.8602 | 0.07 | 50 | 2.5794 | 0.2133 | | 2.3635 | 0.14 | 100 | 2.0956 | 0.38 | | 2.1526 | 0.21 | 150 | 1.9011 | 0.4467 | | 1.9014 | 0.28 | 200 | 1.6340 | 0.5067 | | 1.6736 | 0.35 | 250 | 1.5457 | 0.5467 | | 1.5563 | 0.42 | 300 | 1.5041 | 0.5533 | | 1.4338 | 0.49 | 350 | 1.3933 | 0.5933 | | 1.3348 | 0.56 | 400 | 1.4123 | 0.54 | | 1.2879 | 0.64 | 450 | 1.3352 | 0.6333 | | 1.2864 | 0.71 | 500 | 1.3027 | 0.62 | | 1.2162 | 0.78 | 550 | 1.2734 | 0.6267 | | 1.1786 | 0.85 | 600 | 1.2695 | 0.5933 | | 1.1702 | 0.92 | 650 | 1.2379 | 0.5933 | | 1.2338 | 0.99 | 700 | 1.2209 | 0.6067 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.1 - Datasets 2.8.0 - Tokenizers 0.13.2
Bryanwong/wangchanberta-ner
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
tags:
- unity-ml-agents
- ml-agents
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-Huggy
library_name: ml-agents
---

# **ppo** Agent playing **Huggy**

This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)

The Documentation: https://github.com/huggingface/ml-agents#get-started

We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub:

### Resume the training

```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your Agent play

You can watch your agent **playing directly in your browser**:

1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy
2. Step 1: Write your model_id: akghxhs55/ppo-huggy
3. Step 2: Select your *.nn / *.onnx file
4. Click on Watch the agent play 👀
Buntan/xlm-roberta-base-finetuned-marc-en
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: cc-by-nc-nd-4.0 language: - en tags: - stable-diffusion - text-to-image - diffusers --- This model is named "Cinematic Diffusion". It has been trained using Stable Diffusion 1.5 to generate cinematic images. To utilize it, you must include the keyword "syberart" at the beginning of your prompt. This model performs best in the 16:9 aspect ratio, although it can also produce good results in a square format. It is well-suited for creating 'screengrabs' of movies from a variety of genres including historical, sci-fi, fantasy, spy, horror, western, mystery, comedy, superhero, anime, and more. It can also be used for creating realistic portraits, landscapes, and other types of images. For optimal results, it is recommended to use the default settings on Automatic 1111. It works on SD 1.5 and regular SD 2.1 512px Sample images ![00035-3537075825-syberart Create a dramatic and action-packed portrait of a young woman in full combat gear, armed and ready to fight against the.png](https://s3.amazonaws.com/moonup/production/uploads/1672316746436-634fe331cfefce6e57796151.png) ![00039-2376361081-syberart Create a dramatic and action-packed portrait of a young woman in full combat gear, armed and ready to fight against the.png](https://s3.amazonaws.com/moonup/production/uploads/1672316746444-634fe331cfefce6e57796151.png) ![00049-3224348556-syberart Create a tense and dramatic screenshot of a group of people being interrogated by an alien species, set in the year 298.png](https://s3.amazonaws.com/moonup/production/uploads/1672316893401-634fe331cfefce6e57796151.png) ![00053-2601147244-syberart Create a tense and dramatic screenshot of a group of people being interrogated by a very weird alien species, set in th.png](https://s3.amazonaws.com/moonup/production/uploads/1672316893418-634fe331cfefce6e57796151.png) ![00065-3257977273-Create a powerful and emotive portrait of a woman as she leads a group of human rebels in the fight against the alien invasion.png](https://s3.amazonaws.com/moonup/production/uploads/1672316893399-634fe331cfefce6e57796151.png) ![00067-3257977275-Create a powerful and emotive portrait of a woman as she leads a group of human rebels in the fight against the alien invasion.png](https://s3.amazonaws.com/moonup/production/uploads/1672316893417-634fe331cfefce6e57796151.png) ![00036-2376361078-syberart Create a dramatic and action-packed portrait of a young woman in full combat gear, armed and ready to fight against the.png](https://s3.amazonaws.com/moonup/production/uploads/1672316415156-634fe331cfefce6e57796151.png) ![00048-3224348555-syberart Create a tense and dramatic screenshot of a group of people being interrogated by an alien species, set in the year 298.png](https://s3.amazonaws.com/moonup/production/uploads/1672316415116-634fe331cfefce6e57796151.png) ![00053-2601147244-syberart Create a tense and dramatic screenshot of a group of people being interrogated by a very weird alien species, set in alien attacker, with a look of fierce determination and bravery. The p.png](https://s3.amazonaws.com/moonup/production/uploads/1672316415143-634fe331cfefce6e57796151.png) ![00070-3390290167-Create a highly realistic and cinematic portrait of a woman as she stands tall and unyielding against the alien threat, her weap.png](https://s3.amazonaws.com/moonup/production/uploads/1672316893409-634fe331cfefce6e57796151.png)
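The card describes the trigger keyword and aspect ratio but ships no code. A minimal `diffusers` sketch, assuming a published checkpoint (the repo id below is a placeholder) and a roughly 16:9 resolution:

```python
import torch
from diffusers import StableDiffusionPipeline

# Placeholder repo id -- substitute the actual Cinematic Diffusion checkpoint.
pipe = StableDiffusionPipeline.from_pretrained(
    "<user>/cinematic-diffusion", torch_dtype=torch.float16
).to("cuda")

# Per the card, prompts must start with the "syberart" keyword.
prompt = "syberart a tense western standoff at dusk, cinematic lighting"
image = pipe(prompt, width=912, height=512).images[0]  # ~16:9, multiples of 8
image.save("cinematic.png")
```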
CAMeL-Lab/bert-base-arabic-camelbert-ca-ner
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
85
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # {MODEL_NAME} This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}') model = AutoModel.from_pretrained('{MODEL_NAME}') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 320 with parameters: ``` {'batch_size': 4, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss` Parameters of the fit()-Method: ``` { "epochs": 10, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": 3200, "warmup_steps": 320, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: MPNetModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
CAMeL-Lab/bert-base-arabic-camelbert-ca-poetry
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:1905.05700", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
42
null
--- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional image generation of cute 🦋. ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('Thangaraj/sd-class-burger-64') image = pipeline().images[0] image ```
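The pipeline returns standard PIL images; a minimal follow-up sketch for sampling a small batch and saving the results (the batch size and filenames are arbitrary choices):

```python
# Sample four images in one call and write them to disk.
images = pipeline(batch_size=4).images
for i, img in enumerate(images):
    img.save(f"sample_{i}.png")
```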
CAMeL-Lab/bert-base-arabic-camelbert-ca-pos-egy
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
16451
null
---
language:
- en
license: creativeml-openrail-m
tags:
- stable-diffusion
- stable-diffusion-diffusers
- text-to-image
- diffusers
inference: true
duplicated_from: Linaqruf/anything-v3.0
---

# Anything V3

Welcome to Anything V3 - a latent diffusion model for weebs. This model is intended to produce high-quality, highly detailed anime-style images with just a few prompts. Like other anime-style Stable Diffusion models, it also supports danbooru tags to generate images.

e.g. **_1girl, white hair, golden eyes, beautiful eyes, detail, flower meadow, cumulonimbus clouds, lighting, detailed sky, garden_**

## Gradio

We support a [Gradio](https://github.com/gradio-app/gradio) Web UI to run Anything-V3.0:
[Open in Spaces](https://huggingface.co/spaces/akhaliq/anything-v3.0)

## 🧨 Diffusers

This model can be used just like any other Stable Diffusion model. For more information, please have a look at the [Stable Diffusion documentation](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion).

You can also export the model to [ONNX](https://huggingface.co/docs/diffusers/optimization/onnx), [MPS](https://huggingface.co/docs/diffusers/optimization/mps) and/or FLAX/JAX.

```python
from diffusers import StableDiffusionPipeline
import torch

model_id = "Linaqruf/anything-v3.0"
branch_name = "diffusers"

pipe = StableDiffusionPipeline.from_pretrained(model_id, revision=branch_name, torch_dtype=torch.float16)
pipe = pipe.to("cuda")

prompt = "pikachu"
image = pipe(prompt).images[0]
image.save("./pikachu.png")
```

## Examples

Below are some examples of images generated using this model:

**Anime Girl:**
![Anime Girl](https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/1girl.png)
```
1girl, brown hair, green eyes, colorful, autumn, cumulonimbus clouds, lighting, blue sky, falling leaves, garden
Steps: 50, Sampler: DDIM, CFG scale: 12
```
**Anime Boy:**
![Anime Boy](https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/1boy.png)
```
1boy, medium hair, blonde hair, blue eyes, bishounen, colorful, autumn, cumulonimbus clouds, lighting, blue sky, falling leaves, garden
Steps: 50, Sampler: DDIM, CFG scale: 12
```
**Scenery:**
![Scenery](https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/scenery.png)
```
scenery, shibuya tokyo, post-apocalypse, ruins, rust, sky, skyscraper, abandoned, blue sky, broken window, building, cloud, crane machine, outdoors, overgrown, pillar, sunset
Steps: 50, Sampler: DDIM, CFG scale: 12
```

## License

This model is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage.
The CreativeML OpenRAIL License specifies:

1. You can't use the model to deliberately produce or share illegal or harmful outputs or content
2. The authors claim no rights on the outputs you generate; you are free to use them and are accountable for their use, which must not go against the provisions set in the license
3. You may re-distribute the weights and use the model commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully)
[Please read the full license here](https://huggingface.co/spaces/CompVis/stable-diffusion-license)
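The example captions above all use the DDIM sampler at 50 steps with CFG scale 12. A minimal sketch of reproducing those settings with the diffusers pipeline from the section above (the scheduler swap uses the standard diffusers API; the prompt is taken from the first example):

```python
from diffusers import DDIMScheduler

# Swap in the DDIM sampler and mirror the caption settings (50 steps, CFG 12).
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
image = pipe(
    "1girl, brown hair, green eyes, colorful, autumn, cumulonimbus clouds, lighting, blue sky, falling leaves, garden",
    num_inference_steps=50,
    guidance_scale=12,
).images[0]
image.save("./1girl_ddim.png")
```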
CAMeL-Lab/bert-base-arabic-camelbert-ca-sentiment
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
73
null
---
tags:
- FrozenLake-v1-8x8-no_slippery
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-FrozenLake-v1-8x8-noSlippery
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: FrozenLake-v1-8x8-no_slippery
      type: FrozenLake-v1-8x8-no_slippery
    metrics:
    - type: mean_reward
      value: 1.00 +/- 0.00
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **FrozenLake-v1**

This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**.

## Usage

```python
import gym

# `load_from_hub` is the helper defined in the Deep RL Course notebook;
# it downloads and unpickles the saved Q-table dictionary.
model = load_from_hub(repo_id="mo-shadfar/q-FrozenLake-v1-8x8-noSlippery", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
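For a quick sanity check, the greedy policy can be rolled out from the loaded Q-table. A minimal sketch, assuming the downloaded pickle stores `qtable` and `env_id` keys (as in the course notebook) and the classic Gym step API:

```python
import numpy as np

state = env.reset()
done = False
total_reward = 0
while not done:
    action = int(np.argmax(model["qtable"][state]))  # greedy action from the Q-table
    state, reward, done, info = env.step(action)
    total_reward += reward
print(f"episode return: {total_reward}")
```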
CAMeL-Lab/bert-base-arabic-camelbert-da-pos-egy
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
32
null
---
tags:
- FrozenLake-v1-4x4-no_slippery
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-FrozenLake-v1-4x4-noSlippery
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: FrozenLake-v1-4x4-no_slippery
      type: FrozenLake-v1-4x4-no_slippery
    metrics:
    - type: mean_reward
      value: 1.00 +/- 0.00
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **FrozenLake-v1**

This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**.

## Usage

```python
import gym

# `load_from_hub` is the helper defined in the Deep RL Course notebook;
# it downloads and unpickles the saved Q-table dictionary.
model = load_from_hub(repo_id="Nishant91/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
CAMeL-Lab/bert-base-arabic-camelbert-da
[ "pytorch", "tf", "jax", "bert", "fill-mask", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
449
null
---
tags:
- unity-ml-agents
- ml-agents
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-Pyramids
library_name: ml-agents
---

# **ppo** Agent playing **Pyramids**
This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)
Documentation: https://github.com/huggingface/ml-agents#get-started
We wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub.

### Resume the training
```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your Agent play
You can watch your agent **playing directly in your browser**:

1. Go to https://huggingface.co/spaces/unity/ML-Agents-Pyramids
2. Write your model_id: akanametov/MLAgents-Pyramids
3. Select your *.nn or *.onnx file
4. Click on Watch the agent play 👀
CAMeL-Lab/bert-base-arabic-camelbert-msa-half
[ "pytorch", "tf", "jax", "bert", "fill-mask", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
16
null
---
license: cc-by-4.0
metrics:
- bleu4
- meteor
- rouge-l
- bertscore
- moverscore
language: es
datasets:
- lmqg/qg_esquad
pipeline_tag: text2text-generation
tags:
- answer extraction
widget:
- text: "<hl> En la diáspora somalí, múltiples eventos islámicos de recaudación de fondos se llevan a cabo cada año en ciudades como Birmingham, Londres, Toronto y Minneapolis, donde los académicos y profesionales somalíes dan conferencias y responden preguntas de la audiencia. <hl> El propósito de estos eventos es recaudar dinero para nuevas escuelas o universidades en Somalia, para ayudar a los somalíes que han sufrido como consecuencia de inundaciones y / o sequías, o para reunir fondos para la creación de nuevas mezquitas como."
  example_title: "Answer Extraction Example 1"
- text: "<hl> Los estudiosos y los historiadores están divididos en cuanto a qué evento señala el final de la era helenística. <hl> El período helenístico se puede ver que termina con la conquista final del corazón griego por Roma en 146 a. C. tras la guerra aquea, con la derrota final del reino ptolemaico en la batalla de Actium en 31 a. Helenístico se distingue de helénico en que el primero abarca toda la esfera de influencia griega antigua directa, mientras que el segundo se refiere a la propia Grecia."
  example_title: "Answer Extraction Example 2"
model-index:
- name: lmqg/mt5-small-esquad-ae
  results:
  - task:
      name: Text2text Generation
      type: text2text-generation
    dataset:
      name: lmqg/qg_esquad
      type: default
      args: default
    metrics:
    - name: BLEU4 (Answer Extraction)
      type: bleu4_answer_extraction
      value: 24.92
    - name: ROUGE-L (Answer Extraction)
      type: rouge_l_answer_extraction
      value: 48.75
    - name: METEOR (Answer Extraction)
      type: meteor_answer_extraction
      value: 41.91
    - name: BERTScore (Answer Extraction)
      type: bertscore_answer_extraction
      value: 89.86
    - name: MoverScore (Answer Extraction)
      type: moverscore_answer_extraction
      value: 80.26
    - name: AnswerF1Score (Answer Extraction)
      type: answer_f1_score__answer_extraction
      value: 73.93
    - name: AnswerExactMatch (Answer Extraction)
      type: answer_exact_match_answer_extraction
      value: 56.14
---

# Model Card of `lmqg/mt5-small-esquad-ae`
This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) for answer extraction on the [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) (dataset_name: default) via [`lmqg`](https://github.com/asahi417/lm-question-generation).
### Overview - **Language model:** [google/mt5-small](https://huggingface.co/google/mt5-small) - **Language:** es - **Training data:** [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) (default) - **Online Demo:** [https://autoqg.net/](https://autoqg.net/) - **Repository:** [https://github.com/asahi417/lm-question-generation](https://github.com/asahi417/lm-question-generation) - **Paper:** [https://arxiv.org/abs/2210.03992](https://arxiv.org/abs/2210.03992) ### Usage - With [`lmqg`](https://github.com/asahi417/lm-question-generation#lmqg-language-model-for-question-generation-) ```python from lmqg import TransformersQG # initialize model model = TransformersQG(language="es", model="lmqg/mt5-small-esquad-ae") # model prediction answers = model.generate_a("a noviembre , que es también la estación lluviosa.") ``` - With `transformers` ```python from transformers import pipeline pipe = pipeline("text2text-generation", "lmqg/mt5-small-esquad-ae") output = pipe("<hl> En la diáspora somalí, múltiples eventos islámicos de recaudación de fondos se llevan a cabo cada año en ciudades como Birmingham, Londres, Toronto y Minneapolis, donde los académicos y profesionales somalíes dan conferencias y responden preguntas de la audiencia. <hl> El propósito de estos eventos es recaudar dinero para nuevas escuelas o universidades en Somalia, para ayudar a los somalíes que han sufrido como consecuencia de inundaciones y / o sequías, o para reunir fondos para la creación de nuevas mezquitas como.") ``` ## Evaluation - ***Metric (Answer Extraction)***: [raw metric file](https://huggingface.co/lmqg/mt5-small-esquad-ae/raw/main/eval/metric.first.answer.paragraph_sentence.answer.lmqg_qg_esquad.default.json) | | Score | Type | Dataset | |:-----------------|--------:|:--------|:-----------------------------------------------------------------| | AnswerExactMatch | 56.14 | default | [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) | | AnswerF1Score | 73.93 | default | [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) | | BERTScore | 89.86 | default | [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) | | Bleu_1 | 36.7 | default | [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) | | Bleu_2 | 31.79 | default | [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) | | Bleu_3 | 28.08 | default | [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) | | Bleu_4 | 24.92 | default | [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) | | METEOR | 41.91 | default | [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) | | MoverScore | 80.26 | default | [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) | | ROUGE_L | 48.75 | default | [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) | ## Training hyperparameters The following hyperparameters were used during fine-tuning: - dataset_path: lmqg/qg_esquad - dataset_name: default - input_types: ['paragraph_sentence'] - output_types: ['answer'] - prefix_types: None - model: google/mt5-small - max_length: 512 - max_length_output: 32 - epoch: 13 - batch: 32 - lr: 0.0005 - fp16: False - random_seed: 1 - gradient_accumulation_steps: 2 - label_smoothing: 0.15 The full configuration can be found at [fine-tuning config file](https://huggingface.co/lmqg/mt5-small-esquad-ae/raw/main/trainer_config.json). 
## Citation ``` @inproceedings{ushio-etal-2022-generative, title = "{G}enerative {L}anguage {M}odels for {P}aragraph-{L}evel {Q}uestion {G}eneration", author = "Ushio, Asahi and Alva-Manchego, Fernando and Camacho-Collados, Jose", booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing", month = dec, year = "2022", address = "Abu Dhabi, U.A.E.", publisher = "Association for Computational Linguistics", } ```
CAMeL-Lab/bert-base-arabic-camelbert-msa-pos-glf
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
21
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 253.57 +/- 14.93 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
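A minimal loading sketch with `huggingface_sb3` (the repo id and checkpoint filename below are placeholders, since the card does not state them):

```python
import gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

# Download the checkpoint from the Hub and load it (placeholder repo id/filename).
checkpoint = load_from_hub(repo_id="<user>/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)

# Evaluate the loaded agent over a few episodes.
env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10, deterministic=True)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```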
CAMeL-Lab/bert-base-arabic-camelbert-msa-quarter
[ "pytorch", "tf", "jax", "bert", "fill-mask", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
---
license: creativeml-openrail-m
tags:
- text-to-image
- stable-diffusion
---
### The-Owl-House-Diffusion Dreambooth model trained by Theodora527 with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook

Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb)

Or you can run your new concept via `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb)

Use "tohdana" as the trigger token, e.g. "tohdana, a close up of an elf girl" or "tohdana, a castle in the middle of a swamp".

![0](https://huggingface.co/Theodora527/the-owl-house-diffusion/resolve/main/sample_images/The_Owl_House_Diffusion.png)
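The concept can also be run directly with `diffusers`; a minimal sketch (the repo id is taken from the sample-image link above; the fp16/CUDA settings are assumptions):

```python
import torch
from diffusers import StableDiffusionPipeline

# Load the Dreambooth concept and prompt it with its trigger token.
pipe = StableDiffusionPipeline.from_pretrained(
    "Theodora527/the-owl-house-diffusion", torch_dtype=torch.float16
).to("cuda")
image = pipe("tohdana, a close up of an elf girl").images[0]
image.save("tohdana_elf_girl.png")
```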
CAMeL-Lab/bert-base-arabic-camelbert-msa-sentiment
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
574
null
---
tags:
- FrozenLake-v1-4x4-no_slippery
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-FrozenLake-v1-4x4-noSlippery
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: FrozenLake-v1-4x4-no_slippery
      type: FrozenLake-v1-4x4-no_slippery
    metrics:
    - type: mean_reward
      value: 1.00 +/- 0.00
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **FrozenLake-v1**

This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**.

## Usage

```python
import gym

# `load_from_hub` is the helper defined in the Deep RL Course notebook;
# it downloads and unpickles the saved Q-table dictionary.
model = load_from_hub(repo_id="akgeni/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
CAMeL-Lab/bert-base-arabic-camelbert-msa-sixteenth
[ "pytorch", "tf", "jax", "bert", "fill-mask", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
26
null
---
tags:
- unity-ml-agents
- ml-agents
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-Huggy
library_name: ml-agents
---

# **ppo** Agent playing **Huggy**
This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)
Documentation: https://github.com/huggingface/ml-agents#get-started
We wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub.

### Resume the training
```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your Agent play
You can watch your agent **playing directly in your browser**:

1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy
2. Write your model_id: smi-egor/ppo-Huggy
3. Select your *.nn or *.onnx file
4. Click on Watch the agent play 👀
CBreit00/DialoGPT_small_Rick
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- accuracy
widget:
- '全国高校科研质量排名,清华北大浙大无缘前五。'
model-index:
- name: classification_tnews_arg
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# classification_tnews_arg

This model is a fine-tuned version of [hfl/chinese-roberta-wwm-ext](https://huggingface.co/hfl/chinese-roberta-wwm-ext) on a small augmented subset of the TNEWS dataset.
It achieves the following results on the evaluation set:
- Loss: 1.1794
- Accuracy: 0.7133

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 10
- eval_batch_size: 10
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 1.3992        | 1.0   | 150  | 1.2185          | 0.66     |
| 0.1415        | 2.0   | 300  | 1.1324          | 0.6933   |
| 0.0376        | 3.0   | 450  | 1.1794          | 0.7133   |

### Framework versions

- Transformers 4.25.1
- Pytorch 1.13.0+cu116
- Datasets 2.8.0
- Tokenizers 0.13.2
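The card omits a usage example; a minimal sketch with the `transformers` pipeline (the model path below is a placeholder for wherever this checkpoint is stored):

```python
from transformers import pipeline

# Load the fine-tuned TNEWS classifier (substitute the actual Hub id or
# local checkpoint directory for the placeholder path).
classifier = pipeline("text-classification", model="path/to/classification_tnews_arg")
print(classifier("全国高校科研质量排名,清华北大浙大无缘前五。"))
```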
CLAck/en-km
[ "pytorch", "marian", "text2text-generation", "transformers", "translation", "autotrain_compatible" ]
translation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 295.51 +/- 20.94 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) ```python import gym from huggingface_sb3 import load_from_hub, package_to_hub, push_to_hub from huggingface_hub import notebook_login # To log to our Hugging Face account to be able to upload models to the Hub. from stable_baselines3 import PPO from stable_baselines3.common.evaluation import evaluate_policy from stable_baselines3.common.env_util import make_vec_env # Create the environment env = make_vec_env('LunarLander-v2', n_envs=16) model = PPO( policy = 'MlpPolicy', env = env, n_steps = 1024, batch_size = 64, n_epochs = 4, gamma = 0.999, gae_lambda = 0.98, ent_coef = 0.01, learning_rate=0.0004, verbose=1) # Train it for 3,000,000 timesteps model.learn(total_timesteps=3000000) # Save the model model_name = "ppo-LunarLander-v2" model.save(model_name) ... ```
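A natural follow-up to the training script above, using the `evaluate_policy` import it already declares (a sketch; the episode count is arbitrary):

```python
# Evaluate the freshly trained agent on a single (non-vectorized) environment.
eval_env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=10, deterministic=True)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```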
CLTL/icf-domains
[ "pytorch", "roberta", "nl", "transformers", "license:mit", "text-classification" ]
text-classification
{ "architectures": [ "RobertaForMultiLabelSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
35
null
---
tags:
- Taxi-v3
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: taxi_v3
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: Taxi-v3
      type: Taxi-v3
    metrics:
    - type: mean_reward
      value: 7.48 +/- 2.71
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **Taxi-v3**

This is a trained model of a **Q-Learning** agent playing **Taxi-v3**.

## Usage

```python
import gym

# `load_from_hub` is the helper defined in the Deep RL Course notebook;
# it downloads and unpickles the saved Q-table dictionary.
model = load_from_hub(repo_id="rishipatel92/taxi_v3", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
CLTL/icf-levels-fac
[ "pytorch", "roberta", "text-classification", "nl", "transformers", "license:mit" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
32
null
--- language: eng license: mit dataset: Logical Fallacy Dataset --- # distilbert-base-fallacy-classification This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the [Logical Fallacy Dataset](https://github.com/causalNLP/logical-fallacy). ## Model description The model is fine-tuned for text classification of logical fallacies. There are a total of 14 classes: ad hominem, ad populum, appeal to emotion, circular reasoning, equivocation, fallacy of credibility, fallacy of extension, fallacy of logic, fallacy of relevance, false causality, false dilemma, faulty generalization, intentional, and miscellaneous. ## Example Pipeline ```python from transformers import pipeline text = "We know that the earth is flat because it looks and feels flat." model_path = "q3fer/distilbert-base-fallacy-classification" pipe = pipeline("text-classification", model=model_path, tokenizer=model_path) pipe(text) ``` ``` [{'label': 'circular reasoning', 'score': 0.951125979423523}] ``` ## Full Classification Example ```python import torch from transformers import AutoTokenizer from transformers import AutoModelForSequenceClassification model = AutoModelForSequenceClassification.from_pretrained("q3fer/distilbert-base-fallacy-classification") tokenizer = AutoTokenizer.from_pretrained("q3fer/distilbert-base-fallacy-classification") text = "We know that the earth is flat because it looks and feels flat." inputs = tokenizer(text, return_tensors='pt') with torch.no_grad(): logits = model(**inputs) scores = logits[0][0] scores = torch.nn.Softmax(dim=0)(scores) _, ranking = torch.topk(scores, k=scores.shape[0]) ranking = ranking.tolist() results = [f"{i+1}) {model.config.id2label[ranking[i]]} {scores[ranking[i]]:.4f}" for i in range(scores.shape[0])] print('\n'.join(results)) ``` ``` 1) circular reasoning 0.9511 2) fallacy of logic 0.0154 3) equivocation 0.0080 4) fallacy of credibility 0.0069 5) ad populum 0.0028 6) fallacy of extension 0.0025 7) intentional 0.0024 8) faulty generalization 0.0021 9) appeal to emotion 0.0021 10) fallacy of relevance 0.0019 11) false dilemma 0.0017 12) ad hominem 0.0013 13) false causality 0.0012 14) miscellaneous 0.0004 ``` ## Training and evaluation data The [Logical Fallacy Dataset](https://github.com/causalNLP/logical-fallacy) is used for training and evaluation. Jin, Z., Lalwani, A., Vaidhya, T., Shen, X., Ding, Y., Lyu, Z., ... Schölkopf, B. (2022). Logical Fallacy Detection. arXiv. https://doi.org/10.48550/arxiv.2202.13758 ## Training procedure The following hyperparameters were used during fine-tuning: - learning_rate : 2e-5 - warmup steps : 0 - batch_size: 16 - num_epochs: 8 - batches_per_epoch: 122 - total_train_steps: 976
CTBC/ATS
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
license: creativeml-openrail-m
tags:
- text-to-image
- stable-diffusion
---
### Dreambooth concept model Ira-Olympus-v3-3600, trained by hr16 with the [Shinja Zero SoTA DreamBooth_Stable_Diffusion](https://colab.research.google.com/drive/1G7qx6M_S1PDDlsWIMdbZXwdZik6sUlEh) notebook <br>
Test the concept with [Shinja Zero no Notebook](https://colab.research.google.com/drive/1Hp1ZIjPbsZKlCtomJVmt2oX7733W44b0) <br>
Or test it with `diffusers`: [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb)

Sample images of the concept: WIP
CallumRai/HansardGPT2
[ "pytorch", "jax", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
null
---
tags:
- FrozenLake-v1-4x4-no_slippery
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-FrozenLake-v1-4x4-noSlippery
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: FrozenLake-v1-4x4-no_slippery
      type: FrozenLake-v1-4x4-no_slippery
    metrics:
    - type: mean_reward
      value: 1.00 +/- 0.00
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **FrozenLake-v1**

This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**.

## Usage

```python
import gym

# `load_from_hub` is the helper defined in the Deep RL Course notebook;
# it downloads and unpickles the saved Q-table dictionary.
model = load_from_hub(repo_id="ankandrew/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
CalvinHuang/mt5-small-finetuned-amazon-en-es
[ "pytorch", "tensorboard", "mt5", "text2text-generation", "transformers", "summarization", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible" ]
summarization
{ "architectures": [ "MT5ForConditionalGeneration" ], "model_type": "mt5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
16
null
---
tags:
- unity-ml-agents
- ml-agents
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-Huggy
library_name: ml-agents
---

# **ppo** Agent playing **Huggy**
This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)
Documentation: https://github.com/huggingface/ml-agents#get-started
We wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub.

### Resume the training
```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your Agent play
You can watch your agent **playing directly in your browser**:

1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy
2. Write your model_id: CyantifiCQ/ppo-Huggy
3. Select your *.nn or *.onnx file
4. Click on Watch the agent play 👀
Cameron/BERT-mdgender-convai-ternary
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
38
null
---
tags:
- Taxi-v3
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: Q-Taxi-v3
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: Taxi-v3
      type: Taxi-v3
    metrics:
    - type: mean_reward
      value: 7.56 +/- 2.71
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **Taxi-v3**

This is a trained model of a **Q-Learning** agent playing **Taxi-v3**.

## Usage

```python
import gym

# `load_from_hub` is the helper defined in the Deep RL Course notebook;
# it downloads and unpickles the saved Q-table dictionary.
model = load_from_hub(repo_id="ankandrew/Q-Taxi-v3", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
Cameron/BERT-mdgender-wizard
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 250.25 +/- 34.78 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
Cameron/BERT-rtgender-opgender-annotations
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
33
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 261.75 +/- 26.89 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```