Dataset columns:
- modelId: string (length 4 to 81)
- tags: list
- pipeline_tag: string (17 classes)
- config: dict
- downloads: int64 (0 to 59.7M)
- first_commit: timestamp[ns, tz=UTC]
- card: string (length 51 to 438k)
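The rows below repeat these seven fields in order. A minimal sketch of reading such a dump programmatically, assuming it is published as a Hugging Face dataset (the repo id `user/model-cards` is a hypothetical placeholder):

```python
from datasets import load_dataset

# Hypothetical repo id; substitute the dataset this dump was actually taken from.
ds = load_dataset("user/model-cards", split="train")

row = ds[0]
print(row["modelId"])       # e.g. "Davlan/xlm-roberta-base-finetuned-chichewa"
print(row["pipeline_tag"])  # one of 17 classes, e.g. "fill-mask"
print(row["downloads"])     # int64 download count
print(row["card"][:200])    # the raw model-card markdown
```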
Davlan/xlm-roberta-base-finetuned-chichewa
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
2023-01-01T15:57:25Z
--- library_name: stable-baselines3 tags: - HalfCheetahBulletEnv-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: HalfCheetahBulletEnv-v0 type: HalfCheetahBulletEnv-v0 metrics: - type: mean_reward value: -798.25 +/- 732.56 name: mean_reward verified: false --- # **A2C** Agent playing **HalfCheetahBulletEnv-v0** This is a trained model of an **A2C** agent playing **HalfCheetahBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
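The usage section in this card is a TODO stub from the template. A minimal runnable sketch of the intended pattern, assuming the agent was saved to the Hub as a zip checkpoint (the repo id and filename below are hypothetical placeholders):

```python
import gym
import pybullet_envs  # registers HalfCheetahBulletEnv-v0 with gym

from huggingface_sb3 import load_from_hub
from stable_baselines3 import A2C

# Hypothetical repo id / filename; use the actual values for this model.
checkpoint = load_from_hub(
    repo_id="user/a2c-HalfCheetahBulletEnv-v0",
    filename="a2c-HalfCheetahBulletEnv-v0.zip",
)
model = A2C.load(checkpoint)

# Roll the agent out in the environment (old gym API, matching SB3 < 2.0).
env = gym.make("HalfCheetahBulletEnv-v0")
obs = env.reset()
for _ in range(1000):
    action, _states = model.predict(obs, deterministic=True)
    obs, reward, done, info = env.step(action)
    if done:
        obs = env.reset()
```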
Davlan/xlm-roberta-base-finetuned-english
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- tags: - generated_from_trainer metrics: - wer model-index: - name: libri-alpha-0.5-Temp-1-processor-change results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # libri-alpha-0.5-Temp-1-processor-change This model is a fine-tuned version of [](https://huggingface.co/) on the None dataset. It achieves the following results on the evaluation set: - Loss: 91.9750 - Wer: 0.1187 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 30 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 569.0646 | 0.75 | 100 | 175.3549 | 0.1589 | | 440.3574 | 1.49 | 200 | 146.3654 | 0.1640 | | 398.2328 | 2.24 | 300 | 128.7082 | 0.1562 | | 357.5816 | 2.99 | 400 | 117.7871 | 0.1495 | | 344.3317 | 3.73 | 500 | 111.0376 | 0.1417 | | 331.0486 | 4.48 | 600 | 106.5447 | 0.1398 | | 321.4498 | 5.22 | 700 | 105.1187 | 0.1363 | | 305.8177 | 5.97 | 800 | 103.2541 | 0.1365 | | 304.2076 | 6.72 | 900 | 105.3060 | 0.1385 | | 297.746 | 7.46 | 1000 | 101.1069 | 0.1307 | | 285.7675 | 8.21 | 1100 | 99.9853 | 0.1303 | | 284.6546 | 8.96 | 1200 | 98.5235 | 0.1292 | | 281.672 | 9.7 | 1300 | 97.8004 | 0.1295 | | 281.0029 | 10.45 | 1400 | 96.9385 | 0.1278 | | 283.847 | 11.19 | 1500 | 96.3700 | 0.1275 | | 274.4053 | 11.94 | 1600 | 95.9557 | 0.1281 | | 271.8855 | 12.69 | 1700 | 95.5764 | 0.1250 | | 275.416 | 13.43 | 1800 | 95.0451 | 0.1266 | | 267.7354 | 14.18 | 1900 | 94.6620 | 0.1242 | | 273.9816 | 14.93 | 2000 | 95.0889 | 0.1241 | | 263.9812 | 15.67 | 2100 | 94.4231 | 0.1241 | | 258.6033 | 16.42 | 2200 | 93.8011 | 0.1225 | | 260.4275 | 17.16 | 2300 | 94.0336 | 0.1210 | | 258.7905 | 17.91 | 2400 | 93.4633 | 0.1216 | | 255.6817 | 18.66 | 2500 | 93.0448 | 0.1212 | | 252.3298 | 19.4 | 2600 | 92.9945 | 0.1216 | | 250.5598 | 20.15 | 2700 | 92.9767 | 0.1200 | | 249.4384 | 20.9 | 2800 | 93.1555 | 0.1203 | | 255.6291 | 21.64 | 2900 | 92.7784 | 0.1208 | | 249.5222 | 22.39 | 3000 | 92.5792 | 0.1203 | | 250.498 | 23.13 | 3100 | 92.4570 | 0.1205 | | 252.2656 | 23.88 | 3200 | 92.3685 | 0.1199 | | 248.1438 | 24.63 | 3300 | 92.3731 | 0.1198 | | 240.2946 | 25.37 | 3400 | 92.1875 | 0.1192 | | 256.2254 | 26.12 | 3500 | 91.9586 | 0.1192 | | 248.603 | 26.87 | 3600 | 91.9599 | 0.1191 | | 252.9337 | 27.61 | 3700 | 92.1080 | 0.1189 | | 250.9757 | 28.36 | 3800 | 92.1051 | 0.1188 | | 248.7415 | 29.1 | 3900 | 91.9927 | 0.1187 | | 248.7394 | 29.85 | 4000 | 91.9750 | 0.1187 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1 - Datasets 2.7.1 - Tokenizers 0.11.0
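The "Training hyperparameters" list in this card maps one-to-one onto fields of `transformers.TrainingArguments`. A minimal sketch of the equivalent configuration, with model, dataset, and `Trainer` wiring omitted (the `output_dir` name is simply taken from the card's model name):

```python
from transformers import TrainingArguments

# Mirrors the "Training hyperparameters" section of the card above.
args = TrainingArguments(
    output_dir="libri-alpha-0.5-Temp-1-processor-change",
    learning_rate=2e-05,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=32,
    seed=42,
    gradient_accumulation_steps=2,  # effective total train batch size: 64
    lr_scheduler_type="linear",
    warmup_steps=500,
    num_train_epochs=30,
    fp16=True,                      # "Native AMP" mixed precision
)
# Adam with betas=(0.9, 0.999) and epsilon=1e-08 is the Trainer default,
# so no optimizer arguments need to be set explicitly.
```

The same mapping applies to the other generated_from_trainer cards in this dump.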
Davlan/xlm-roberta-base-finetuned-hausa
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
234
null
--- tags: - generated_from_trainer metrics: - wer model-index: - name: no_distil_librispeech_100_clean_6_attention results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # no_distil_librispeech_100_clean_6_attention This model is a fine-tuned version of [](https://huggingface.co/) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2333.5361 - Wer: 1.1878 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 30 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 8374.3469 | 0.75 | 100 | 2762.3215 | 1.0 | | 4303.0578 | 1.49 | 200 | 2503.2461 | 1.0 | | 4307.1169 | 2.24 | 300 | 2498.8477 | 1.0 | | 4236.7513 | 2.99 | 400 | 2489.2173 | 1.0 | | 4242.8606 | 3.73 | 500 | 2405.0710 | 1.0801 | | 4132.4353 | 4.48 | 600 | 2333.5361 | 1.1878 | ### Framework versions - Transformers 4.24.0 - Pytorch 1.12.1 - Datasets 2.7.0 - Tokenizers 0.11.0
Davlan/xlm-roberta-base-finetuned-igbo
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
68
null
--- license: mit tags: - generated_from_trainer model-index: - name: roberta-retrained-500k results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-retrained-500k This model is a fine-tuned version of [bitsanlp/roberta-retrained-350k](https://huggingface.co/bitsanlp/roberta-retrained-350k) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 16 ### Training results ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.8.0 - Tokenizers 0.13.2
Davlan/xlm-roberta-base-finetuned-swahili
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
40
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: -177.15 +/- 20.55 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
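As with the other stable-baselines3 cards here, the usage block is a TODO stub. A minimal sketch that loads the agent and computes a mean-reward estimate like the one reported in the card's metadata, assuming a Hub zip checkpoint (the repo id and filename are hypothetical placeholders; LunarLander-v2 requires `gym[box2d]`):

```python
import gym

from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

# Hypothetical repo id / filename; use the actual values for this model.
checkpoint = load_from_hub(
    repo_id="user/ppo-LunarLander-v2",
    filename="ppo-LunarLander-v2.zip",
)
model = PPO.load(checkpoint)

env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```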
Davlan/xlm-roberta-base-finetuned-wolof
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- tags: - Breakout-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Breakout-v5 type: Breakout-v5 metrics: - type: mean_reward value: 586.40 +/- 280.74 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Breakout-v5** This is a trained model of a PPO agent playing Breakout-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/ppo_atari_envpool_xla_jax_scan.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[ppo_atari_envpool_xla_jax_scan]" python -m cleanrl_utils.enjoy --exp-name ppo_atari_envpool_xla_jax_scan --env-id Breakout-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/vwxyzjn/Breakout-v5-ppo_atari_envpool_xla_jax_scan-seed1/raw/main/ppo_atari_envpool_xla_jax_scan.py curl -OL https://huggingface.co/vwxyzjn/Breakout-v5-ppo_atari_envpool_xla_jax_scan-seed1/raw/main/pyproject.toml curl -OL https://huggingface.co/vwxyzjn/Breakout-v5-ppo_atari_envpool_xla_jax_scan-seed1/raw/main/poetry.lock poetry install --all-extras python ppo_atari_envpool_xla_jax_scan.py --track --save-model --upload-model --env-id Breakout-v5 --seed 1 ``` # Hyperparameters ```python {'anneal_lr': True, 'batch_size': 1024, 'capture_video': False, 'clip_coef': 0.1, 'cuda': True, 'ent_coef': 0.01, 'env_id': 'Breakout-v5', 'exp_name': 'ppo_atari_envpool_xla_jax_scan', 'gae_lambda': 0.95, 'gamma': 0.99, 'hf_entity': '', 'learning_rate': 0.00025, 'max_grad_norm': 0.5, 'minibatch_size': 256, 'norm_adv': True, 'num_envs': 8, 'num_minibatches': 4, 'num_steps': 128, 'num_updates': 9765, 'save_model': True, 'seed': 1, 'target_kl': None, 'torch_deterministic': True, 'total_timesteps': 10000000, 'track': True, 'update_epochs': 4, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanRL'} ```
Davlan/xlm-roberta-base-finetuned-yoruba
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
null
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Huggy library_name: ml-agents --- # **ppo** Agent playing **Huggy** This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub. ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy 2. Write your model_id: ivi137/ppo-Huggy 3. Select your *.nn or *.onnx file 4. Click on Watch the agent play 👀
Davlan/xlm-roberta-base-finetuned-zulu
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 199.49 +/- 69.02 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
Dean/summarsiation
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - Breakout-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Breakout-v5 type: Breakout-v5 metrics: - type: mean_reward value: 457.10 +/- 134.14 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Breakout-v5** This is a trained model of a PPO agent playing Breakout-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/ppo_atari_envpool_xla_jax_scan.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[ppo_atari_envpool_xla_jax_scan]" python -m cleanrl_utils.enjoy --exp-name ppo_atari_envpool_xla_jax_scan --env-id Breakout-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/vwxyzjn/Breakout-v5-ppo_atari_envpool_xla_jax_scan-seed3/raw/main/ppo_atari_envpool_xla_jax_scan.py curl -OL https://huggingface.co/vwxyzjn/Breakout-v5-ppo_atari_envpool_xla_jax_scan-seed3/raw/main/pyproject.toml curl -OL https://huggingface.co/vwxyzjn/Breakout-v5-ppo_atari_envpool_xla_jax_scan-seed3/raw/main/poetry.lock poetry install --all-extras python ppo_atari_envpool_xla_jax_scan.py --track --save-model --upload-model --env-id Breakout-v5 --seed 3 ``` # Hyperparameters ```python {'anneal_lr': True, 'batch_size': 1024, 'capture_video': False, 'clip_coef': 0.1, 'cuda': True, 'ent_coef': 0.01, 'env_id': 'Breakout-v5', 'exp_name': 'ppo_atari_envpool_xla_jax_scan', 'gae_lambda': 0.95, 'gamma': 0.99, 'hf_entity': '', 'learning_rate': 0.00025, 'max_grad_norm': 0.5, 'minibatch_size': 256, 'norm_adv': True, 'num_envs': 8, 'num_minibatches': 4, 'num_steps': 128, 'num_updates': 9765, 'save_model': True, 'seed': 3, 'target_kl': None, 'torch_deterministic': True, 'total_timesteps': 10000000, 'track': True, 'update_epochs': 4, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanRL'} ```
Declan/CNN_model_v6
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: creativeml-openrail-m tags: - pytorch - diffusers - stable-diffusion - text-to-image - diffusion-models-class - dreambooth-hackathon - wildcard datasets: akanametov/minions-dataset widget: - text: a photo of stuart minion --- # DreamBooth model for the stuart concept trained by akanametov on the akanametov/minions-dataset dataset. This is a Stable Diffusion model fine-tuned on the stuart concept with DreamBooth. It can be used by modifying the `instance_prompt`: **a photo of stuart minion** This model was created as part of the DreamBooth Hackathon 🔥. Visit the [organisation page](https://huggingface.co/dreambooth-hackathon) for instructions on how to take part! ## Description This is a Stable Diffusion model fine-tuned on `minion` images for the wildcard theme. ## Usage ```python from diffusers import StableDiffusionPipeline pipeline = StableDiffusionPipeline.from_pretrained('akanametov/stuart-minion') image = pipeline('a photo of stuart minion').images[0] image ```
Declan/FoxNews_model_v1
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- tags: - yolov5 - yolo - vision - object-detection - pytorch library_name: yolov5 library_version: 7.0.6 inference: false datasets: - keremberke/forklift-object-detection model-index: - name: keremberke/yolov5m-forklift results: - task: type: object-detection dataset: type: keremberke/forklift-object-detection name: keremberke/forklift-object-detection split: validation metrics: - type: precision # since [email protected] is not available on hf.co/metrics value: 0.8515819366709647 # min: 0.0 - max: 1.0 name: [email protected] --- <div align="center"> <img width="640" alt="keremberke/yolov5m-forklift" src="https://huggingface.co/keremberke/yolov5m-forklift/resolve/main/sample_visuals.jpg"> </div> ### How to use - Install [yolov5](https://github.com/fcakyon/yolov5-pip): ```bash pip install -U yolov5 ``` - Load model and perform prediction: ```python import yolov5 # load model model = yolov5.load('keremberke/yolov5m-forklift') # set model parameters model.conf = 0.25 # NMS confidence threshold model.iou = 0.45 # NMS IoU threshold model.agnostic = False # NMS class-agnostic model.multi_label = False # NMS multiple labels per box model.max_det = 1000 # maximum number of detections per image # set image img = 'https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg' # perform inference results = model(img, size=640) # inference with test time augmentation results = model(img, augment=True) # parse results predictions = results.pred[0] boxes = predictions[:, :4] # x1, y1, x2, y2 scores = predictions[:, 4] categories = predictions[:, 5] # show detection bounding boxes on image results.show() # save results into "results/" folder results.save(save_dir='results/') ``` - Finetune the model on your custom dataset: ```bash yolov5 train --data data.yaml --img 640 --batch 16 --weights keremberke/yolov5m-forklift --epochs 10 ``` **More models available at: [awesome-yolov5-models](https://github.com/keremberke/awesome-yolov5-models)**
Declan/FoxNews_model_v2
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional image generation of cute 🦋. ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('parayiv/sd-class-butterflies-32') image = pipeline().images[0] image ```
Declan/HuffPost_model_v2
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- tags: - espnet - audio - automatic-speech-recognition language: en datasets: - librispeech_100 license: cc-by-4.0 --- ## ESPnet2 ASR model ### `pyf98/librispeech_100_ctc_e_branchformer` This model was trained by Yifan Peng using librispeech_100 recipe in [espnet](https://github.com/espnet/espnet/). References: - [E-Branchformer: Branchformer with Enhanced merging for speech recognition (SLT 2022)](https://arxiv.org/abs/2210.00077) - [Branchformer: Parallel MLP-Attention Architectures to Capture Local and Global Context for Speech Recognition and Understanding (ICML 2022)](https://proceedings.mlr.press/v162/peng22a.html) ### Demo: How to use in ESPnet2 Follow the [ESPnet installation instructions](https://espnet.github.io/espnet/installation.html) if you haven't done that already. ```bash cd espnet git checkout 5fbaedd0555de4e205172d9e5b34a98cbf9d265e pip install -e . cd egs2/librispeech_100/asr1 ./run.sh --skip_data_prep false --skip_train true --download_model pyf98/librispeech_100_ctc_e_branchformer ``` <!-- Generated by scripts/utils/show_asr_result.sh --> # RESULTS ## Environments - date: `Sun Jan 1 15:05:07 CST 2023` - python version: `3.9.15 (main, Nov 24 2022, 14:31:59) [GCC 11.2.0]` - espnet version: `espnet 202211` - pytorch version: `pytorch 1.12.1` - Git hash: `b12e08c955276daa015cc40cf4f5977d87233db2` - Commit date: `Thu Dec 29 07:10:24 2022 -0500` ## asr_train_asr_ctc_e_branchformer_e12_raw_en_bpe5000_sp ### WER |dataset|Snt|Wrd|Corr|Sub|Del|Ins|Err|S.Err| |---|---|---|---|---|---|---|---|---| |decode_ctc_bs1_asr_model_valid.cer_ctc.ave/dev_clean|2703|54402|91.8|7.5|0.7|1.0|9.2|70.1| |decode_ctc_bs1_asr_model_valid.cer_ctc.ave/dev_other|2864|50948|80.4|17.4|2.2|2.8|22.4|87.8| |decode_ctc_bs1_asr_model_valid.cer_ctc.ave/test_clean|2620|52576|91.5|7.7|0.8|1.1|9.6|70.3| |decode_ctc_bs1_asr_model_valid.cer_ctc.ave/test_other|2939|52343|79.5|18.1|2.4|2.6|23.1|88.6| ### CER |dataset|Snt|Wrd|Corr|Sub|Del|Ins|Err|S.Err| |---|---|---|---|---|---|---|---|---| |decode_ctc_bs1_asr_model_valid.cer_ctc.ave/dev_clean|2703|288456|97.1|1.2|1.7|1.1|4.0|70.1| |decode_ctc_bs1_asr_model_valid.cer_ctc.ave/dev_other|2864|265951|91.2|4.5|4.3|3.0|11.8|87.8| |decode_ctc_bs1_asr_model_valid.cer_ctc.ave/test_clean|2620|281530|97.0|1.3|1.7|1.2|4.2|70.3| |decode_ctc_bs1_asr_model_valid.cer_ctc.ave/test_other|2939|272758|90.8|4.5|4.7|3.0|12.2|88.6| ### TER |dataset|Snt|Wrd|Corr|Sub|Del|Ins|Err|S.Err| |---|---|---|---|---|---|---|---|---| |decode_ctc_bs1_asr_model_valid.cer_ctc.ave/dev_clean|2703|69558|89.6|5.9|4.5|0.9|11.3|70.1| |decode_ctc_bs1_asr_model_valid.cer_ctc.ave/dev_other|2864|64524|77.9|14.8|7.2|3.0|25.1|87.8| |decode_ctc_bs1_asr_model_valid.cer_ctc.ave/test_clean|2620|66983|89.6|6.0|4.4|1.0|11.4|70.3| |decode_ctc_bs1_asr_model_valid.cer_ctc.ave/test_other|2939|66650|77.2|15.2|7.6|2.8|25.7|88.6| ## ASR config <details><summary>expand</summary> ``` config: /scratch/bbjs/peng6/espnet-arch/egs2/librispeech_100/asr1/conf/tuning/train_asr_ctc_e_branchformer_e12.yaml print_config: false log_level: INFO dry_run: false iterator_type: sequence output_dir: exp/asr_train_asr_ctc_e_branchformer_e12_raw_en_bpe5000_sp ngpu: 1 seed: 2022 num_workers: 4 num_att_plot: 3 dist_backend: nccl dist_init_method: env:// dist_world_size: null dist_rank: null local_rank: 0 dist_master_addr: null dist_master_port: null dist_launcher: null multiprocessing_distributed: false unused_parameters: false sharded_ddp: false cudnn_enabled: true cudnn_benchmark: false cudnn_deterministic: true collect_stats: false 
write_collected_feats: false max_epoch: 70 patience: null val_scheduler_criterion: - valid - loss early_stopping_criterion: - valid - loss - min best_model_criterion: - - valid - cer_ctc - min keep_nbest_models: 10 nbest_averaging_interval: 0 grad_clip: 5.0 grad_clip_type: 2.0 grad_noise: false accum_grad: 4 no_forward_run: false resume: true train_dtype: float32 use_amp: true log_interval: null use_matplotlib: true use_tensorboard: true create_graph_in_tensorboard: false use_wandb: false wandb_project: null wandb_id: null wandb_entity: null wandb_name: null wandb_model_log_interval: -1 detect_anomaly: false pretrain_path: null init_param: [] ignore_init_mismatch: false freeze_param: [] num_iters_per_epoch: null batch_size: 20 valid_batch_size: null batch_bins: 16000000 valid_batch_bins: null train_shape_file: - exp/asr_stats_raw_en_bpe5000_sp/train/speech_shape - exp/asr_stats_raw_en_bpe5000_sp/train/text_shape.bpe valid_shape_file: - exp/asr_stats_raw_en_bpe5000_sp/valid/speech_shape - exp/asr_stats_raw_en_bpe5000_sp/valid/text_shape.bpe batch_type: numel valid_batch_type: null fold_length: - 80000 - 150 sort_in_batch: descending sort_batch: descending multiple_iterator: false chunk_length: 500 chunk_shift_ratio: 0.5 num_cache_chunks: 1024 train_data_path_and_name_and_type: - - dump/raw/train_clean_100_sp/wav.scp - speech - kaldi_ark - - dump/raw/train_clean_100_sp/text - text - text valid_data_path_and_name_and_type: - - dump/raw/dev/wav.scp - speech - kaldi_ark - - dump/raw/dev/text - text - text allow_variable_data_keys: false max_cache_size: 0.0 max_cache_fd: 32 valid_max_cache_size: null optim: adam optim_conf: lr: 0.002 weight_decay: 1.0e-06 scheduler: warmuplr scheduler_conf: warmup_steps: 15000 token_list: - <blank> - <unk> - ▁THE - S - ▁AND - ▁OF - ▁TO - ▁A - ▁IN - ED - ▁I - ▁HE - ▁WAS - ▁THAT - ING - ▁IT - '''' - ▁HIS - ▁HAD - ▁WITH - ▁YOU - ▁FOR - T - ▁AS - ▁HER - LY - ▁NOT - ▁BUT - ▁SHE - ▁BE - D - E - ▁IS - ▁AT - ▁ON - ▁HIM - ▁THEY - ▁BY - ▁HAVE - Y - ▁MY - ▁SO - ▁ALL - ▁THIS - ▁WERE - ▁WHICH - ▁ME - ▁FROM - ▁ONE - ▁SAID - ▁WE - N - ER - ▁NO - ▁THERE - ▁WHEN - ▁AN - ▁THEIR - ▁OR - ▁WOULD - ▁WHO - ▁THEM - R - ▁IF - ▁WHAT - ▁ARE - ▁BEEN - ▁OUT - ▁UP - M - ▁WILL - ▁DO - ▁MAN - ▁COULD - C - ▁THEN - ▁INTO - ▁MORE - ▁SOME - ES - P - ▁VERY - ▁NOW - ▁YOUR - ▁LITTLE - ▁TIME - ▁ABOUT - ▁DID - ▁THAN - ▁LIKE - ▁HAS - L - G - AL - IN - ▁UPON - ▁CAN - ▁WELL - ▁OTHER - ▁OVER - US - ▁TWO - ▁ONLY - ▁ANY - ▁OUR - O - EN - RE - ▁MADE - U - ▁AFTER - ▁SEE - ▁S - ▁DOWN - ▁BEFORE - LL - ST - B - ▁OLD - ▁DAY - ▁MISS - ▁GREAT - ▁US - ▁KNOW - OR - ▁SUCH - ▁GOOD - ▁WAY - A - ▁THESE - ▁CAME - ▁UN - ▁SHOULD - ▁HOW - ▁MISTER - ▁GO - ▁MUCH - ▁WHERE - ▁MUST - ▁NEVER - ▁COME - ▁BACK - ION - 'ON' - ▁LONG - F - ▁AGAIN - ▁FIRST - LE - ▁MEN - ▁EVEN - NESS - ▁MIGHT - ▁OWN - ▁MAY - K - ▁HIMSELF - ▁SAY - ▁JUST - ▁THROUGH - ▁RE - ▁AM - ▁ITS - ▁WENT - ▁THOUGHT - ▁ - ▁DE - ▁MAKE - I - ▁HAND - ▁THINK - ▁HOUSE - ▁HERE - IC - H - ATION - ▁LIFE - IT - ▁EYES - ▁MOST - ▁WITHOUT - ▁TOO - ▁THOSE - ABLE - ▁EVERY - ▁DON - ▁MANY - ▁AWAY - ITY - VE - W - ▁STILL - ▁BEING - ▁C - ▁LAST - ▁NIGHT - ▁O - ▁HEAD - AN - ▁FOUND - ▁NOTHING - ▁YOUNG - ▁WHILE - ▁TAKE - ▁GET - ▁PEOPLE - RO - ▁OFF - ▁THOUGH - EST - ▁YET - ▁THREE - TH - ▁RIGHT - ▁UNDER - AR - ▁FACE - IES - ▁ROOM - ▁NEW - ▁SAW - RA - V - ▁ASKED - ▁TELL - ERS - ▁SAME - MENT - ▁HEART - LESS - ▁WORK - ▁PLACE - ▁ANOTHER - ▁EVER - ▁LEFT - ▁SHALL - ▁FATHER - ▁PUT - ▁ONCE - ▁TOOK - ▁LET - ▁ALWAYS - ▁SEEMED - ▁PART - IL - UR - ▁WHY - ▁TOLD - ▁GIVE - ▁LOVE - CE - ▁MIND - ▁LOOKED - ▁HEARD 
- ▁SOON - ▁LOOK - ▁MOTHER - ▁FAR - IVE - ▁BECAUSE - ▁HOME - OUS - ▁T - EL - ▁D - ▁SOMETHING - ▁SIDE - ▁KING - IS - ATE - ▁MOMENT - ENT - RY - ▁THINGS - ▁ST - ▁LIGHT - ▁FIND - ▁GOING - ▁THING - ▁WORLD - IR - AT - ▁WATER - ▁END - ▁DOOR - ISH - ▁KNEW - ▁WOMAN - ▁SIR - ▁EACH - RI - ▁HAVING - ▁AGAINST - ▁FEW - ▁E - ▁BEGAN - ▁BETTER - ▁YES - ▁NAME - ▁ENOUGH - ET - ▁HARD - ▁VOICE - ▁YEARS - ▁GOT - ▁WHOLE - ▁WHITE - ▁WANT - ▁GIRL - ▁DONE - ▁SEEN - ▁HUNDRED - ▁CALLED - ▁BETWEEN - ▁MORNING - FUL - AS - ▁FELT - TER - ▁KIND - X - CH - ▁HERSELF - ANT - ▁TOWARD - ▁HALF - ▁OH - ▁AMONG - ▁HOWEVER - ▁TURNED - ▁ALSO - ▁BOTH - ▁POOR - ▁PERHAPS - ▁REPLIED - ▁COURSE - UL - ▁QUITE - ▁REST - ▁DOES - ▁MYSELF - NG - LO - ANCE - ▁MA - ▁SET - ▁SMALL - ▁B - ▁SURE - ▁F - ▁GAVE - ▁PRESENT - ▁HIGH - ▁ALMO - ▁R - CK - ▁WHOM - ▁NEAR - ▁CARE - ▁WAR - ▁GOD - ▁TOGETHER - ▁SAT - ▁SHOW - TE - NE - ▁BEST - ▁UNTIL - ▁OPEN - ▁W - ▁FOUR - ▁DEAR - ▁HANDS - ▁WORDS - ▁SINCE - ▁LAND - ▁DIS - MAN - ▁ANYTHING - ▁FEET - ▁NEXT - ▁GENERAL - LING - ▁LAY - ▁NOR - ▁STOOD - ▁BLACK - ▁POWER - ▁BROUGHT - Z - IE - ▁ROUND - ▁BELIEVE - ▁LARGE - ▁ALONG - ▁HELP - ▁DAYS - ▁FIVE - ▁K - ▁HOPE - AM - ▁CO - ▁KEEP - ▁FULL - ▁WALK - ▁MASTER - ATED - ▁NATURE - ▁JOHN - ▁POINT - ▁DUR - ▁MATTER - ▁MONEY - ▁CHILD - ▁LOOKING - ▁RATHER - ▁AIR - IA - ▁P - ▁TWENTY - ▁FIRE - OL - ▁LESS - ▁SHORT - ▁PASSED - ▁INDEED - TY - ▁CASE - ▁WORD - ▁WISH - ▁COUNTRY - LED - ID - ▁BOY - ▁SOUND - ▁FORM - ▁CRIED - LA - ▁FRIEND - TON - ▁FACT - ▁UNCLE - ▁TAKEN - ▁AL - ▁TEN - IAN - ▁GONE - ▁SEA - ▁REASON - TING - ▁WHOSE - ▁OTHERS - AC - ▁LI - ▁DEATH - ▁CERTAIN - ▁ANSWERED - ▁THEMSELVES - ▁LADY - ▁STATE - ▁CAR - ▁WIFE - ▁THOUSAND - ▁TRUE - ▁BEHIND - AGE - ▁DOCTOR - ▁FEAR - ▁OFTEN - OM - ▁TILL - ▁HA - IOUS - ▁AROUND - IST - ▁SENT - ▁SPEAK - ▁WOMEN - ▁GROUND - VER - ENCE - NA - ▁TALK - ▁CHILDREN - TION - CO - MO - ▁HEAR - ▁ORDER - ▁LEAVE - ▁PRO - ▁ALREADY - ▁LA - ▁FINE - SE - ▁BA - PP - ▁THUS - AD - ▁NEED - ▁SIGHT - ▁CALL - ▁FELL - ▁MANNER - MP - ▁BECAME - UM - ▁WATCH - OW - ▁FOOT - ▁CANNOT - ▁BODY - ▁TOWN - ▁LIVE - INE - ▁RETURNED - ▁WONDER - MA - ▁G - UT - ▁CLOSE - UN - IM - ▁ALONE - ▁DIDN - ▁LORD - ▁RED - ARY - ▁GIVEN - ▁SIX - ▁EVERYTHING - ▁DARK - ▁DEAD - ▁STRONG - ▁SON - ▁COMING - URE - ▁HELD - ▁ABOVE - ▁REALLY - ▁BEAUTIFUL - ▁SECOND - ARD - ▁EVENING - ▁CON - ▁HOUR - ▁FELLOW - ▁ROSE - ▁PERSON - ▁EX - ▁CH - ▁FORCE - ▁MO - ▁ARM - ▁CAUSE - ▁TURN - ▁CITY - ▁DOUBT - ▁QUESTION - TIC - ▁DEEP - ▁HAIR - ICAL - ▁MEAN - ▁DI - ▁CLEAR - ▁SOMETIMES - ▁STRANGE - ▁FEEL - ▁HO - ▁IMP - WARD - AUGHT - ▁CAPTAIN - ▁USE - ▁UNDERSTAND - ▁KEPT - ▁BR - ▁WOOD - ▁PRE - ▁YEAR - ▁TI - ▁LEAST - ▁BED - ▁SA - ▁TABLE - ▁BECOME - ▁FREE - ▁FAMILY - ME - ▁EYE - ▁WHETHER - ▁MAKING - ▁WITHIN - ▁SORT - ▁ANSWER - ▁PO - ▁SAYS - ▁EARTH - ▁RETURN - ▁SUDDENLY - ▁FRIENDS - ▁GREEN - ▁SUN - ▁FAIR - ▁TH - ▁FALL - ▁EITHER - ▁BO - ▁PRINCE - ▁THOU - ▁ITSELF - ▁CHURCH - ▁BIG - ▁ABLE - ▁DIFFERENT - ▁SEVERAL - ▁DAUGHTER - ▁WON - ▁WIND - ▁BAD - ▁LOST - ▁READ - ▁STORY - ▁APPEARED - DE - ▁NUMBER - ▁SP - ▁LOW - ▁ROAD - ▁POSSIBLE - ▁HUMAN - ▁RIVER - ▁STREET - ▁GA - ▁COLD - ▁MET - ▁ACT - ▁BROTHER - ▁AGE - ▁KNOWN - ▁CONTINUED - ▁BRING - ▁ILL - ▁RUN - ▁LAW - ▁SUBJECT - ▁CUT - J - PER - ▁PA - ▁TROUBLE - ▁GLAD - HE - ▁SLEEP - MEN - ▁LATE - ▁MEANS - ▁ASK - ▁REACHED - ▁RAN - AK - ▁HORSE - ▁USED - WAY - OP - ▁WINDOW - ▁SNOW - ▁PAST - ▁OBJECT - ▁THEREFORE - IONS - ▁TREE - ▁COMP - ▁BLUE - CA - ▁VI - ▁SIGN - ▁EIGHTEEN - ▁GARDEN - ▁BUSINESS - ▁PETER - ▁FOLLOWED - ▁SEEM - ▁HOLD - ▁HAPPY - ▁LONGER - ▁ACROSS - ▁BU - BE - ▁ELSE - ▁PLAY - ▁SOUL - ▁STAND - 
▁ARMS - ▁SCHOOL - ▁PRINCESS - ▁CERTAINLY - LT - ▁ENGLISH - ▁SEVEN - ▁PER - ▁IDEA - ▁LE - ▁BOOK - ▁FEELING - ▁HUSBAND - ▁LINE - PT - THOUGH - ▁OUGHT - ▁RICH - IP - ▁VIEW - ▁DREAM - ▁SENSE - ▁LO - ▁READY - ▁CARRIED - ▁M - ▁REGARD - ▁CHANCE - ▁WANTED - ▁LIVED - ▁LATER - ▁INTEREST - ▁EN - ▁EFFECT - ▁CLA - ▁CHANGE - ▁CA - ▁REAL - ▁SUPPOSE - LES - ▁ART - ▁TIMES - ▁MAR - IF - ▁WILD - ▁ADDED - ▁LETTER - IAL - ▁THANK - ▁PARTY - LAND - ▁PAY - ▁BREATH - ▁TAKING - ▁COURT - ▁COUNT - ILY - ▁COMMON - ▁PUBLIC - ▁PURPOSE - ▁PRETTY - ▁TRUTH - ▁STAY - ▁EM - NT - ▁SH - ▁REMEMBER - ▁ENTERED - ▁RECEIVED - RED - ▁SPOKE - ▁USUAL - ▁THY - ▁FIGURE - ▁LED - ▁TREES - ▁TRIED - ▁FORWARD - NED - ▁HAT - ▁BLOOD - ▁BEYOND - ▁BANK - ▁LIVING - ▁JOY - ▁HOURS - ▁ENGLAND - ▁STONE - VI - GE - ▁SWEET - ▁POSITION - ▁FRONT - ▁GIRLS - ▁VISIT - ▁CHARACTER - ▁SPIRIT - ▁TA - BO - QUE - QUI - ▁OPENED - ▁OCCASION - ▁MEET - ▁EIGHT - ▁REMAIN - ▁PASS - TO - ▁NORTH - ▁SERVICE - ▁SISTER - ▁SE - ▁BEAR - ▁PLEASURE - ▁CHIEF - ▁FOREST - ▁BELL - ▁EXPERIENCE - ▁STRUCK - ▁CARRY - ORY - ▁WARM - 'NO' - ▁WORTH - ▁SAYING - ▁SILENCE - ▁CROSS - ▁JE - ▁H - ▁BEAUTY - PH - ▁DEAL - KE - ▁SECRET - DY - ▁MILES - ▁LU - ▁DOING - ▁BOYS - ▁CROWD - ▁ACCOUNT - REW - ISM - TI - ▁FE - ▁NONE - ▁RO - ▁NEARLY - ▁CHA - ▁YOUTH - ▁CAP - HA - ▁BIT - ▁LIE - ▁ATTENTION - ▁STANDING - ▁STAR - ▁RESPECT - ▁FURTHER - ATIONS - ▁ROCK - ▁BOW - EM - ▁EARLY - ▁MOUTH - ▁BOAT - UB - ▁IMMEDIATELY - ▁EXCEPT - SHIP - ▁PICTURE - ▁BRIGHT - ▁WA - ▁GREW - ▁LEAD - ▁CUR - ▁TONE - RRY - RS - ▁WIDE - CHE - ▁FORTH - IG - OS - ▁NEITHER - ▁YOURSELF - ▁SMILE - ▁DRESS - ▁OPINION - ▁HAPPENED - ▁WAIT - ▁SIT - ▁SHIP - ▁AH - ▁DESIRE - ▁THICK - ▁THIRD - ▁GRAND - ▁FOLLOW - ▁GATHER - ▁HILL - ALLY - ▁COMPANY - ▁CHAIR - DER - ▁TOP - ▁PAR - ▁LENGTH - ▁THIRTY - ▁MINE - ▁MI - ▁EAT - ▁EQUAL - ▁AFRAID - ▁FRESH - ▁TAIL - ▁FILLED - ▁SU - ▁MINUTES - ▁FAST - BU - ▁ENTER - ▁QUEEN - ▁UTTER - AG - ▁FLOOR - ▁SHA - DI - ▁HEAVEN - ▁STOPPED - ▁GUARD - ▁HALL - ▁BAR - ▁COMPLETE - ▁NINE - ▁WEEK - ▁GOLD - VA - ▁FIFTY - ▁BEAT - ▁PRESS - ▁ATTEMPT - ▁EXCLAIMED - DO - ▁CONF - ▁SEEMS - ▁STARTED - ▁EL - ▁HAR - ▁EXPRESSION - ▁TRA - ▁WONDERFUL - ▁SAINT - ▁APPEARANCE - ▁GRAVE - ▁OFFICE - ▁INSTEAD - ▁SILENT - ▁SOUTH - ▁AGO - ▁CAMP - ▁LOVED - ▁PATH - ▁LEARN - ▁PLAN - ▁GOVERNMENT - OUR - PPED - ▁SITTING - ▁SEAT - TEN - RESS - SIDE - ▁MOVED - ▁DIE - ▁RESULT - ▁SPRING - ▁PLEASE - ▁RI - ▁NATURAL - ▁ANNE - ▁STA - ▁CORNER - ▁WALL - ▁IMPOSSIBLE - ▁BROWN - ▁SUIT - ▁MUSIC - PI - ▁TRY - ▁DIED - ▁TEARS - ▁JU - ▁COMFORT - ▁DANGER - ▁MEASURE - ▁PROPERTY - ▁BORN - CON - ▁CR - ▁BROKEN - ▁MASS - EVER - IER - ▁EXPRESS - ▁POCKET - ▁SCARCE - ▁SELF - NY - ▁MADAME - ▁LAUGHED - ▁TOUCH - ▁APPEAR - ▁LONDON - ▁SAFE - ▁SHARP - ▁ATTACK - ▁JANE - ▁COVERED - ▁OUTSIDE - ▁WHATEVER - ▁PLACED - ▁RACE - ▁SHORE - ▁LAID - ▁ROMAN - ▁PERSONAL - UP - AU - ▁REMAINED - ▁HAPPINESS - ▁AFTERNOON - ▁DISTANCE - ▁STORM - ▁MARRIED - ▁FRANK - ▁VALLEY - ▁BOUND - ▁TALKING - ▁JO - ▁QUICK - ▁STEP - AND - ▁ARMY - ▁EFFORT - ▁FRENCH - ▁V - LEY - ▁PARTICULAR - ▁START - ATING - OO - LU - ▁TRANS - ▁HAPPEN - ▁HABIT - ▁VILLAGE - ▁BELOW - ▁GENTLEMAN - BLE - ▁BILL - ▁SAVE - ACT - ▁SOCIETY - ▁MAJOR - ▁QUARTER - ▁SKY - ▁GUESS - CY - ▁SAD - ILE - ▁SL - ▁PLEASANT - ▁STRAIGHT - ▁STRENGTH - ▁FORTUNE - ▁WRONG - ▁COMMAND - ▁BOX - ▁QUIET - ISE - ▁JA - IBLE - ▁TREAT - ▁GLANCE - ▁NECESSARY - ▁FORGET - ▁MOUNTAIN - ▁WINTER - ▁DREW - ▁WAV - ▁PLAIN - ▁ENTIRELY - ▁TEA - ▁SOFT - ▁QUICKLY - ▁INFLUENCE - ▁DINNER - ▁FOOD - ▁CHAPTER - ▁YE - ▁REACH - ▁GETT - ▁PAPER - ▁GIVING - ▁BEGINNING - ▁SEND - ▁FIGHT - ▁SCENE - ▁RUSH - ▁PI 
- ▁MARK - ▁NA - ▁BROKE - ▁CLASS - ▁BATTLE - ▁EASY - ▁GROUP - BY - ▁STOP - ▁DIRECTION - ▁BESIDE - ▁MOR - HAM - UFF - ▁WEST - ▁OBLIG - ▁COLOR - ▁SINGLE - ▁EASILY - ▁PALE - ▁ACTION - ▁INTER - ▁STRANGER - ▁WI - ▁CONVERSATION - ▁BLOW - ▁MARY - ▁MU - ▁TERRIBLE - ▁THINKING - ▁PULL - ▁MOON - AB - ▁REP - ▁ESPECIALLY - ▁HEAVY - ▁SICK - ▁LUCK - ▁TRAIN - ▁GUN - ▁GU - ▁WAITING - ▁TURNING - ITIES - ▁BREAD - ▁BELONG - ▁LOUD - ▁REPORT - ▁AMERICAN - ▁JOURNEY - ▁ANXIOUS - ▁LIPS - ▁KILLED - IGHT - GO - ▁CONSIDER - ▁PROBABLY - ▁PALACE - ▁HISTORY - ▁LAKE - ▁SHUT - ▁SIMPLY - WA - ▁PAIN - ▁HORSES - ▁SEEING - FULLY - ▁EXPECTED - ▁EVIL - ▁BURN - ▁SIMPLE - ▁DIRECT - IFIED - HER - ▁SLOWLY - ▁LEG - UGH - ▁SAIL - RIC - ▁WISHED - ▁RULE - ▁LAD - ▁MORAL - ▁MOVE - ▁FOLLOWING - ▁SILVER - ▁SEARCH - ▁CHANGED - ▁HANDSOME - ▁COULDN - ▁PASSION - ▁HU - ▁SMILED - ▁STREAM - ▁CONCERN - ▁PRESENCE - STER - ▁CONTENT - ▁BOARD - ▁SHAPE - ▁DECIDED - ▁MARRY - ▁PERFECT - ▁STEPS - ▁CLOSED - ABLY - DEN - ▁WEAK - ▁SUFFICIENT - ▁SHADOW - ▁EXPECT - ▁SPOT - ▁DUTY - ▁SPEAKING - ▁BESIDES - ▁FIELD - ▁ROLL - ▁TRYING - ▁EAR - ▁VER - ▁MARRIAGE - ▁SHOT - ▁SLAVE - ▁MILL - ▁NATION - ▁NECK - ▁ARRIVED - ▁TALL - ▁GRACE - LIN - ▁FORTY - ▁BROAD - ▁SUMMER - ▁COUSIN - ▁BEGIN - ▁CATCH - ▁FO - ▁PE - ▁MEANT - ▁THIN - IO - ▁GROW - ▁TRO - ▁NOTICE - ▁CRY - ▁FISH - ▁COM - ▁DEGREE - ▁HONOUR - ▁UNDERSTOOD - ▁SHOP - ▁TRUST - ▁CONDITION - ▁FARM - IZ - ▁SUDDEN - ▁SUCCESS - ▁SURPRISE - ORS - ▁THOUGHTS - UND - ▁ALLOWED - ITE - ▁NARROW - ▁GLASS - ▁SERIOUS - ▁STICK - ▁GAME - ▁SPENT - ▁SELL - ▁GRA - ▁LOWER - ▁RAISED - ▁PIN - ▁ALLOW - ▁CALM - FT - ▁L - ▁PU - ▁FIT - ACH - ▁SUFFER - ▁LEGS - ▁SUPPORT - ▁FRANCE - ▁LATTER - OV - ▁TASTE - ▁GATE - ▁INSTANT - ▁MINUTE - ▁OFFER - ▁GREATER - ▁PORT - ILL - ▁INDIVIDUAL - ▁AUNT - ▁EAST - ▁ADVANTAGE - ▁FASHION - ▁SWORD - ▁TWELVE - ▁HONOR - ▁MOVEMENT - ▁ISLAND - ACK - ▁WOODS - NCH - ▁PLEASED - ▁ENEMY - ▁RAIN - ▁VARIOUS - ▁OBSERVED - ▁LADIES - ▁BELIEVED - ▁CAST - ▁RISE - ▁BALL - ▁MONTHS - ICE - ▁MURDER - ▁CONDUCT - ▁SOCIAL - ▁TENDER - ▁LEARNED - ▁FRA - ▁FIRM - CLOCK - ▁PREVENT - ▁RING - LIE - ▁GOLDEN - ▁DECLARED - ▁BUILDING - ▁WRITE - ▁ATTEND - ▁CARRIAGE - ▁SITUATION - IDE - ▁NOBLE - ▁HUNG - ▁RUNN - ▁YELLOW - ▁KNOWLEDGE - ▁YORK - ▁PUSH - ▁LEAVING - ▁POST - ▁CIRCUMSTANCES - ▁SEEK - ▁FINALLY - ▁MAIN - ▁LETTERS - ▁POL - ▁ADD - FE - ▁ANCIENT - ▁MARCH - ▁WINE - ▁STATES - ▁WALLS - ▁PRISONER - ▁ISABEL - ▁TEMPER - ▁JUDGE - ▁FAINT - ▁POND - ▁GRASS - ▁FAM - OUT - ▁LAUGH - ▁GRAY - IGN - ▁ESCAPE - ▁KILL - ▁PRAY - ▁COMES - ▁ABSOLUTE - ▁BLIND - ▁WIN - ▁HOST - ▁MERELY - ▁RID - ▁EVERYBODY - ▁MATERIAL - ▁STRETCH - ▁DUE - ▁ROW - ▁TIN - ▁PROMISE - ▁LISTEN - ▁WALKING - ▁COMPANION - ▁INDIAN - ▁BREAK - ▁BENEATH - ▁RUIN - ▁EDGE - ▁WOR - ▁FORMER - ▁WORSE - ▁EVIDENTLY - ▁HARM - ▁CENT - ▁PIECE - ▁LOT - ▁PRESIDENT - ▁SPECIAL - ▁LABOR - ▁HEALTH - GA - ▁PLACES - ▁BEN - ▁SOMEWHAT - ▁DROPPED - ▁AFFECTION - ▁EXACTLY - ▁DARKNESS - ▁FALLEN - ▁DRESSED - ▁BILLY - ▁ACCEPT - ▁FL - ▁HOT - ▁REPEATED - ▁MEETING - PA - ▁PERIOD - ▁HONEST - ▁INSTANCE - ▁FLA - ▁PASSAGE - ▁NE - ▁POSSESSION - ▁WEAR - ▁PEACE - ▁COAT - ▁HOUSES - ▁MOUNTAINS - ▁FIFTEEN - ▁WELCOME - ▁YARD - ▁PROPER - ▁MUS - ADE - ▁RECEIVE - ▁SKIN - ▁GROWN - ▁AFTERWARDS - ANG - ▁DA - ▁DIFFICULT - ▁PERSONS - ▁ACCORDING - ▁FARMER - ▁SPEECH - ▁IMPORTANT - PAR - ▁PERFECTLY - ▁MIN - ▁CONSIDERED - ▁NU - ▁DEPEND - ▁MORROW - ▁MOUNT - ▁KISS - ▁LYING - ▁SUFFERING - ▁EXIST - ERY - OOK - BA - ▁PAINT - AH - ▁CAT - ▁PURE - ▁WISE - ▁PRIVATE - ▁REBECCA - ▁VESSEL - ▁CLEAN - ▁GENTLEMEN - ▁IRON - ▁STORE - ▁FUR - ▁INDIANS - ▁LOSE - ▁BATH - ▁NEWS - 
▁CHI - ▁FA - ▁CHARGE - ▁PRIEST - ▁WRITTEN - ▁FORGOTTEN - ▁TRAIL - ▁CLOTHES - ▁ALIVE - ▁SUB - ▁REPLY - ▁THROW - ▁AB - ▁SOLDIERS - ▁ISN - ▁COTTAGE - ▁COURAGE - ▁CONTAIN - ▁BUILT - ▁PAID - ▁HUNT - ▁CASTLE - HOOK - ▁MERE - GGED - ▁NI - ▁UNC - ▁PREPARED - ▁BARE - ▁SMILING - ▁SPREAD - ▁WEATHER - ▁EDWARD - ▁GERMAN - ▁CURIOUS - ▁SERVANT - ▁DISCOVERED - ▁TRAVEL - EY - ▁DANCE - ▁PEN - BR - GEN - ▁BREAKFAST - ▁CHAMBER - ▁WILLIAM - ▁TERROR - ▁SPITE - ▁TIRED - ▁LOCK - ▁CONSIDERABLE - TLE - ▁MANAG - ▁DRY - ▁FINISHED - ▁MILLION - ▁FRE - ▁MIS - ▁PASSING - ▁DRAW - ▁BON - ▁VA - ▁VEN - ▁MAKES - ▁VAIN - ▁BOTTOM - ▁DRINK - ▁FUTURE - ▁RACHEL - ▁SORROW - ▁SIXTEEN - ▁KNIT - ▁PROUD - WI - ▁TOBY - ▁NOISE - ▁SLIGHT - ▁PROCEED - ▁FER - ▁COVER - ▁DRAWING - ▁FAVOR - ▁CATHERINE - ▁NEWSPAPER - ▁NOBODY - ▁ROOF - ▁WEALTH - ▁PROVE - ▁DRAWN - TTED - OKE - ▁DETERMINED - ▁DOG - ▁REMEMBERED - ▁OPENING - ▁FLOWERS - ▁GENTLE - ▁KNIGHT - ▁RECOVER - ▁DESERT - ▁MOTION - ▁NICE - ▁INTENTION - ▁GROWING - ▁CLOUD - ▁MONTH - HOOD - ▁POT - UDE - ▁PLANT - ▁MAD - ▁ENJOY - ▁FAT - ▁COR - ▁KNOWING - ▁IDEAS - IZED - ▁CHEEK - ▁EUROPE - ▁KNOCK - ▁ALARM - ▁TONGUE - ▁SPACE - ▁PATSY - ▁MISTRESS - ▁HENRY - ▁JERRY - ▁LIKED - ▁PLAYED - ▁BOOKS - ▁MODER - ▁CORN - ▁ELIZABETH - ▁CLUB - ▁BRAIN - ▁TROOP - ▁COOK - ▁DU - ▁FUN - DAY - ▁QUA - ▁FLOW - ▁DARE - ▁DELIGHT - ▁WOUND - ▁DESCEND - ▁EVERYWHERE - ▁FRIGHTENED - ▁GEORGE - ▁PECULIAR - ▁MACHINE - ▁PATIENT - ▁MEADOW - ▁PEASANT - ▁BURST - ▁ORDINAR - ▁SONG - ▁BRAVE - ▁EXISTENCE - ▁LUCY - ▁J - ▁CAREFULLY - ▁PRESENTLY - ▁GEN - ▁COW - LLY - ▁PROMISED - UOUS - ▁LIFTED - ▁MEANING - ALL - ▁FAIL - NER - ▁REGULAR - ▁VIRTUE - ▁STUDY - ▁PROTECT - ▁FOND - ▁FANCY - ▁STOCK - ▁KEY - ▁JUSTICE - ▁PACK - LET - ▁AFFAIRS - ▁DIFFICULTY - ▁WORE - ▁COST - ▁HEAT - ▁SHOULDER - ▁OFFERED - ▁MISTAKE - ▁DOLLARS - ▁LOOKS - QUA - ▁BREAST - ▁PRINCIPLE - ▁CHARLES - ▁TEETH - ▁OCCUPIED - ▁DROP - ▁PAPA - ▁SHEEP - ▁KNOWS - ▁DECK - ▁BORE - ▁EXC - ▁SURPRISED - ▁STATION - ▁PL - ▁PR - ▁OURSELVES - ▁SYMPATHY - ▁RUTH - ▁EXCITED - ▁CONTROL - ▁ANGRY - ▁IMAGINATION - ▁WITNESS - ▁HOLDING - THER - DA - ▁TRADE - ▁CREATURE - ▁SISTERS - ▁JOIN - LAS - ▁ALTOGETHER - ▁CIVIL - ▁EMPTY - ▁LEAP - ▁HURT - ▁BOLD - ▁TASK - ▁POLICE - ▁DRAGON - ▁MAID - ▁CLAIM - ▁SHAME - ▁PHYSICAL - ▁CONC - ▁SEIZED - ▁OB - ▁LIVES - ▁HEIGHT - ▁GI - ▁PAL - ▁CHARMING - ▁FEELINGS - ▁SERVANTS - ▁DELIVER - ▁FRUIT - ▁SATISFIED - ▁STRUGGLE - ▁WROTE - ▁CONCEAL - ▁MOVING - ▁FLASH - ▁OPPOSITE - ▁HURRY - ▁ROUGH - ▁PRICE - ▁AWFUL - ▁SAND - ▁SLIPP - ▁SHOWN - ▁SPRA - ▁AGREED - ▁FIXED - ▁PERCEIVED - ▁UPPER - ▁FINGER - ▁FINGERS - ▁EAGER - LF - ▁EARS - LIGHT - ▁IMAGINE - ▁LIKELY - ▁COAST - ▁UNITED - ▁VAN - ▁EXPLAINED - ▁TELLING - ▁DANGEROUS - ▁DICK - ▁COOL - ▁CAL - ▁INSIST - BI - ▁SECURE - ▁HILLS - ▁SAN - ▁CHEER - ▁FILL - ▁BUY - ZA - HI - ▁CLOTH - ▁POSSESSED - ▁ADVANCE - ▁METHOD - ATIVE - ▁GREATLY - ▁SMOKE - ▁HIGHER - ▁COMPANIONS - ▁ANIMALS - ▁GALL - ▁QUIETLY - ▁TRAVELL - ▁RESOLVED - ▁FLEW - ▁CARLYLE - ▁MEMORY - ▁RESIST - ▁GRAHAM - ▁LAUGHING - ▁FAITH - ▁BIRD - CRI - ▁LEAVES - ▁AMERICA - ▁DEMAND - BOARD - ▁AWAKE - ▁CURIOSITY - ▁LANGUAGE - ▁VIOLENT - ▁AWARE - ▁DOUBLE - ▁LOOSE - LIKE - ▁ADAM - ▁RISING - ▁HOTEL - ▁BAND - ▁ENGAGED - ▁HEADS - ▁LOG - ▁FORMED - ▁WINDOWS - ▁PREFER - RUS - ▁THROWN - ▁ARCH - ▁PAUSE - ▁SERVE - KIN - ▁FALLING - ▁VO - ▁WHISPERED - ▁POWERFUL - ▁ER - ▁DEPART - ▁CRUEL - ▁EXAMPLE - ▁SMOOTH - ▁INTRODUC - ▁RELIGION - ▁SEVENTEEN - ▁ABSENCE - ▁PRINT - ▁SHINING - ▁ICE - ▁POET - ▁DREADFUL - ▁REQUIRED - ▁ORIGINAL - ▁POINTED - ▁INSIDE - ▁BROTHERS - ▁PRODUCED - ▁SPOKEN - ▁CREATURES - ▁FLY - ▁TOM - ▁PURSU 
- ▁SYSTEM - ▁EXCELLENT - ▁EXCITEMENT - ▁MIDDLE - ▁FALSE - ▁REGRET - ▁RAY - ▁PHYSICIAN - ▁COP - ▁VALUE - ▁TOUCHED - ▁FLAT - ▁OAK - ▁SUM - ▁LOSS - ▁PAPERS - ▁STEPP - ▁REVER - ▁SHADE - SOME - ▁LISTENED - ▁N - ▁DISCOVER - ▁BITTER - TERN - ▁HOLE - ▁ADVANCED - ▁PICK - ARTAGNAN - ▁CORPORAL - ▁ASLEEP - ▁TEMPLE - ▁INDICAT - IUM - ▁FARTHER - ▁EXCUSE - ▁FLU - ▁NOSE - ▁SIXTY - ▁SUPPOSED - ▁PROVED - ▁RATE - ▁SHOULDERS - ▁AFFAIR - ▁FIELDS - ▁REMARKED - AVE - ▁WEEKS - ▁ESTABLISH - ▁PARIS - ▁ADMIT - ▁NEIGHBOR - ▁ATTRACT - ▁CUSTOM - ▁DISTINGUISH - ▁SURFACE - ▁COUPLE - ▁DEVIL - ▁LIMIT - ▁ROYAL - ▁FOOL - ▁RARE - ▁PRIDE - ▁PROFESSOR - ▁SAKE - ▁DALE - ▁VAST - ▁REFUSED - ▁FAILED - ▁BAG - ▁ROB - ▁WASH - ▁FAIRY - ▁FREQUENT - ▁MARILLA - ▁PROGRESS - ▁RELIEF - ▁DROVE - ▁DOZEN - ▁AHEAD - ▁ADVENTURE - ▁GRANT - ▁PRIM - ▁MENTAL - ▁PAIR - ▁IMPRESSION - ▁WOUNDED - ▁FULLY - ▁DISAPPEARED - ▁MILE - ▁DRIVE - ▁MUD - ▁SIZE - ▁ANIMAL - ZE - ▁GRE - ▁REPRESENT - ▁ACQUAINTANCE - ▁INSTRUMENT - ▁SPLENDID - ▁UNKNOWN - ▁CORONEL - ▁EMPEROR - ▁EARNEST - ▁EXTEND - ▁BRIEF - ▁RENDER - ▁PARENTS - ▁GENTLY - ▁CALLING - ▁TRIBE - ▁CHRISTIAN - ▁INTERESTING - ▁LAMP - ▁JIMM - ▁DIV - ▁LOVER - UCH - ▁HID - ▁NEEDED - ▁ORDERED - ▁MEAL - ▁SLOW - ▁DAM - ▁CLOUDS - ▁DAN - ▁GAR - ▁EXPLAIN - ▁QUI - ▁CLIMB - ▁HURRIED - ▁MURMUR - ▁SWIFT - ▁ARTHUR - ▁JEFF - ▁KINGDOM - ▁MESSAGE - ▁PROTEST - ▁ORGAN - ▁RISK - ▁FORGIVE - ▁OCCURRED - ▁PEARL - ▁ODD - ▁INFORMATION - ▁BUSY - ▁TRI - ▁LACK - ▁BAY - ▁FLEET - ▁CROWN - ▁WAITED - ▁BIRDS - ▁PITY - ▁SUCCEEDED - ▁INFORMED - ▁WISHES - ▁DIRECTLY - ▁CABIN - ▁AUGUST - ▁COUNTENANCE - ▁HORROR - ▁PHILIP - ▁POPULAR - ▁PREVIOUS - ▁CONTRARY - ▁ARTICLE - ▁DIFFERENCE - ▁HIDDEN - ▁HUGE - ▁AUTHORITY - ▁POUND - ▁JUMP - ▁SPI - ▁SHAKE - ▁EVENTS - ▁FRO - ▁LEAN - ▁CRO - ▁TRIM - ▁SHARE - ▁FISHER - ▁SETTLED - ▁QUESTIONS - ▁SI - ▁VAL - ▁APPROACHED - ▁SUGGESTED - ▁CONTINU - ▁PERFORM - ▁ACKNOWLEDG - ▁CLIFF - ▁COLONEL - ▁GHOST - ▁MAJESTY - ▁EMOTION - ▁SUPPER - ▁DISTANT - ▁INTERESTED - ▁JACK - ▁HUM - ▁TRAMP - ▁BRI - ▁POUR - ▁SHIPS - ▁CHAIN - ▁DY - ▁RANK - ▁MATTERS - ▁LOVELY - AW - ▁PAT - ▁WORKING - ▁CONSEIL - ▁EVIDENCE - ▁MERCHANT - ▁SOLEMN - ▁CONSTANT - ▁MINISTER - ▁OFFICIAL - ▁SENTIMENT - ▁CENTURY - ▁DELAY - ▁JAMES - ▁MATCH - ▁FOREIGN - ▁AROSE - ▁BEAST - ▁BAB - ▁WIT - ▁REMARKABLE - ▁THOR - ▁COMPAR - ▁MAL - ▁NEARER - ▁FOURTH - ▁GREY - ▁MENTION - ▁RUBB - ▁CHARM - ▁BARON - ▁DESIRED - SCAR - ▁HOPED - ▁TEACHER - ▁MON - ITCH - BEL - ▁PARTS - ▁EIGHTY - LAC - GGING - ▁REFLECT - ▁COLLECT - ▁BULL - ▁CONSCIOUS - ▁MOMENTS - ▁DISTURB - ▁COLLEGE - ▁EGGS - ▁STUPID - ▁YESTERDAY - ▁EXAMINE - ▁FAULT - ▁DEPTH - ▁ROOT - ▁MOUSE - ▁SOUGHT - ▁TURTLE - ▁NATIVE - ▁CRACK - ▁SOLD - ▁INVIT - ▁PICKED - ▁CEASED - ▁HEARING - ▁MIDS - ▁PLAYING - ▁STAGE - ▁UNTO - ▁GAIN - ▁MIST - ▁ORDERS - ▁KNEES - ▁TALE - ▁DISTINCT - ▁BENT - ▁DESPAIR - ▁TRIUMPH - ▁SQUARE - ▁THROAT - ▁BOUGHT - ▁PERMIT - ▁SPEND - ▁TRIP - ▁THREATEN - ▁ROME - INESS - ▁EXPOS - GON - ▁WRITING - ▁INCREASED - ▁PORTION - ▁TENT - IUS - ▁YO - ▁INTENDED - ▁NAMED - RATION - ▁NOTIC - ▁PIPE - ▁WILLING - ▁INSTANTLY - ▁SERVED - ▁BAL - ▁POSSESS - ▁CRE - ▁ADMIRATION - ▁LIBERTY - ▁OPPORTUNITY - ▁SELDOM - ▁BIRTH - ▁GLOW - ▁INCLUD - ▁REQUEST - ▁TYPE - ▁SLEPT - ▁CRIME - ▁MOTIVE - ▁ELSIE - ▁BEGUN - ▁CONSENT - ▁ADMITTED - ▁AVOID - ▁ADDRESS - ▁HATE - ▁DEMANDED - ▁APPARENTLY - ▁SUGGESTION - ▁CONSIDERATION - ▁BLESS - ▁PROCEEDED - NCY - ▁PRISON - ▁CONT - ▁SHOUTED - ▁FACES - ▁SPIRITS - ▁DEVELOP - ▁ACCIDENT - ▁ADVICE - ▁INNOCENT - ▁INSTINCT - ▁UNCONSCIOUS - ▁MYSTERIOUS - ▁PRETEND - ▁PEEP - ▁ANYONE - ▁DUKE - ▁PLUM - VILLE - ▁SEVERE - ▁ALAS - 
▁DELIGHTED - ▁ISSUE - ▁ASKING - ▁CROW - ▁ACCEPTED - ▁RIDE - ▁DOORS - ▁TAR - ▁PREPAR - ▁SUGGEST - WOOD - ▁CITIZEN - ▁ENTRANCE - ▁LINCOLN - ▁POLITICAL - ▁PRACTICAL - ▁STIFF - ▁WIDOW - ▁CAPITAL - ▁CLEVER - ▁MAMMA - ▁CREDIT - ▁OBEY - ▁STRING - ▁DAILY - ▁ARGUMENT - ▁HEAP - ▁APARTMENT - ▁FLIGHT - ▁ELDER - ▁PUR - ▁PAGE - ▁DUST - ▁GAZE - ▁NATIONAL - ▁BABY - DDING - ISTS - ▁TEACH - ▁STREETS - CAL - ▁GE - AFF - ▁GOES - ▁POSSIBL - UNG - ▁LINES - GUE - ▁VOTE - ▁HUNTING - ▁QUO - ▁RESEMBL - ▁BASKET - ▁CIRCLE - ▁CONSEQUENCE - ▁KITCHEN - ▁TREASURE - ▁NEVERTHELESS - ▁FANCI - ▁ASSEMBL - ▁GRIEF - ▁VEIL - ▁SEASON - ▁INVENT - ▁VIRGINIA - ▁HUT - ▁GUEST - ▁ROAR - ▁BEHOLD - ▁VICTORY - ▁CAPABLE - ▁DULL - ▁SHOE - ▁FLOAT - ▁MERRY - ▁IMMEDIATE - ETH - ▁ELEANOR - ▁EXPLANATION - ▁PARLIAMENT - ▁PRINCIPAL - ▁PROPORTION - ▁RESOLUTION - ▁UNUSUAL - ▁BLUFF - ▁NINETEEN - ▁SENSATION - ▁VISIBLE - ▁INCOME - ▁FATE - ▁SUPER - ▁LAUGHTER - ▁EASE - ▁LOAD - ▁JEW - ▁ZE - ▁FEVER - ▁WEDDING - ▁JOINED - ▁TRACE - ▁LEADER - ▁CLEARLY - ▁FLOWER - ▁TERMS - ▁EMPLOYED - OCK - ▁PARTICULARLY - ▁MEMBERS - ▁CONFESS - ▁GRO - ▁ADDRESSED - ▁CHRIST - ▁ACCOMPANI - ▁AFFORD - ▁AMOUNT - ▁BRILLIANT - ▁COMMUNICAT - ▁FIERCE - ▁RECORD - ▁SACRIFICE - ▁TEMPT - ▁CORDIAL - ▁COLOUR - ▁PROOF - ▁ESTATE - ▁PARDON - ▁ADVIS - ▁ATTITUDE - ▁IMPORTANCE - ▁BOOT - ▁SHOCK - ▁FIR - ▁PLENT - ▁HIT - ▁MEMBER - ▁SUR - ▁SEATED - ▁MAG - AVING - ▁FAVOUR - ▁REMARK - ▁DIM - ▁FAITHFUL - ▁SAVED - CHI - ▁SIN - THE - ▁CONFIDENCE - ▁EXTRAORDINARY - ▁FORTUNATE - ▁MISFORTUNE - ▁PATIENCE - ▁RELIGIOUS - ▁SATISFACTION - ▁POSITIVE - ▁SIMILAR - ▁EXCHANG - ▁RETREAT - ▁FLESH - ▁ADMIRE - ▁SPIRITUAL - ▁DAWN - ▁BURIED - ▁URGE - ▁SUNDAY - ▁FOX - ▁EMMA - ▁NURSE - ▁SNAPP - ▁PARK - ▁OBTAIN - ▁RECOGNIZED - ▁SPEED - ▁MAGIC - ▁LAWS - ▁REMOVED - ▁HAM - ▁PRESERV - ▁AID - HOUSE - ▁MENTIONED - ▁CONSCIENCE - ▁CONTEMPT - ▁DETAIL - ▁IMMENSE - ▁NERVOUS - ▁PRISCILLA - ▁UNFORTUNATE - ▁UNHAPPY - ▁COMPLAIN - ▁TWICE - ▁WHISTL - ▁SNAKE - ▁WASHINGTON - ▁PIRATE - ▁WICKED - ▁BODIES - ▁DESIGN - ▁JASON - ▁VAGUE - ▁CONSIST - ▁GIFT - ▁ANGEL - ▁RODE - ▁FOLD - ▁BRIDE - ▁ANGER - ▁BASE - ITUDE - ▁CONCLUDED - ▁ALTER - ▁FRI - ▁PANT - ▁BID - ▁HIGHEST - ▁SAILOR - MPLE - ▁OBSERV - ▁CHEERFUL - IFICATION - RID - ▁DESCRIBED - ▁BIN - ▁JEWEL - ▁ARTIST - ▁PEER - ▁NORA - ▁SKI - ▁DIAMOND - ▁ENCOURAGE - ▁PRIVILEGE - ▁PROJECT - ▁ANYBODY - ▁ENCOUNTER - ▁HOLLOW - ▁YIELD - ▁BOBBY - ▁SAVAGE - ▁SOMEBODY - ▁OTHERWISE - ▁PRAISE - ▁PROBLEM - ▁DISTRESS - ▁UGLY - ▁WARRIOR - ▁MOURN - ▁RELIEV - ▁DESK - ▁FOOLISH - ▁STARTLED - ▁SKILL - SHONE - ▁LONE - ▁OBSERVATION - ▁DENI - ▁NEST - ▁SOLDIER - ▁RELATION - ▁TRULY - ▁VISITOR - ▁OFFICERS - ERSON - ▁YA - ▁EVIDENT - ▁DREAMS - ▁KEEPING - ▁PLAINLY - ▁DRUNK - ▁EMBRAC - ▁INTELLIGENCE - ▁LIEUTENANT - ▁PERSUADE - ▁SURROUNDING - ▁UNIVERSAL - ▁GLEAM - ▁SUPERIOR - ▁WHEEL - ▁JEALOUS - ▁QUEER - ▁PIERRE - ▁MILK - ▁RAIL - ▁FLUSH - ▁STAIRS - ▁JESUS - ▁HORN - ▁REGION - ▁SAFETY - ▁KA - ▁GUIDE - ▁CAKE - ▁CUP - ▁INQUIRED - ▁DEFI - ▁LESSON - ▁WRETCHED - ▁PACE - ▁TEST - ▁READING - ▁ENTIRE - ▁NET - ▁DOGS - ▁COMMANDER - ▁PRODUCE - ▁GAINED - ▁ARRIVAL - ▁FAMILIAR - ▁MEANWHILE - ▁SUSPICION - ▁CHOICE - ▁IMPULSE - ▁THRUST - ▁PROCESS - ▁SUMMON - ▁SHEPHERD - ▁HASTILY - ▁GRASP - ▁COUNTESS - ▁STYLE - ▁DWELL - ▁MERIT - ▁PITCH - ▁HUNGRY - ▁SPORT - ▁LOUISE - ▁STERN - ▁PROVIDED - ▁ASSUME - ▁EARLIE - ▁RAGE - ▁U - ▁RAPIDLY - PORT - ▁SUCCESSFUL - ▁FLED - ▁AGREE - ▁CONDITIONS - ▁RELATIONS - ▁DREAD - ▁NATURALLY - ▁EARL - ▁GAY - ▁HYPNOTI - ▁PUTT - ▁GAZ - ▁JIM - ▁PAUS - ▁PROPOS - ▁ADMINISTRATION - ▁ELEVEN - ▁HOSPITAL - ▁MAGISTRATE - ▁STRIKE - ▁DIGNITY - ▁GLORY 
- ▁BOTTLE - ▁THRONE - ▁RECKON - ▁COSETTE - ▁MOREOVER - ▁APPLI - ▁HIND - ▁PRODUCT - ▁POOL - ▁TRIAL - HAN - ▁ERIC - ▁CUB - ▁PIECES - ▁EXCEPTION - ▁ENJOYED - ▁DARED - ▁TRU - ▁CLOSELY - ▁RAPID - ▁AFFECTED - ▁REQUIRE - ▁SOFTLY - ▁BROW - UCK - ▁MARKED - ▁SEVENT - ▁ELECT - ▁FORGOT - ▁CORRECT - ▁FRANCS - ▁MARGUERITE - ▁SCIENCE - ▁UNEXPECTED - ▁FOUGHT - ▁MILITA - ▁THUNDER - ▁VOYAGE - ▁GANEM - ▁FREEDOM - ▁NODDED - ▁CAPTURE - ▁MORTAL - ▁OWNER - ▁POLITE - ▁VISION - ▁EDUCATION - ▁GOVERNOR - ▁RAV - ▁REWARD - ▁HASTE - ▁REPEAT - ▁DETERMIN - ▁PITI - ▁KNEE - LINE - ▁DEVOTED - ▁INTERRUPTED - ▁FOLKS - ▁EXTREME - ▁APPROACH - ▁CONTINUE - ▁BEARING - ▁CHAP - ▁ACQUAINTED - ▁GLIMPSE - ▁GRADUALLY - ▁SUNSHINE - ▁PRACTICE - ▁SUPPLI - ▁DAVID - ▁DRIFT - ▁SHOWING - ▁LEVEL - ▁PROMPT - ▁QUARREL - ▁REPRESENTATIVE - ▁PLUNG - ▁GIANT - FALL - ▁STOUT - CHA - WEPT - ▁GLANC - ▁SALT - ▁CHOSEN - ▁BUCK - ▁REALIZED - ▁REALITY - ▁TUR - ▁DRIVEN - ▁CARD - ▁PRAYER - ▁TERM - AID - ▁HOLY - ▁ENDURE - ▁RANGE - ▁HANG - ▁SAM - LAN - ▁CAVE - INA - ▁GRI - ▁SIGH - ▁NEIGHBOUR - ▁COUNCIL - ▁EXERCISE - ▁NAUTILUS - ▁SOMEWHERE - ▁SYLVIA - ▁THOROUGH - ▁VICTIM - ▁BRIDGE - ▁COMPELLED - ▁INCLINED - ▁OVERCOME - ▁RESERVE - ▁ARREST - ▁PRECIOUS - ▁DUTCH - ▁OCEAN - ▁ACQUIR - ▁RECALL - ▁DESTIN - ▁ATTACH - ▁SLIM - ▁WEEP - ▁CONSCIOUSNESS - ▁TIGHT - ▁WAKE - ▁COMFORTABLE - ▁ACTIVE - ▁WINGS - ▁GRIN - ▁AFFECT - ▁WHIT - ▁IDEAL - ▁EASTER - ▁APPROACHING - ▁CREATED - ▁PLANS - ▁INCREASE - ▁FLYING - ▁SHOUT - OES - MISSION - ▁ARMED - ABILITY - ▁BLUSH - ▁CONNECTION - ▁MATTHEW - ▁MEDICINE - ▁REMIND - ▁EXHIBIT - ▁BLOCK - ▁DESERVE - ▁LISTENING - ▁TITLE - ▁FLOUR - ▁FLAME - ▁AGENT - ▁USEFUL - ▁BRIG - ▁BOIL - ▁ASSURED - ▁REFLECTION - ▁PINE - ▁WAG - ▁YOUNGER - ▁BEARD - ▁KINDNESS - CTUALLY - ▁ACTUAL - ▁WEIGHT - ▁LILY - ▁IMPRESS - ▁DESCRIBE - ▁BEHELD - ▁COMMUNITY - ▁DESPERATE - ▁DISPLAY - ▁ENEMIES - ▁MELANCHOLY - ▁MIRROR - ▁RECOMMEND - ▁SPANISH - ▁BLAME - ▁VOLUME - ▁SHOOT - ▁COMBIN - ▁SHAKING - ▁SOUTHERN - ▁MYSTERY - ▁EVERYONE - ▁COMMISSION - ▁COMPOSED - ▁UDO - ▁IMAGE - ▁DECEIV - ▁FAILURE - ▁PATTY - ▁ALICE - ▁FRAME - ▁MODEST - ▁MAGNIFICENT - ▁BRANCHES - ▁REIGN - ▁RAG - ▁PARISH - ▁KATE - ▁AMID - ▁SLEEPING - ▁ANNOUNCED - ▁EAGERLY - ▁WIRE - ▁LAP - ▁ARAB - ▁EATING - ▁RUM - ▁CAREFUL - ▁DISCUSS - WORTH - ▁DISTRICT - ▁FOREHEAD - ▁FRANCIS - ▁INCIDENT - ▁APPEAL - ▁EMBARRASS - ▁MAINTAIN - ▁PRONOUNC - ▁FURNISH - ▁STRAIN - ▁ELEMENT - ▁SILK - ▁FEAST - ▁RECENT - ▁DANCING - ▁LODGE - ▁ASHAMED - ▁TRICK - ▁BOBO - ▁STUFF - ▁ET - ▁ASSERT - ▁SANK - ▁TREATMENT - ECI - ▁SWIM - ▁BECOMING - ▁SINGING - ▁PLATE - ▁SCATTERED - ▁EXTREMELY - ▁GRIM - ▁SANG - ▁FIGHTING - ▁FACTOR - ▁PAINFUL - ▁HIDE - ▁FUNN - ▁AFTERWARD - ▁FROG - ▁VENTURE - ▁DISAPPOINT - ▁COMRADE - ▁MONSIEUR - ▁OBVIOUS - ▁PASSENGER - ▁PROFOUND - ▁PUBLISH - ▁ACCUSTOM - ▁BLOOM - ▁SMITH - ▁RELATIVE - ▁ACCUSE - ▁MANIFEST - ▁SOLID - ▁MONSTER - ▁MARIUS - ▁CANDLE - ▁PROCUR - ▁INTERFERE - ▁HOUSEHOLD - ▁DEVELOPMENT - ▁AGREEABLE - ▁HALT - ▁NECESSITY - FOLD - ▁CITIES - ▁REGI - ▁GLOOMY - BBL - ▁SEPARATED - ▁CHEST - ▁STRIP - ▁SPAR - ▁DUN - ▁SETTLE - ▁STARED - ▁HANGING - ▁FEATURES - ▁PILE - ▁ORIGIN - ARIES - ▁LION - ▁ALI - ▁ASTONISHMENT - ▁COMPLIMENT - ▁DELICATE - ▁COUNSEL - ▁FIFTH - ▁SUPPRESS - ▁BURDEN - ▁COMPLEX - ▁ADDITION - ▁CRUSH - ▁TWIST - ▁PIANO - ▁BRUSH - ▁CHECK - ▁ANNIE - ▁SHELTER - ▁IMPROV - ▁WESTERN - ▁LOCAL - ▁APPLE - ▁GREET - ▁MASK - ▁RUSSIAN - ▁TOWER - ▁CREW - ▁TIP - ▁WANDERING - ▁READER - ▁WANDERED - ▁DESTROY - ▁OBSERVE - MORE - ▁ESCAPED - ▁PET - ▁BUILD - ▁REAR - ▁DESTROYED - HIN - ▁OWE - ▁RANG - ▁TEAR - ▁NED - ▁OFFICER - ▁TRAP - ▁OCCUR - ▁APPOINTED 
- ▁ATMOSPHERE - ▁CHOOSE - ▁CONCLUSION - ▁CULTIVAT - ▁DESCRIPTION - ▁ENORMOUS - ▁EXHAUSTED - ▁LANDSCAPE - ▁NATASHA - ▁PROSPECT - ▁REFRESH - ▁SPECIES - ▁SURROUNDED - ▁WEAPON - ▁BLANK - ▁DEFEND - ▁EDITH - ▁HORRIBL - ▁BETRAY - ▁FERKO - ▁LABOUR - ▁NEGRO - ▁RESUMED - ▁LEAF - ▁MUSKET - ▁INTENSE - ▁MERCY - ▁ADOPT - ▁SCORE - ▁DASH - ▁LAWYER - ▁SLOPE - ▁CHUCK - ▁ASSISTANCE - ▁BROOK - ▁BREAKING - ▁ASSIST - ▁GROAN - ▁HELEN - ▁BEHAV - ▁MAIDEN - ▁CRIS - ▁SHOUTING - ▁NAY - ▁PIG - ▁ACCORDINGLY - ETTE - ▁DESIR - ▁RUB - ▁GRU - ▁PIT - ▁HEAVI - ▁OBTAINED - ▁SPARE - ▁BRANCH - ▁COUNTER - ▁APART - ▁AMBITION - ▁ASTONISHED - ▁CORRESPOND - ▁DRIVING - ▁ENERGY - ▁HISTORIAN - ▁REVOLUTION - ▁SWEEP - ▁TREMBLING - ▁CRAFT - ▁FAMILIES - ▁LITERATURE - SBURG - ▁FEMALE - ▁TILNEY - ▁GENEROUS - ▁SUBMIT - ▁INTELLECTUAL - ▁ORCHARD - ▁STORIES - ▁DIANA - ▁VEIN - ▁TRIFL - ▁TWIN - ▁WORSHIP - ▁MARBLE - ▁GALLANT - ▁SENSIBLE - ▁NEAT - ▁BROWNIE - ▁JUNE - ▁SHAW - ▁WORST - ▁USELESS - ▁FISHING - ▁CRYING - ▁MAYBE - ▁VARI - ▁PRESERVE - ▁VOL - ▁EMPLOY - ▁INTERRUPT - ▁SLIGHTLY - ▁ACCOMPLISHED - NEY - ▁STEAM - ▁BALANC - ▁LEANING - ▁SIGHED - ▁REFUSE - ▁IMAGINED - ▁DATE - GROUND - ▁ENTERTAIN - ▁PERCEIVE - ▁ABROAD - ▁CHEESE - ▁DESTRUCTION - ▁ESSENTIAL - ▁EXPEDITION - ▁GRANDFATHER - ▁INFINITE - ▁LIBRARY - ▁MULTITUDE - ▁NEGLECT - ▁SWALLOW - ▁VILLEFORT - ▁BELOVED - ▁COMMITTEE - ▁CONFIDENT - ▁PURPLE - ▁PURCHAS - ▁SCRAP - ▁SPOIL - ▁LIKEWISE - ▁EXTRA - ▁STRAW - ▁SALUT - ▁SOURCE - ▁HASTENED - ▁RESENT - ▁FLOCK - ▁LOFT - ▁FLO - ▁CLO - ▁CONVINCED - ▁GOODNESS - ▁HYPNOTIZ - ▁SETTING - ▁HAIL - ▁PHI - ▁GROVE - ▁DISCOVERY - ▁DAMP - ▁WHISPER - ▁LIFT - ▁HOP - ▁SUSPECTED - ▁SCR - OLI - ▁FAC - ▁BUSH - ▁FOREVER - ▁BARRICADE - ▁CONSTITUTION - ▁ENDEAVOR - ▁ENTHUSIASM - ▁EXECUTION - ▁HYACINTH - ▁PERCEVAL - ▁PSYCHE - ▁REPROACH - ▁THIRTEEN - ▁ABSORB - ▁GRATITUDE - ▁MERCER - ▁REPUTATION - ▁SCREAM - ▁PUPIL - ▁RETIRED - ▁STEEP - ▁SUMMIT - ▁MISERABLE - ▁STRICT - ▁MINGLED - ▁DEFEAT - ▁REVEAL - ▁LOVING - ▁GOOSE - ▁ECHO - ▁AWAIT - ▁MOOD - ▁CRAWLEY - ▁CELL - ▁ENGAGEMENT - ▁PRECED - ▁SOMEONE - ▁ARRANGEMENT - ▁PICKET - ▁GASP - ▁HUMOR - ▁INVITATION - ▁JOB - WITHSTAND - ▁LAMENT - ▁CLASSES - ▁HUNGER - ▁DISPOSED - ▁STEAMER - ▁FEARFUL - ▁GER - ▁FINAL - ▁FLAG - ▁JULY - ▁DIG - WORK - ▁OPPOS - ▁ANXIETY - ▁AUDIENCE - ▁BACHELOR - ▁COLUMN - ▁HANDKERCHIEF - ▁IMPATIENT - ▁JUDGMENT - ▁KNIFE - ▁SOVEREIGN - ▁STRIKING - ▁THOMPSON - ▁EMPIRE - ▁FULFIL - ▁CONSULT - ▁JENNY - ▁THENARDIER - ▁POYSER - ▁FOURTEEN - ▁JAPANESE - ▁INDULG - ▁MARTIAN - ▁COUNTRIES - ▁FETCH - ▁CRITIC - ▁ROBBER - ▁CROOK - ▁DEPARTURE - ▁MABEL - ▁PREACH - ESCENT - ▁WHIP - ▁NAIL - ▁DELIGHTFUL - ▁DISCUSSION - ▁SENTENCE - ▁LANE - ▁ENGINEER - ▁ARRANGED - MMY - ▁LEST - ▁RENT - MMED - ▁LIST - ▁ROBE - ▁MISSION - ▁GRACEFUL - ▁LIGHTN - STONE - COURT - ▁CONCEPTION - ▁CONTRACT - ▁DROWN - ▁EXPERIMENT - ▁HITHERTO - ▁PLAGUE - ▁PORTHOS - ▁SHRIEK - ▁DETECT - ▁ACCENT - ▁ERECT - ▁SAZEN - ▁PROFIT - ▁VIVID - ▁SQUIRE - ▁OPERATION - ▁SMELL - ▁SIMON - ▁EXTENT - ▁KEEN - ▁EMERG - ▁REVIV - ▁REGIMENT - ▁DISAPPOINTMENT - ▁STOLE - ▁DIVINE - ▁GUILTY - ▁COWARD - ▁EXPECTATION - ▁SIGNOR - ▁MODE - ▁CENTRE - ▁FIL - HOW - ▁WEARI - ▁TOTAL - ▁VICTOR - ▁GOVERN - ▁RAISE - ▁ABANDON - ▁ABSURD - ▁ASPECT - ▁CRIMINAL - ▁DEFINITE - ▁DELIBERAT - ▁FEATHER - ▁FLORINA - ▁MIDNIGHT - ▁RICHMOND - ▁SATISFY - ▁SINGULAR - ▁STEADILY - ▁SUPREME - ▁TIMBER - ▁PSYCHOLOG - ▁GESTURE - ▁VALUABLE - ▁INTERVAL - ▁CONFUSION - ▁FLUTTER - ▁SACRED - ▁DISEASE - ▁UNDERTAKE - ▁PENETRAT - ▁MARVEL - ▁NORTHERN - ▁GRIEV - ▁GENIUS - ▁SADDLE - ▁NOVEL - ▁MISERY - ▁CONVICTION - ▁SINK - ▁WAGON - ▁ARISE - ▁COMMENT - 
▁BARN - UPON - ▁FENCE - ▁ASSOCIATION - ▁BONES - ▁IDLE - ▁DOUBTFUL - ▁PREPARATION - IZZ - ▁RAIS - ▁BITTERLY - ▁JOE - ▁RELI - ADI - ▁METAL - ▁EXACT - ▁GLOOM - FIELD - ▁DANGLARS - ▁DISGRACE - ▁EXAMINATION - ▁FASCINAT - ▁GLITTER - ▁INCREASING - ▁MESSENGER - ▁PATRIOT - ▁PLATFORM - ▁PROVISION - ▁QUALITIES - ▁SELECT - ▁STEADY - ▁POVERTY - ▁POWDER - ▁PROPHET - ▁HOLLAND - ▁TRUNK - ▁VARIETY - ▁PLANCHET - ▁CONQUER - ▁CONCEIVE - ▁COMBAT - ▁STOOP - ▁SHIRT - ▁GENERATION - ▁COMMITTED - ▁INSULT - ▁CONFUSED - ▁RADIAN - ▁DEBT - ▁IMITAT - ▁DART - ▁CAROLINE - ▁SWAM - ▁WREN - ▁CHILDHOOD - ▁BRAND - ▁JOKE - ▁FRIENDSHIP - ▁DIRT - ▁JOLL - ▁BUSHES - ▁MINK - ▁ROUT - ▁EQUALITY - ▁HESITATED - ▁BARK - ▁ANTI - ▁STATEMENT - PHER - ▁SUNK - ▁DAT - ▁BACKWARD - ▁SUSPECT - ▁OBJECTION - ▁RAP - ▁CHIN - ▁MATE - ▁REDUC - ▁GREGG - ▁ACCOMPANY - ▁ANYWHERE - ▁BENEFIT - ▁CLERK - ▁EXPENSE - ▁FETNAH - ▁INTERPRET - ▁LUKASHKA - ▁NUMEROUS - ▁SURGEON - ▁PUZZL - ▁RESCUE - ▁GRATEFUL - ▁APPROV - ▁RIVAL - ▁NIECE - ▁FLOOD - ▁VANISHED - ▁ERROR - ▁BLAZ - ▁TUMBL - ▁WENDY - ▁PERSIST - ▁CONSOL - ▁SOAP - ▁HUMOUR - ▁FITTED - ▁HOUSEKEEPER - ▁ENABL - ▁OCCASIONALLY - ▁HATRED - ▁SWELL - ▁WORRY - ▁RUST - ▁PURSUIT - ▁INTIMATE - ▁SEAL - ▁COLLECTION - ▁TREMBLED - ▁DENY - ▁HUMANITY - ▁FATAL - ▁COCK - ▁DRIVER - ▁HOPELESS - ▁MISTAKEN - ▁LUC - ▁ACCOMPLISH - ▁COAL - ▁ACCORD - ▁PURSE - ▁SEPARATE - ▁ARRIVE - ▁SMOK - ▁MADAM - ▁ASSOCIAT - ▁INSTRUCT - ▁CELEBR - ▁CHANNEL - ▁CIVILIZATION - ▁DOCTRINE - ▁ENDEAVOUR - ▁GLACIER - ▁INTELLIGENT - ▁INVOLVE - ▁LEATHER - ▁MUTTERED - ▁OLENIN - ▁PENCROFT - ▁PERPLEX - ▁SPECTATOR - ▁UNIVERSITY - ▁ATTAIN - ▁INEVITABL - ▁YONDER - ▁ENCHANT - ▁REPAIR - ▁CURRENT - ▁ASCEND - ▁CREEK - ▁SPARKL - ▁RUE - ▁BEAVER - ▁INFANT - ▁CONTINUALLY - ▁CLASP - ▁IRISH - ▁ROLLIN - ▁PUNISHMENT - ▁LUNCH - ▁AGONY - ▁RUDE - ▁DRAGG - ▁INQUIRI - ▁SEX - ▁TERRIFI - ▁ROBIN - ▁PROFESSIONAL - ▁SPUR - ▁GRAIN - ▁VINE - ▁PENN - ▁ROC - ▁CHASE - ▁INFORM - ▁WRITER - ▁AVO - ▁TAP - ▁CREAT - ▁WHIL - ▁BARR - ▁ASSURE - ▁CIRCUMSTANCE - ▁OIL - ▁ROUSE - ▁COLUMB - ▁CUNNING - ▁DOMESTIC - ▁GLORIOUS - ▁INDIGNATION - ▁PRECISELY - ▁PRUDENCE - ▁RAILROAD - ▁SATURDAY - ▁UTMOST - ▁VIOLENCE - ▁WHIRL - ▁CALCULAT - ▁OVERWHELM - ▁PERPETUAL - ▁QUARLES - ▁SLENDER - ▁TELEGRAPH - ▁ALOUD - ▁OPPRESS - ▁CROPPER - ▁CANADIAN - ▁HERBERT - ▁TIMID - ▁SUPPLY - ▁STROLL - ▁CREEP - ▁OATH - ▁DUSK - ▁EXCESS - ▁HUMBLE - ▁FURIOUS - ▁RIDGE - ▁BULLET - ▁PONY - ▁STATU - ▁ENJOYMENT - ▁CONWAY - ▁DIFFICULTIES - ▁PATCH - ▁JOYCE - ▁CLOCK - ▁RESTORED - ▁ARGU - ▁WIG - ▁CHATT - ▁PLAC - ▁REMOVE - ▁TORN - ▁DISAPPEAR - TIME - WELL - ▁RECOGNIZE - ▁FISHE - ▁DECLARE - ISTIC - ▁AUTHOR - ▁WHISK - ▁COFFEE - ▁COMPREHEND - ▁DISGUISE - ▁ELZEVIR - ▁ENTERPRISE - ▁HOLIDAY - ▁HORIZON - ▁IGNORANT - ▁INTERVIEW - ▁OLIVER - ▁RONICKY - ▁CAPACITY - ▁DISPOSITION - ▁EXTERNAL - ▁OPPOSITION - ▁REPUBLIC - ▁WHEAT - ▁CORPSE - ▁DARLING - ▁THRILL - ▁INHABITANTS - ▁ORNAMENT - ▁SHIFT - ▁RECOGNISE - ▁SHIVER - ▁BOAST - ▁HINT - ▁BOSTON - ▁MULTI - IFYING - ▁STEAL - ▁INSTRUCTIONS - ▁ELECTRIC - ▁SWING - ▁SOOTH - ▁SCALE - ▁MORLAND - ▁DISLIKE - ▁FLATTER - ▁COACH - ▁LEIF - ▁STAMP - ▁ANYHOW - ▁MOTIONLESS - ▁ANDREA - ▁LOSING - ▁PAUL - ▁CAROL - ▁ADVANC - ▁IMAGIN - ▁CENTER - ▁JAR - ▁SUCCEED - ▁DISMISS - CTOR - ▁RECEIV - ▁DRAG - ▁INTENT - ▁BARBAR - ▁PUNISH - ▁ABRUPTLY - ▁BERNARD - ▁DECISION - ▁INDEPENDENT - ▁PROVINCE - ▁SLEEVE - ▁TREMENDOUS - ▁UNPLEASANT - ▁LEISURE - ▁THRONG - ▁THUMB - ▁BANNER - ▁CONTRADICT - ▁RESTRAIN - ▁DIVIDED - ▁WRAPPED - ▁HAUNT - ▁SNEER - CHESTER - ▁JULIA - ▁MILD - ▁CONTACT - ▁MEANTIME - ▁NEEDLE - ▁BLOT - ▁BARREL - ▁ISABELLA - ▁THEATRE - ▁ESTABLISHMENT 
- ▁MARKET - ▁CHINA - ▁FORBID - ▁PERISH - ▁DOORWAY - ▁CARLING - ▁PERIL - ▁PRIZE - ▁HATCH - ▁CURL - ▁REFER - ▁DEVOT - EMBER - MONT - ▁CANOE - ▁PROFESSION - ▁CONVICT - ▁CRAWL - ▁ACTIVITY - ▁BEWILDER - ▁BREEZE - ▁CONTEMPLAT - ▁DISGUST - ▁FATIGUE - ▁MERRICK - ▁PRAIRIE - ▁REFORM - ▁SPECTACLE - ▁STUDENT - ▁TUMULT - ▁UNIFORM - ▁VIGOROUS - ▁CONDEMN - ▁GENUINE - ▁THOMAS - ▁ARROW - ▁PILLOW - ▁FEEBLE - ▁RALPH - ▁SCHEME - ▁COLLAR - ▁JUSTINIAN - ▁NERVE - ▁OYSTER - ▁BENNET - ▁DUTIES - ▁BINGLEY - ▁CHRISTMAS - ▁CONVEY - ▁DESPIS - ▁RATTL - ▁GARMENTS - ▁GOWN - ▁BERYL - ▁BARRIER - ▁CHARACTERISTIC - ▁MEDITAT - ▁DISCOURSE - ▁STAFF - ▁KARA - ▁MONTE - ▁READILY - ▁VENTUR - ▁HENCE - ▁ROPE - ▁CRIES - ▁ANGLE - ▁RESPECTABLE - ▁MOAN - ▁OUTLINE - BORN - ▁FIX - ▁INTEND - LIA - ▁CHILL - ▁CREP - ▁CHOSE - ▁SPECULAT - ▁ATTRIBUT - ▁BUFFALO - ▁ENTREAT - ▁ENVELOP - ▁FREDERICK - ▁IMPATIENCE - ▁INDIFFERENCE - ▁INDUSTRY - ▁INSTITUTION - ▁LYNDE - ▁RETAIN - ▁TROUTINA - ▁UNCOMFORTABL - ▁VENGEANCE - ▁JENKS - ▁CONGRESS - ▁SMART - ▁THITHER - ▁DISAGREE - ▁IMPROVEMENT - ▁PISTOL - ▁GOSSIP - ▁ETERNAL - ▁BELIEF - ▁SLEDGE - ▁AROUSED - ▁ORANGE - ▁FASTENED - ▁MONKEY - ▁WITHDREW - ▁OFFEND - ▁PIERC - ▁MOONLIGHT - ▁OARS - ▁GROOM - ▁FIDDLER - ▁BARBARA - SHIRE - ▁ATTENDANT - ▁DIVERS - ▁DUCK - ▁PROPOSAL - ▁GROWTH - ▁CURATE - ▁STEWAR - ▁MOCK - ▁SUCCESSION - ▁CREATION - ▁PARTIAL - ▁SWU - ▁FROST - ▁EIGHTH - ▁AWE - ▁PERCH - ▁LACE - SPOON - ▁ARRANGE - SERIES - ▁FOG - ▁SCU - ▁ABRAHAM - ▁ADMIRAL - ▁BARBICANE - ▁CAMPAIGN - ▁CONSEQUENTLY - ▁CULTURE - ▁GRAMMONT - ▁GWYNPLAINE - ▁HAPPILY - ▁HOOPDRIVER - ▁INDEPENDENCE - ▁LEOPOLD - ▁MISCHIEF - ▁MONTGOMERY - ▁NECESSARILY - ▁PSYCHIC - ▁RABBIT - ▁REFUGE - ▁RESPONSIBILIT - ▁SENATOR - ▁UNCERTAIN - ▁MENSTRUA - ▁FANNY - ▁SUBSTANCE - ▁APRIL - ▁ELBOW - ▁QUALITY - ▁BORDER - ▁BRUTAL - ▁CARPET - ▁SOLITAR - ▁FROWN - ▁SCENT - ▁ANNOY - ▁NAKED - ▁BOSOM - ▁CONSUM - ▁TIGER - ▁ITALIAN - ▁PARSON - ▁DECLIN - ▁NEIGHBORHOOD - ▁GREGGORY - ▁EXCEED - ▁SILLY - ▁ICELAND - ▁HIDEOUS - ▁STRU - ▁ALTERNAT - ▁CABINET - ▁ABILITY - ▁BEECH - ▁SECRETARY - ▁CONTEST - ▁MONK - ▁PADD - ▁EVA - ▁CREST - ▁FINISH - ▁APPARENT - ▁MIX - ▁SLIP - ▁LUXURI - ▁AUTUMN - ▁CIRCULAR - ▁COMPOSITION - ▁DISPLEAS - ▁EXCELLENC - ▁FURNITURE - ▁GRADUATE - ▁INDIFFERENT - ▁JOSEPH - ▁OCCUPATION - ▁POSSIBILITY - ▁RENEWED - ▁RESPONDED - ▁PREVAIL - ▁HOARSE - ▁PRACTIS - ▁FAREWELL - ▁JULIET - ▁OVERHEAD - ▁THREAD - ▁APPLICATION - ▁SOLITUDE - ▁ADAPT - ▁FALK - ▁LARK - ▁COARSE - ▁MANKIND - ▁KICK - ▁BATTER - ▁SOLICIT - ▁RESIGN - ▁MOTOR - ▁STEEL - ▁CONTRIV - ▁AUTHORITIES - ▁HARSH - ▁FAVORITE - ▁TALENT - ▁FLEECE - ▁AGITATION - ▁ABBE - ▁STUCK - ▁HEDGE - ▁BIBLE - ▁RECOLLECTION - ▁PARTNER - ▁DAMON - ▁SHINE - ▁HOOK - ▁CONFESSION - ▁ASSENT - ▁ELDE - ▁BIGGE - ▁PEACEFUL - SCRIBED - ▁WEIGH - CARLET - ▁DECIDE - ▁RECOLLECT - ▁BOHEMIA - ▁CALIFORNIA - ▁CONSTRUCT - ▁DEMONSTRAT - ▁DISTRIBUT - ▁FRIGHTFUL - ▁GNOME - ▁IGNORANCE - ▁JANUARY - ▁JULIUS - ▁MEMORIES - ▁OCCUPY - ▁PHRASE - ▁WHIRLWIND - ▁WILMINGTON - ▁CARLINI - ▁CHAUVELIN - ▁ESTEEM - ▁GENZABURO - ▁GLOBE - ▁LECOQ - ▁MARGARET - ▁MONARCH - ▁NAPOLEON - ▁SCORN - ▁STAGGER - ▁SUSTAIN - ▁TRADITION - ▁ADJUST - ▁FROZEN - ▁IMPRISON - ▁LANTERN - ▁MICHEL - ▁STOMACH - ▁TORRENT - ▁WITHDRAW - ▁FRANZ - ▁POISON - ▁SURVEY - ▁BRITISH - ▁ELEVAT - ▁AWOKE - ▁ESTHER - ▁INHERIT - ▁TRAVERS - ▁STOPPING - ▁IRELAND - ▁COMPARATIVE - ▁SOBB - ▁FAVOURITE - ▁CANVAS - ▁CLOAK - ▁GLAR - ▁ASSISTANT - ▁DAMAGE - ▁PEAK - ▁DISTINCTION - FARE - ▁DOLLAR - ▁BEGGAR - LUSIVE - ▁MODEL - ▁SECUR - ▁DISPOS - ▁SLID - ▁PEA - ▁SPEEDI - HOLD - ▁SNAP - ▁CIGAR - ▁AFFLICT - ▁AMAZEMENT - ▁LAUNCELOT - ▁LEAGUE 
- ▁MARIPOSA - ▁POPULATION - ▁UNEASY - ▁BLOSSOM - ▁CATERPILLAR - ▁INCLINATION - ▁SUSPEND - ▁SYNDIC - ▁TAYLOR - ▁WILSON - ▁CONTRAST - ▁PORTRAIT - ▁CORONER - ▁GREEK - ▁BUNDLE - ▁BLEW - ▁THORPE - ▁ORPHAN - ▁MUSCLE - ▁DEAF - ▁SURVIV - ▁EXCEEDINGLY - ▁TENDENC - ▁ISRAEL - ▁QUANTIT - ▁PENSION - ▁DRIED - TEXT - ▁REFERENCE - ▁REPOSE - ▁FOLLY - ▁REPLACE - ▁TERR - ▁ANKLE - ▁SUNLIGHT - ▁SECURITY - ▁SHOV - ▁RAW - CULAR - ▁JACKET - ▁TUNE - ▁HOBB - ▁MARTIN - DUCED - ▁FIST - ▁BEGG - ▁CHOK - ▁INQUIRE - ▁INTELLECT - ▁AMUSEMENT - ▁APPROPRIATE - ▁CONGRATULAT - ▁CONVENTION - ▁DISCOURAG - ▁EXQUISITE - ▁FOUNTAIN - ▁JUNIOR - ▁NONSENSE - ▁OBSTACLE - ▁SPECIMEN - ▁SWEAR - ▁TRANQUIL - ▁VEHICLE - ▁WISDOM - ▁ASCERTAIN - ▁CAUTIOUS - ▁CENTURIES - ▁CORRUPT - ▁EXPLOR - ▁TURKEY - ▁BARGAIN - ▁CONFOUND - ▁FUNCTION - ▁GRACIOUS - ▁MONICA - ▁ILLUSTRAT - ▁CRUMB - ▁REMEDY - ▁REMOTE - ▁REVENGE - ▁BABYLON - ▁CAUTION - ▁INTERIOR - ▁CRISTEL - ▁BRAZ - ▁THIRST - ▁PROBABLE - ▁HARMONY - ▁CHARITY - ▁DECAY - ▁COLONI - ▁AVAIL - ▁REPULS - ▁ABSENT - ▁PULSE - ▁PRESUM - ▁CRANE - ▁NEIGHBOURHOOD - ▁SUNSET - ▁CANNON - ▁GRAPE - ▁SOFA - ▁DRANK - MINOUS - ▁DECLARATION - ▁CLOSING - ▁MEEK - ▁STARV - ▁BUNCH - ▁PERFORMANCE - ▁ENTERTAINMENT - ▁STRIV - ▁EMILY - ▁VALET - MPOSED - ▁INTIMA - ▁POLISH - ▁HIRE - POST - ▁TREMBLE - ▁CEASE - ▁VIRGIN - ▁RUSSIA - COURSE - ▁EDUCAT - BOUND - ▁INHABIT - ▁SUPERINTEND - ▁BISCUIT - ▁CHICAGO - ▁CHOKICHI - ▁CONFLICT - ▁ENCLOS - ▁EXCLUSION - ▁EXECUTIVE - ▁GRANDMOTHER - ▁HEADQUARTERS - ▁INFERIOR - ▁INVISIBLE - ▁MUTUAL - ▁OPPONENT - ▁SENSITIVE - ▁STUDIED - ▁TEMPORARY - ▁UNWILLING - ▁PERMANENT - ▁BEDROOM - ▁NOVEMBER - ▁COMPLICAT - ▁DEVOUR - ▁SCRAMBL - ▁SECTION - ▁PROPOSITION - ▁DEPRIV - ▁RYNCH - ▁PLEAD - ▁TORTURE - ▁SCOUT - ▁PILOT - ▁CHERISH - ▁SPEAR - ▁SUGAR - ▁JASPER - ▁STRAY - ▁RIFLE - ▁NORMAL - ▁JERK - ▁HONEY - ▁AWAKENED - ▁QUIVER - ▁PYE - ▁APPLY - LICK - JA - ▁ANNOUNC - FORE - ▁ENGINE - ▁HESITATE - ▁PROVIDE - ▁REALIZE - ▁SEIZE - ▁RESTORE - MOUTH - FOOT - ▁DIFFER - ▁ULTIMATE - ▁ABUNDANCE - ▁APPRECIATE - ▁APPREHENSION - ▁AVENUE - ▁AWKWARD - ▁CETERA - ▁CHIMNEY - ▁CLUTCH - ▁CONVENIENT - ▁CORRIDOR - ▁DISTRACT - ▁ELEGANT - ▁ELSEWHERE - ▁ENTHUSIASTIC - ▁EXECUTE - ▁EXTREMIT - ▁JERUSALEM - ▁MIRACLE - ▁MONSTROUS - ▁OBEDIENCE - ▁OBSCURE - ▁PHENOMENA - ▁RESIDENCE - ▁RESOURCE - ▁REVOLT - ▁SCIENTIFIC - ▁SHIELD - ▁SIMPSON - ▁UNIVERSE - VOLUNTARY - ▁ATTENTIVE - ▁BRENDA - ▁DEPOSIT - ▁MAXIM - ▁REJECT - ▁STIRRED - ▁DISORDER - ▁SERENE - ▁TOBACCO - ▁MILTON - ▁BALLOON - ▁STEPHEN - ▁STRAIT - ▁CHINESE - ▁COURTEOUS - ▁RELEASE - ▁RECESS - ▁COTTON - ▁STUMP - ▁TANK - ▁PROMOTE - ▁DERIVE - ▁LOYAL - ▁GRANIT - ▁DISMAL - ▁CATTLE - ▁DOONE - ▁CUPID - DIGNIFIED - ▁RIPE - ▁EXILE - ▁ANTIQU - UMINAT - ▁SUPPOS - ▁WRETCH - ▁IDENTI - ▁EASI - ▁SERV - ▁QUEST - TOWN - ▁ACHIEVEMENT - ▁APPETITE - ▁BUCCANEER - ▁COMMENCED - ▁DELAWARE - ▁DISCERN - ▁IMMORTAL - ▁INDIGNANT - ▁JOSIANA - ▁MECHANICAL - ▁MUSKRAT - ▁REVIEW - ▁ROBARTS - ▁SIGNIFICANT - ▁SUBSEQUENT - ▁YOURSELVES - ▁ANGRILY - ▁BORROW - ▁SUBLIME - ▁AFRICA - ▁CHICKEN - ▁DEGRAD - ▁GEORGI - ▁HUMILIAT - ▁LODGING - ▁REDCOAT - ▁VIOLET - ▁HOPKINS - ▁RAWDON - ▁PRICK - ▁WHALE - ▁FUNERAL - ▁GUINEA - ▁DISMAY - ▁PORCH - ▁HARVEST - ▁PARCEL - ▁SUBDU - ▁SYRIA - ▁PANIC - ▁BOUGHS - ▁CIGARETTE - ▁CHRON - ▁INQUIRY - ▁CRYSTAL - ▁SPELL - ▁PLUCK - ▁PATTERN - ▁DARING - ▁CRITICISM - ▁DAINT - ▁DISTURBANCE - ▁BUTCHER - ▁LITERA - ▁ABUSE - IXTURE - ▁ANIMAT - ▁WRIT - ▁BELIEV - ▁INDUCE - COMING - ▁DRAMA - ▁AGITAT - SHAW - ▁IMPERFECT - ▁MANUFACTURE - ▁AFFIRM - ▁ANGUISH - ▁ARTIFICIAL - ▁BIBBS - ▁CHARLOTTE - ▁CIRCUS - ▁CONNISTON - ▁CONSTITUTE - ▁DAZZL - 
▁DEFECT - ▁DISCHARG - ▁ESCORT - ▁EXAGGERAT - ▁GWENDOLEN - ▁IRRESISTIBL - ▁PHILOSOPHY - ▁PHOTOGRAPH - ▁PILGRIM - ▁PLEASING - ▁QUIXOTE - ▁RESPONSE - ▁SCRATCH - ▁SERGEANT - ▁SHERIFF - ▁SHUDDER - ▁STRUCTURE - ▁SUFFRAGE - ▁SURRENDER - ▁SWORE - ▁VILLAIN - ▁HESITATING - ▁FLORENCE - ▁IRRITAT - ▁RIGID - ▁SINISTER - ▁STUDIO - ▁RAFT - ▁CHAMPION - ▁PAVEMENT - ▁WOLF - ▁DEVICE - ▁WRECK - ▁HESITATION - ▁LAZY - ▁ADJO - ▁DECENT - ▁INTERVEN - ▁WOOL - ▁ILLUSION - ▁HAWK - ▁IMPART - ▁LUNGS - ▁WINNING - ▁VITAL - ▁CONSPI - ▁SUBTLE - ▁CONSTANC - ▁HURL - ▁AMIABL - ▁FOLK - GGY - ▁NECESSIT - ▁PROFESS - WASH - ▁ADMIRING - ▁AMBITIOUS - ▁ANTHONY - ▁CEREMONY - ▁CONTRIBUTE - ▁CRAGGS - ▁DETAIN - ▁DISCLOS - ▁DWELT - ▁EGYPT - ▁FELIX - ▁JOURNAL - ▁KWAIRYO - ▁LIBERAL - ▁LUMBER - ▁OCTOBER - ▁ORGANIZATION - ▁POPULACE - ▁PRECAUTION - ▁PREJUDICE - ▁PROCLAIM - ▁PROPRIETOR - ▁RESPONSIBLE - ▁RHYTHM - ▁RIDICULOUS - ▁SCHOLAR - ▁SQUEEZ - ▁SUBSTITUTE - ▁SURPASS - ▁THRESHOLD - ▁WHARTON - ▁FLICKER - ▁AMAZED - ▁BRONZE - ▁COSSACK - ▁SPILETT - ▁CASUAL - ▁DARCY - ▁PARLOUR - ▁SEXUAL - ▁INSECT - ▁NATHAN - ▁EMINENT - ▁PENCIL - ▁PETITION - ▁ROTTEN - ▁VIGIL - ▁CAESAR - ▁EAGLE - ▁TREAD - ▁REACTION - ▁TACIT - ▁PARLOR - ▁SPAIN - ▁WILDERNESS - ▁DICTAT - ▁GRATIFY - ▁STOVE - ▁SKIRT - ▁UTILI - ▁CONCERT - ▁GORGE - ▁DECORAT - ▁LATIN - ▁ANCHOR - ▁KNOT - ▁MONDAY - ▁GABLES - ▁TOLERABL - ▁ROGER - BERRIES - ▁INVAD - IMMER - OMETER - ▁PRODUC - OBIL - ▁PERMISSI - FICIENCY - ▁WANDER - RREL - PIECE - HORN - ▁COMMIT - ▁ACCUMULAT - ▁JAPAN - ▁ABUNDANT - ▁ACADEMY - ▁ALBERT - ▁BANQUET - ▁DELICIOUS - ▁DOCUMENT - ▁EXCLAMATION - ▁FEBRUARY - ▁GROTESQUE - ▁HEATHERSTONE - ▁HUMPHREY - ▁HURSTWOOD - ▁MOHAMMED - ▁MOSCOW - ▁NICHOLAS - ▁OBSTINATE - ▁PHANTOM - ▁PHILOSOPHER - ▁RECEPTION - ▁SPANIARD - ▁SWOLLEN - ▁TELEPHONE - ▁TRIBUTE - ▁TUNNEL - ▁UNREASONABL - ▁WIGWAM - ▁BUTTERFLY - ▁COLLINS - ▁DISPATCH - ▁EDITOR - ▁CONTINENT - ▁DIMINISH - ▁HORRID - ▁KEATS - ▁PROVIDENCE - ▁BEHALF - ▁CHARLEY - ▁DRAKE - ▁LAUNCH - ▁SALOON - ▁GIGANT - ▁DISPUTE - ▁HYSTERI - ▁DEFENCE - ▁SCREEN - ▁VAULT - ▁NINTH - ▁HARBOR - ▁FLANK - ▁SPECK - ▁UPRIGHT - ▁KEMP - ▁CANADA - ▁STALK - ▁OWL - ▁BRUTE - ▁FERRIS - ▁DECREE - ▁HABITUAL - ▁BRISK - ▁INSPIRE - ▁HUSH - ▁CROUCH - ▁FRIDAY - ▁MOUNTAINEER - ▁HISTORIC - ▁BATES - ▁RUSK - ▁SEMI - DICTION - ▁BUSI - ▁REMOV - MMI - ▁SUFFIC - ▁FLEE - ▁LOUIS - NLEA - ▁IMPORT - OLOGY - ▁CLERGY - ▁ADVERTISEMENT - ▁BENEVOLEN - ▁BORODINO - ▁CATHOLIC - ▁COMMERCIAL - ▁CONJECTURE - ▁CURTAIN - ▁CUTHBERT - ▁DEMOCRACY - ▁GUARANTEE - ▁HYPNOSIS - ▁INDEFINITE - ▁INVESTIGATION - ▁IRREGULAR - ▁KOYO - ▁MERRIWIG - ▁MIRANDA - ▁NICHOLL - ▁ONLOOKER - ▁PERSECUT - ▁RECOGNITION - ▁REJOICE - ▁REMEMBRANCE - ▁REVELATION - ▁SCOLD - ▁SENIOR - ▁SQUIRREL - ▁SYMPATHETIC - ▁TEMPEST - ▁TREACHER - ▁UNDERNEATH - ▁UNEASINESS - ▁UNNECESSARY - ▁UPSTAIRS - ▁VEXATION - ▁ACCESS - ▁CHEAP - ▁ESTIMATE - ▁HAZARD - ▁HORSEBACK - ▁PLUNDER - ▁RASCAL - ▁ROSTOV - ▁ACCUR - ▁GRAVITY - ▁SITUATED - ▁INVARIABL - ▁PLENTIFUL - ▁SPENCER - ▁WALLACE - ▁POLICY - ▁WARRANT - ▁ENVY - ▁LAMB - ▁EXTRACT - ▁CORRAL - ▁PANEL - ▁LINK - ▁LILIES - ▁BECKON - ▁SENOR - ▁BORG - ▁DEBATE - ▁STEER - COGNI - COMB - ▁SETTL - ▁VENERA - ▁FEATURE - ▁TERRIBL - CAPABLE - OLOGICAL - ▁INCESSANT - ▁RESOLUTE - SHAUGHNESSY - ▁ABOLITION - ▁ASSASSIN - ▁BEHAVIOUR - ▁BLUNT - ▁COMMERCE - ▁CONSTANTINOPLE - ▁CRICKET - ▁DISCIPLINE - ▁DROUET - ▁DWARF - ▁INJUSTICE - ▁LUXURY - ▁MANUSCRIPT - ▁MISUNDERSTAND - ▁POLITICIAN - ▁REDOUBT - ▁SALVATION - ▁SERMON - ▁STRUGGLING - ▁SURPRISING - ▁TRIGGER - ▁TUESDAY - ▁TWILIGHT - ▁UNDOUBTEDLY - ▁VEGETABLE - ▁VULGAR - ▁WAISTCOAT - ▁WRINKLE - ▁ALEXANDER 
- ▁CEILING - ▁ECONOMIC - ▁EVERLASTING - ▁INFLICT - ▁LEVISON - ▁LOBSTER - ▁OVERFLOW - ▁SNATCH - ▁TRAGEDY - ▁DEASEY - ▁ENLIGHTEN - ▁FRIGATE - ▁INSPECT - ▁MARVELLOUS - ▁ATLANTIC - ▁LUFTON - ▁BLADE - ▁CRASH - ▁SLAUGHTER - ▁ANNUAL - ▁CONFERENCE - ▁TWIG - ▁REASSUR - ▁UNIQUE - ▁WRATH - ▁CRADLE - ▁HULLO - ▁LIQUID - ▁MIRTH - ▁EXPERT - ▁HARVEY - ▁RESTORATION - ▁PRETTI - ▁APOLOGY - ▁SLAIN - ▁BARBER - ▁UPROAR - ▁SCANT - ▁BADGER - ▁GROCER - ▁ACRES - ▁BRIDLE - ▁SPECIFI - ▁TANGLE - ▁FERTIL - ▁PATRON - WIXT - LAMOUR - ▁DARN - ▁POPE - ▁PERCEIV - ▁CONCLUDE - ▁SIMPL - ▁GUILT - ▁CARRIE - EFFICIENT - SGIVING - ▁APPOINTMENT - ▁APPRECIATION - ▁CARTRIDGE - ▁CHALLENGE - ▁CRAYFISH - ▁CRIMSON - ▁CUCUMETTO - ▁ENERGETIC - ▁EPOCH - ▁EXAMINING - ▁EXTENSIVE - ▁EXTINGUISH - ▁GLOODY - ▁INSIGNIFICANT - ▁LANDLORD - ▁LANGUID - ▁LEGISLATURE - ▁MAJESTIC - ▁PACIFIC - ▁PASTRINI - ▁PHRONSIE - ▁RECONCIL - ▁SIMULTANEOUS - ▁SKELETON - ▁SKETCH - ▁TRANSFORM - ▁UNJUST - ▁VEXED - ▁ASYLUM - ▁CLUSTER - ▁ERRAND - ▁EXPEND - ▁NEGATIVE - ▁NORHALA - ▁SCANDAL - ▁STIMULAT - ▁SWEAT - ▁COMPOUND - ▁DECEMBER - ▁EXPAND - ▁PROLONG - ▁PURITAN - ▁CONQUEST - ▁MAGUA - ▁SANCHO - ▁TRENCH - ▁ENTITLE - ▁PEPPER - ▁DISASTER - ▁REGAIN - ▁SHREWD - ▁SULLEN - ▁CLAVIER - ▁COLOSS - ▁SHILLING - ▁ETHEL - ▁MYSTERIES - ▁BULK - ▁GRANDEUR - ▁AGNES - ▁CONVERT - ▁WRIST - ▁GLID - ▁TERRACE - ▁SONYA - ▁DANTES - ▁MOULD - ▁MAGNET - ▁PLOT - RANK - ▁CAVIT - ▁SUBSID - ▁SLAP - TURNED - ▁THREAT - BREAK - ▁ANCESTORS - ▁ANTICIPATED - ▁APPLAUSE - ▁ASSAULT - ▁ATTORNEY - ▁AUTOMATIC - ▁CARAVAN - ▁CATASTROPHE - ▁CAVALCANTI - ▁CROMWELL - ▁ENVOY - ▁EXHAUSTION - ▁FIEND - ▁GENEROSITY - ▁GIMBLET - ▁HARDQUANONNE - ▁HOUARN - ▁INJURY - ▁MACKINSON - ▁OGLETHORPE - ▁PETTICOAT - ▁RASPBERR - ▁REHNHJELM - ▁REJOICING - ▁REMNANT - ▁SCOTLAND - ▁SHRINK - ▁STANDPOINT - ▁TESTIMONY - ▁THEREAFTER - ▁THIRTIETH - ▁TWENTIETH - ▁TYRANT - ▁VENTNOR - ▁VETERAN - ▁WHITTAKER - ▁ZVERKOV - ▁ARCHITECTUR - ▁BLUNDER - ▁DENSHER - ▁FORTNIGHT - ▁JUDITH - ▁MARIANNE - ▁MEMORABLE - ▁REFINED - ▁REVOLV - ▁UNDERTAKING - ▁CLUMP - ▁GRUMBLE - ▁SYMPATHI - ▁TICKET - ▁TWITCH - ▁EDITION - ▁FALANDER - ▁CARTHAGE - ▁ORLEANS - ▁POSSUM - ▁SWITCH - ▁CLUNG - ▁CARDINAL - ▁GNAW - ▁LOCATED - ▁HARROW - ▁RASH - ▁SIEGE - ▁LOAF - ▁BRUISE - ▁REGULAT - ▁RESORT - ▁SARAH - ▁LEVIN - ▁NAVY - ▁MOOSE - ▁STOOL - ▁CHANCELLOR - ▁INGENIOUS - ▁CHALK - ▁PRETENCE - ▁REPAY - ▁ROAST - ▁PLUTO - ▁BAFFL - ▁STUMBL - ▁SPHERE - ▁PLEDGE - ▁SPRAWL - ▁WRAP - ▁FRINGE - ▁DREAR - ARRINGTON - ▁FEDERA - KEEPER - ▁PHYSIC - ▁ADVENT - HUMAN - OLOGIST - ▁ALEXANDR - ▁APPARITION - ▁BARTHOLEMY - ▁CITOYEN - ▁CLIMATE - ▁CONTEMPORAR - ▁DESOLATE - ▁DISCONTENT - ▁ELEPHANT - ▁FERNANDO - ▁FERRALTI - ▁FOLIAGE - ▁FUGITIVE - ▁GAMBLING - ▁INVOLUNTARILY - ▁LABYRINTH - ▁LEGITIMATE - ▁MILLIONAIRE - ▁PERCEPTION - ▁PROPRIETY - ▁REBELLION - ▁REFRAIN - ▁RUGGLES - ▁SCRIPTURE - ▁SPLENDOR - ▁SQUADRON - ▁STRICKEN - ▁SWARM - ▁THEODORA - ▁TOMORROW - ▁VELVET - ▁WOLVES - ▁DISREGARD - ▁GLIMMER - ▁SHROUD - ▁TWINKLING - ▁UNEQUAL - ▁CHANNING - ▁CLUMS - ▁ENIGMA - ▁NAVIGAT - ▁TARKAS - ▁TEMPERATURE - ▁DIVISION - ▁GRATIFICATION - ▁MONUMENT - ▁SQUEAK - ▁KAVIN - ▁INTERPOSE - ▁THORNTON - ▁SOLUTION - ▁STREAK - ▁SHRILL - ▁APRON - ▁PITEOUS - ▁HAUGHTY - ▁RECKLESS - ▁EMPTI - ▁WADMAN - ▁BONNET - ▁MARTHA - ▁DUMB - ▁SHATTER - ▁ACUTE - ▁BRINK - ▁CAPRICE - ▁HURON - ▁INFERN - ▁FOWL - ▁ENRAGE - ▁ADORN - ▁CRUIS - ▁PROBABILIT - ▁EXPIR - ▁IMPETU - ▁OVERHEAR - BURTON - ▁TRANSLAT - ▁ENGAGE - ▁CONVINCE - ▁ABNORMAL - ▁GESTICULAT - ▁ABOMINABL - ▁ADVERSARY - ▁ADVERTISER - ▁ADVERTISING - ▁ANNIHILAT - ▁ARTILLERY - ▁CATHEDRAL - ▁COMPETITOR - ▁COULSON - 
▁CREVICE - ▁CUSHION - ▁DEBRAY - ▁DEJECT - ▁DIETRICH - ▁DISADVANTAGE - ▁ELLISON - ▁EMPHASIS - ▁EXCURSION - ▁FANTASTIC - ▁HYPOTHES - ▁INCONVENIENCE - ▁INDESCRIBABLE - ▁INDUSTRI - ▁INVALID - ▁MERCILESS - ▁MESOPOTAMIA - ▁MOSQUITO - ▁NARRATIVE - ▁NOWADAYS - ▁OPPORTUNITIES - ▁PROMISING - ▁RECTANGLE - ▁REMONSTRANCE - ▁RESTAURANT - ▁RIBBON - ▁SCIENTIST - ▁SHALMANESER - ▁SKULL - ▁SPRUCE - ▁SUBSTANTIAL - ▁SYMBOL - ▁TEAPOT - ▁TERRITORY - ▁TRAFFIC - ▁TREASON - ▁TRUMPET - ▁TYRANN - ▁UNANIMOUS - ▁UNAWARE - ▁VICINITY - ▁WREATH - ▁ZADIG - ▁CHATEAU - ▁CONFRONT - ▁DUCHESS - ▁EMBODI - ▁FEMININ - ▁FURNACE - ▁MONTONI - ▁RENOWN - ▁SMASH - ▁HARVARD - ▁NEWBERRY - ▁PERFUME - ▁SIGNATURE - ▁SPLASH - ▁SUPPOSITION - ▁HARBOUR - ▁ASSURANCE - ▁BRISTOL - ▁BUCKINGHAM - ▁DUDLEY - ▁INTENSITY - ▁CHOPIN - ▁ENLIST - Q - <sos/eos> init: null input_size: null ctc_conf: dropout_rate: 0.0 ctc_type: builtin reduce: true ignore_nan_grad: null zero_infinity: true joint_net_conf: null use_preprocessor: true token_type: bpe bpemodel: data/en_token_list/bpe_unigram5000/bpe.model non_linguistic_symbols: null cleaner: null g2p: null speech_volume_normalize: null rir_scp: null rir_apply_prob: 1.0 noise_scp: null noise_apply_prob: 1.0 noise_db_range: '13_15' short_noise_thres: 0.5 frontend: default frontend_conf: n_fft: 512 win_length: 400 hop_length: 160 fs: 16k specaug: specaug specaug_conf: apply_time_warp: true time_warp_window: 5 time_warp_mode: bicubic apply_freq_mask: true freq_mask_width_range: - 0 - 27 num_freq_mask: 2 apply_time_mask: true time_mask_width_ratio_range: - 0.0 - 0.05 num_time_mask: 5 normalize: global_mvn normalize_conf: stats_file: exp/asr_stats_raw_en_bpe5000_sp/train/feats_stats.npz model: espnet model_conf: ctc_weight: 1.0 lsm_weight: 0.1 length_normalized_loss: false preencoder: null preencoder_conf: {} encoder: e_branchformer encoder_conf: output_size: 256 attention_heads: 4 attention_layer_type: rel_selfattn pos_enc_layer_type: rel_pos rel_pos_type: latest cgmlp_linear_units: 1024 cgmlp_conv_kernel: 31 use_linear_after_conv: false gate_activation: identity num_blocks: 12 dropout_rate: 0.1 positional_dropout_rate: 0.1 attention_dropout_rate: 0.1 input_layer: conv2d layer_drop_rate: 0.0 linear_units: 1024 positionwise_layer_type: linear use_ffn: true macaron_ffn: true merge_conv_kernel: 31 postencoder: null postencoder_conf: {} decoder: rnn decoder_conf: {} preprocessor: default preprocessor_conf: {} required: - output_dir - token_list version: '202211' distributed: false ``` </details> ### Citing ESPnet ```BibTex @inproceedings{watanabe2018espnet, author={Shinji Watanabe and Takaaki Hori and Shigeki Karita and Tomoki Hayashi and Jiro Nishitoba and Yuya Unno and Nelson Yalta and Jahn Heymann and Matthew Wiesner and Nanxin Chen and Adithya Renduchintala and Tsubasa Ochiai}, title={{ESPnet}: End-to-End Speech Processing Toolkit}, year={2018}, booktitle={Proceedings of Interspeech}, pages={2207--2211}, doi={10.21437/Interspeech.2018-1456}, url={http://dx.doi.org/10.21437/Interspeech.2018-1456} } ``` or arXiv: ```bibtex @misc{watanabe2018espnet, title={ESPnet: End-to-End Speech Processing Toolkit}, author={Shinji Watanabe and Takaaki Hori and Shigeki Karita and Tomoki Hayashi and Jiro Nishitoba and Yuya Unno and Nelson Yalta and Jahn Heymann and Matthew Wiesner and Nanxin Chen and Adithya Renduchintala and Tsubasa Ochiai}, year={2018}, eprint={1804.00015}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
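### Example ASR inference (sketch)

A minimal ESPnet2 inference sketch consistent with the configuration above. The repo id is a placeholder (assumption), and the `espnet` and `espnet_model_zoo` packages plus a 16 kHz mono WAV are assumed (matching `frontend_conf`).

```python
import soundfile as sf
from espnet_model_zoo.downloader import ModelDownloader
from espnet2.bin.asr_inference import Speech2Text

# Placeholder repo id (assumption): substitute the actual Hub name of this model.
d = ModelDownloader()
speech2text = Speech2Text(
    **d.download_and_unpack("your-org/your-espnet-asr-model"),
    ctc_weight=1.0,  # matches model_conf.ctc_weight in the config above
)

speech, rate = sf.read("sample.wav")  # 16 kHz mono, per frontend_conf (fs: 16k)
text, tokens, token_ids, hyp = speech2text(speech)[0]
print(text)
```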
Declan/HuffPost_model_v6
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- license: creativeml-openrail-m tags: - pytorch - diffusers - stable-diffusion - text-to-image - diffusion-models-class - dreambooth-hackathon - science widget: - text: top rated photo of mafra fractal in the shape of seashells. --- ## Description This is a Stable Diffusion model fine-tuned on Mandelbrot fractal images for the DreamBooth Hackathon 🔥 science theme. To participate or learn more, visit [this page](https://huggingface.co/dreambooth-hackathon). To generate Mandelbrot fractals, use **a photo of mafra fractal in the shape of [your choice]** or experiment with other variations. CFG scale seems to work best around 8-9. Additional modifiers and negative prompts may also improve results. ## Examples *a photo of mafra fractal in the shape of a squid.* ![squid fractal](https://i.imgur.com/UHJ5K7J.png) *a photo of mafra fractal in the shape of seashells.* ![seashell fractal](https://i.imgur.com/PgzEOAV.png) *a photo of mafra fractal in the shape of jungle foliage.* ![jungle fractal](https://i.imgur.com/v6ISc3u.png) *a photo of mafra fractal in the shape of a beautiful flower.* ![flower fractal](https://i.imgur.com/9VIk2Jc.png) ## Usage ```python from diffusers import StableDiffusionPipeline pipeline = StableDiffusionPipeline.from_pretrained('baruga/mandelbrot-fractals') # A text prompt is required; guidance_scale matches the suggested CFG range prompt = 'a photo of mafra fractal in the shape of seashells' image = pipeline(prompt, guidance_scale=8.5).images[0] image ```
Declan/Politico_model_v1
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - glue metrics: - matthews_correlation model-index: - name: distilbert-base-uncased-finetuned-cola results: - task: name: Text Classification type: text-classification dataset: name: glue type: glue config: cola split: train args: cola metrics: - name: Matthews Correlation type: matthews_correlation value: 0.5268023551875569 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-cola This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the glue dataset. It achieves the following results on the evaluation set: - Loss: 0.8337 - Matthews Correlation: 0.5268 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Matthews Correlation | |:-------------:|:-----:|:----:|:---------------:|:--------------------:| | 0.5253 | 1.0 | 535 | 0.5187 | 0.4181 | | 0.3463 | 2.0 | 1070 | 0.4989 | 0.5134 | | 0.2318 | 3.0 | 1605 | 0.5932 | 0.5136 | | 0.1724 | 4.0 | 2140 | 0.7905 | 0.5156 | | 0.1285 | 5.0 | 2675 | 0.8337 | 0.5268 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.8.0 - Tokenizers 0.13.2
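A minimal inference sketch, assuming the checkpoint was pushed to the Hub with its tokenizer; the repo id is a placeholder, and the `LABEL_0`/`LABEL_1` mapping depends on how the CoLA labels were encoded during training.

```python
from transformers import pipeline

# Hypothetical repo id: replace with the actual checkpoint location.
classifier = pipeline(
    "text-classification",
    model="your-username/distilbert-base-uncased-finetuned-cola",
)

# CoLA is a linguistic-acceptability task; the label names depend on
# whether id2label was set during fine-tuning.
print(classifier("The book was written by the author."))
```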
DeepChem/ChemBERTa-77M-MLM
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2,416
null
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: qlearning-taxiv3 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.56 +/- 2.71 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python import gym # load_from_hub is the Deep RL Course helper that downloads the pickled Q-table from the Hub model = load_from_hub(repo_id="keshan/qlearning-taxiv3", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
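A quick greedy-policy rollout sketch, assuming `model["qtable"]` is an `(n_states, n_actions)` NumPy array (the usual course serialization) and the classic `gym` step API (with `gymnasium`, `reset` and `step` return extra values):

```python
import numpy as np

state = env.reset()
done, total_reward = False, 0.0
while not done:
    action = int(np.argmax(model["qtable"][state]))  # greedy action
    state, reward, done, info = env.step(action)
    total_reward += reward
print(f"Episode return: {total_reward}")
```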
DeepESP/gpt2-spanish
[ "pytorch", "tf", "jax", "gpt2", "text-generation", "es", "dataset:ebooks", "transformers", "GPT-2", "Spanish", "ebooks", "nlg", "license:mit", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,463
null
--- language: en --- <p align="center"> <img src="https://doctr-static.mindee.com/models?id=v0.3.1/Logo_doctr.gif&src=0" width="60%"> </p> **Optical Character Recognition made seamless & accessible to anyone, powered by TensorFlow 2 & PyTorch** ## Task: recognition https://github.com/mindee/doctr ### Example usage: ```python >>> from doctr.io import DocumentFile >>> from doctr.models import ocr_predictor, from_hub >>> img = DocumentFile.from_images(['<image_path>']) >>> # Load your model from the hub >>> model = from_hub('mindee/my-model') >>> # Pass it to the predictor >>> # If your model is a recognition model: >>> predictor = ocr_predictor(det_arch='db_mobilenet_v3_large', >>> reco_arch=model, >>> pretrained=True) >>> # If your model is a detection model: >>> predictor = ocr_predictor(det_arch=model, >>> reco_arch='crnn_mobilenet_v3_small', >>> pretrained=True) >>> # Get your predictions >>> res = predictor(img) ``` ### Run Configuration ```json { "arch": "vitstr_small", "train_path": "C:\\Users\\smartmind\\Desktop\\workspace\\test\\train_ocr\\train", "val_path": "C:\\Users\\smartmind\\Desktop\\workspace\\test\\train_ocr\\validation", "train_samples": 1000, "val_samples": 20, "font": "FreeMono.ttf,FreeSans.ttf,FreeSerif.ttf", "min_chars": 1, "max_chars": 12, "name": "vitstr_small-korean", "epochs": 15, "batch_size": 64, "device": 0, "input_size": 32, "lr": 0.001, "weight_decay": 0.01, "workers": 8, "resume": null, "vocab": "korean", "test_only": false, "show_samples": false, "wb": true, "push_to_hub": true, "pretrained": true, "sched": "onecycle", "amp": true, "find_lr": false } ```
DeepPavlov/distilrubert-tiny-cased-conversational
[ "pytorch", "distilbert", "ru", "arxiv:2205.02340", "transformers" ]
null
{ "architectures": null, "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5,993
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 metrics: - wer model-index: - name: whisper-small-zh-hk results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: mozilla-foundation/common_voice_11_0 zh-HK type: mozilla-foundation/common_voice_11_0 config: mozilla-foundation/common_voice_11_0 zh-HK split: None args: zh-HK metrics: - name: Wer type: wer value: 0.5615316117542297 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # whisper-small-zh-hk This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the mozilla-foundation/common_voice_11_0 zh-HK dataset. It achieves the following results on the evaluation set: - Loss: 0.3003 - Wer: 0.5615 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - distributed_type: multi-GPU - num_devices: 2 - total_train_batch_size: 32 - total_eval_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 5000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.1556 | 2.28 | 1000 | 0.2708 | 0.6069 | | 0.038 | 4.57 | 2000 | 0.2674 | 0.5701 | | 0.0059 | 6.85 | 3000 | 0.2843 | 0.5635 | | 0.0017 | 9.13 | 4000 | 0.2952 | 0.5622 | | 0.0013 | 11.42 | 5000 | 0.3003 | 0.5615 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.1 - Datasets 2.8.0 - Tokenizers 0.13.2
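A minimal transcription sketch; the repo id and audio path below are placeholders:

```python
from transformers import pipeline

# Hypothetical repo id: replace with where this fine-tune was pushed.
asr = pipeline(
    "automatic-speech-recognition",
    model="your-username/whisper-small-zh-hk",
)

# Any ffmpeg-decodable audio file works; it is resampled to 16 kHz automatically.
print(asr("sample.wav")["text"])
```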
Denilson/gbert-base-germaner
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - Asteroids-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Asteroids-v5 type: Asteroids-v5 metrics: - type: mean_reward value: 16247.00 +/- 13460.05 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Asteroids-v5** This is a trained model of a PPO agent playing Asteroids-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/ppo_atari_envpool_async_jax_scan_impalanet_machado.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[ppo_atari_envpool_async_jax_scan_impalanet_machado]" python -m cleanrl_utils.enjoy --exp-name ppo_atari_envpool_async_jax_scan_impalanet_machado --env-id Asteroids-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Asteroids-v5-ppo_atari_envpool_async_jax_scan_impalanet_machado-seed1/raw/main/ppo_atari_envpool_async_jax_scan_impalanet_machado.py curl -OL https://huggingface.co/cleanrl/Asteroids-v5-ppo_atari_envpool_async_jax_scan_impalanet_machado-seed1/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Asteroids-v5-ppo_atari_envpool_async_jax_scan_impalanet_machado-seed1/raw/main/poetry.lock poetry install --all-extras python ppo_atari_envpool_async_jax_scan_impalanet_machado.py --track --wandb-project-name envpool-atari --save-model --upload-model --hf-entity cleanrl --env-id Asteroids-v5 --seed 1 ``` # Hyperparameters ```python {'anneal_lr': True, 'async_batch_size': 16, 'batch_size': 2048, 'capture_video': False, 'clip_coef': 0.1, 'cuda': True, 'ent_coef': 0.01, 'env_id': 'Asteroids-v5', 'exp_name': 'ppo_atari_envpool_async_jax_scan_impalanet_machado', 'gae': True, 'gae_lambda': 0.95, 'gamma': 0.99, 'hf_entity': 'cleanrl', 'learning_rate': 0.00025, 'max_grad_norm': 0.5, 'minibatch_size': 1024, 'norm_adv': True, 'num_envs': 64, 'num_minibatches': 2, 'num_steps': 32, 'num_updates': 24414, 'save_model': True, 'seed': 1, 'target_kl': None, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'update_epochs': 2, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'envpool-atari'} ```
Deniskin/emailer_medium_300
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
null
--- license: cc-by-sa-4.0 tags: - generated_from_trainer model-index: - name: weights_text results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # weights_text This model is a fine-tuned version of [cl-tohoku/bert-base-japanese-whole-word-masking](https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results ### Framework versions - Transformers 4.24.0 - Pytorch 1.13.1+cu117 - Tokenizers 0.13.2
Denver/distilbert-base-uncased-finetuned-squad
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - LunarLander-v2 - ppo - deep-reinforcement-learning - reinforcement-learning - custom-implementation - deep-rl-course model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: -197.83 +/- 128.39 name: mean_reward verified: false --- # PPO Agent Playing LunarLander-v2 This is a trained model of a PPO agent playing LunarLander-v2. # Hyperparameters ```python {'exp_name': 'ppo' 'seed': 1 'torch_deterministic': True 'cuda': True 'track': False 'wandb_project_name': 'cleanRL' 'wandb_entity': None 'capture_video': False 'env_id': 'LunarLander-v2' 'total_timesteps': 2 'learning_rate': 0.00025 'num_envs': 4 'num_steps': 128 'anneal_lr': True 'gae': True 'gamma': 0.99 'gae_lambda': 0.95 'num_minibatches': 4 'update_epochs': 4 'norm_adv': True 'clip_coef': 0.2 'clip_vloss': True 'ent_coef': 0.01 'vf_coef': 0.5 'max_grad_norm': 0.5 'target_kl': None 'repo_id': 'dotunadegbite/ppo-LunarLander-v2' 'batch_size': 512 'minibatch_size': 128} ```
DiegoAlysson/opus-mt-en-ro-finetuned-en-to-ro
[ "pytorch", "tensorboard", "marian", "text2text-generation", "dataset:wmt16", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### Jason-Art Dreambooth model trained by Alexwww with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook Including the prompt words "photography minimal symmetric" will help get better outputs. Test the concept via the A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb) Sample pictures of this concept:
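A minimal diffusers sketch, assuming the concept keeps the standard Stable Diffusion pipeline layout; the repo id and the trigger phrasing are assumptions, since the card only states the helper prompt words:

```python
import torch
from diffusers import StableDiffusionPipeline

# Placeholder repo id (assumption): replace with where this concept was pushed.
pipe = StableDiffusionPipeline.from_pretrained(
    "Alexwww/jason-art", torch_dtype=torch.float16
).to("cuda")

# "photography minimal symmetric" are the helper prompt words suggested above.
image = pipe("photography minimal symmetric, jason-art").images[0]
image.save("jason_art_sample.png")
```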
DimaOrekhov/transformer-method-name
[ "pytorch", "encoder-decoder", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
2023-01-02T07:39:22Z
--- license: cc0-1.0 --- You want more than a digital style - you want to feel brush strokes and see the built-up paint of an oil painting. You love physical objects and want your AI-generated art to fool you that you're looking at a photograph of something analog, hanging on a wall somewhere. This is the embedding for you. Download the 'classipeint.pt' file and trigger it in your prompt "art by classipeint" or "painted by classipeint" or simply "by classipeint" <strong>Interested in generating your own embeddings? <a href="https://docs.google.com/document/d/1JvlM0phnok4pghVBAMsMq_-Z18_ip_GXvHYE0mITdFE/edit?usp=sharing" target="_blank">My Google doc walkthrough might help</a></strong> It is reasonably flexible - I find I can prompt for fantasy elements, classic scenes, modern architecture ... it does sometimes take a little finessing but except for bad anatomy, I am using surprisingly few negative prompts. You can rename the file and use that filename as the prompt. Just be sure your filename is unique and not something that may be an existing token that Stable Diffusion is trained on. ![01642-2869083623-portrait of a___.jpg](https://s3.amazonaws.com/moonup/production/uploads/1672646629219-63169de2f5e32157c5226974.jpeg) ![01639-2347037953-Mexican count___.jpg](https://s3.amazonaws.com/moonup/production/uploads/1672645358818-63169de2f5e32157c5226974.jpeg) ![01636-63647559-extremely detai___.jpg](https://s3.amazonaws.com/moonup/production/uploads/1672645358846-63169de2f5e32157c5226974.jpeg) ![01634-850899942-painting of a____.jpg](https://s3.amazonaws.com/moonup/production/uploads/1672645358820-63169de2f5e32157c5226974.jpeg) ![01632-3303150612-North end of____.jpg](https://s3.amazonaws.com/moonup/production/uploads/1672645358826-63169de2f5e32157c5226974.jpeg) ![01631-2009822381-African busin___.jpg](https://s3.amazonaws.com/moonup/production/uploads/1672645358823-63169de2f5e32157c5226974.jpeg) ![01630-4016756398-a diminutive____.jpg](https://s3.amazonaws.com/moonup/production/uploads/1672645358844-63169de2f5e32157c5226974.jpeg) ![01629-4016756396-a diminutive____.jpg](https://s3.amazonaws.com/moonup/production/uploads/1672645358842-63169de2f5e32157c5226974.jpeg)
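A hedged diffusers sketch for loading the embedding outside A1111; it assumes a diffusers version with `load_textual_inversion` (roughly 0.14+) and that 'classipeint.pt' sits in the working directory:

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Load the downloaded embedding and bind it to its trigger token.
pipe.load_textual_inversion("./classipeint.pt", token="classipeint")

image = pipe("portrait of a fisherman, art by classipeint").images[0]
image.save("classipeint_portrait.png")
```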
Dongjae/mrc2reader
[ "pytorch", "xlm-roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "XLMRobertaForQuestionAnswering" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: openrail --- ## Models ``` yolov4 (single/multiple gpu) yolov4-csp (single/multiple gpu) ``` ## Dataset Synthetic data consisting of common office and household items ## Training Using darknet
albert-base-v1
[ "pytorch", "tf", "safetensors", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "exbert", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
38,156
2023-01-02T10:54:12Z
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 636.50 +/- 190.80 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga misza222 -f logs/ python enjoy.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga misza222 -f logs/ rl_zoo3 enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python train.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga misza222 ``` ## Hyperparameters ```python OrderedDict([('batch_size', 64), ('buffer_size', 150000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.0001), ('learning_starts', 100000), ('n_timesteps', 1000000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ```
albert-base-v2
[ "pytorch", "tf", "jax", "rust", "safetensors", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4,785,283
2023-01-02T10:55:17Z
--- language: en license: mit tags: - vision - image-to-text inference: false model_name: microsoft/git-base-msrvtt-qa --- # GIT (GenerativeImage2Text), base-sized, fine-tuned on MSRVTT-QA GIT (short for GenerativeImage2Text) model, base-sized version, fine-tuned on MSRVTT-QA. It was introduced in the paper [GIT: A Generative Image-to-text Transformer for Vision and Language](https://arxiv.org/abs/2205.14100) by Wang et al. and first released in [this repository](https://github.com/microsoft/GenerativeImage2Text). Disclaimer: The team releasing GIT did not write a model card for this model, so this model card has been written by the Hugging Face team. ## Model description GIT is a Transformer decoder conditioned on both CLIP image tokens and text tokens. The model is trained using "teacher forcing" on many (image, text) pairs. The goal for the model is simply to predict the next text token, given the image tokens and the previous text tokens. The model has full access to (i.e. a bidirectional attention mask is used for) the image patch tokens, but only has access to the previous text tokens (i.e. a causal attention mask is used for the text tokens) when predicting the next text token. ![GIT architecture](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/git_architecture.jpg) This allows the model to be used for tasks like: - image and video captioning - visual question answering (VQA) on images and videos - even image classification (by simply conditioning the model on the image and asking it to generate a class for it in text). ## Intended uses & limitations You can use the raw model for video question answering (QA). See the [model hub](https://huggingface.co/models?search=microsoft/git) to look for fine-tuned versions on a task that interests you. ### How to use For code examples, we refer to the [documentation](https://huggingface.co/transformers/main/model_doc/git.html). ## Training data From the paper: > We collect 0.8B image-text pairs for pre-training, which include COCO (Lin et al., 2014), Conceptual Captions (CC3M) (Sharma et al., 2018), SBU (Ordonez et al., 2011), Visual Genome (VG) (Krishna et al., 2016), Conceptual Captions (CC12M) (Changpinyo et al., 2021), ALT200M (Hu et al., 2021a), and an extra 0.6B data following a similar collection procedure in Hu et al. (2021a). => however this is for the model referred to as "GIT" in the paper, which is not open-sourced. This checkpoint is "GIT-base", which is a smaller variant of GIT trained on 10 million image-text pairs. Next, the model was fine-tuned on MSRVTT-QA. See table 11 in the [paper](https://arxiv.org/abs/2205.14100) for more details. ### Preprocessing We refer to the original repo regarding details for preprocessing during training. During validation, one resizes the shorter edge of each image, after which center cropping is performed to a fixed-size resolution. Next, frames are normalized across the RGB channels with the ImageNet mean and standard deviation. ## Evaluation results For evaluation results, we refer readers to the [paper](https://arxiv.org/abs/2205.14100).
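A minimal video-QA sketch following the generic GIT API in Transformers; the frame count and CLS-token question prefix mirror the documented GIT examples, but exact MSRVTT-QA preprocessing is an assumption and `video.mp4` is a placeholder path:

```python
import av  # pip install av
import numpy as np
import torch
from transformers import AutoProcessor, AutoModelForCausalLM

processor = AutoProcessor.from_pretrained("microsoft/git-base-msrvtt-qa")
model = AutoModelForCausalLM.from_pretrained("microsoft/git-base-msrvtt-qa")

# Decode a short clip and keep 6 evenly spaced frames (GIT's video
# variants embed a small fixed number of frames).
container = av.open("video.mp4")  # placeholder path
frames = [f.to_ndarray(format="rgb24") for f in container.decode(video=0)]
indices = np.linspace(0, len(frames) - 1, num=6).astype(int)
clip = [frames[i] for i in indices]

# Shape (1, num_frames, channels, height, width) for the video pathway.
pixel_values = processor(images=clip, return_tensors="pt").pixel_values.unsqueeze(0)

# GIT expects the question prefixed with the CLS token.
question = "what is the person doing?"
input_ids = processor(text=question, add_special_tokens=False).input_ids
input_ids = torch.tensor([processor.tokenizer.cls_token_id] + input_ids).unsqueeze(0)

generated_ids = model.generate(pixel_values=pixel_values, input_ids=input_ids, max_length=50)
print(processor.batch_decode(generated_ids, skip_special_tokens=True))
```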
albert-large-v2
[ "pytorch", "tf", "safetensors", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
26,792
2023-01-02T10:56:26Z
---
license: apache-2.0
---

## Anime Segmentation Models

Models of [https://github.com/SkyTNT/anime-segmentation](https://github.com/SkyTNT/anime-segmentation).
albert-xxlarge-v1
[ "pytorch", "tf", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7,091
2023-01-02T11:07:38Z
---
language: en
license: mit
tags:
- vision
model_name: microsoft/git-large-vqav2
pipeline_tag: visual-question-answering
---

# GIT (GenerativeImage2Text), large-sized, fine-tuned on VQAv2

GIT (short for GenerativeImage2Text) model, large-sized version, fine-tuned on VQAv2. It was introduced in the paper [GIT: A Generative Image-to-text Transformer for Vision and Language](https://arxiv.org/abs/2205.14100) by Wang et al. and first released in [this repository](https://github.com/microsoft/GenerativeImage2Text).

Disclaimer: The team releasing GIT did not write a model card for this model, so this model card has been written by the Hugging Face team.

## Model description

GIT is a Transformer decoder conditioned on both CLIP image tokens and text tokens. The model is trained using "teacher forcing" on many (image, text) pairs.

The goal for the model is simply to predict the next text token, given the image tokens and the previous text tokens.

The model has full access to (i.e. a bidirectional attention mask is used for) the image patch tokens, but only has access to the previous text tokens (i.e. a causal attention mask is used for the text tokens) when predicting the next text token.

![GIT architecture](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/git_architecture.jpg)

This allows the model to be used for tasks like:

- image and video captioning
- visual question answering (VQA) on images and videos
- even image classification (by simply conditioning the model on the image and asking it to generate a class for it in text).

## Intended uses & limitations

You can use the raw model for visual question answering (VQA). See the [model hub](https://huggingface.co/models?search=microsoft/git) to look for fine-tuned versions on a task that interests you.

### How to use

For official code examples, we refer to the [documentation](https://huggingface.co/transformers/main/model_doc/git.html); a hedged usage sketch also follows this card.

## Training data

From the paper:

> We collect 0.8B image-text pairs for pre-training, which include COCO (Lin et al., 2014), Conceptual Captions (CC3M) (Sharma et al., 2018), SBU (Ordonez et al., 2011), Visual Genome (VG) (Krishna et al., 2016), Conceptual Captions (CC12M) (Changpinyo et al., 2021), ALT200M (Hu et al., 2021a), and an extra 0.6B data following a similar collection procedure in Hu et al. (2021a).

Note, however, that this is for the model referred to as "GIT" in the paper, which is not open-sourced.

This checkpoint is "GIT-large", which is a smaller variant of GIT trained on 20 million image-text pairs.

Next, the model was fine-tuned on VQAv2.

See table 11 in the [paper](https://arxiv.org/abs/2205.14100) for more details.

### Preprocessing

We refer to the original repo regarding details for preprocessing during training.

During validation, one resizes the shorter edge of each image, after which center cropping is performed to a fixed-size resolution. Next, frames are normalized across the RGB channels with the ImageNet mean and standard deviation.

## Evaluation results

For evaluation results, we refer readers to the [paper](https://arxiv.org/abs/2205.14100).
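A minimal image-VQA sketch, not an official example, following the pattern in the GIT documentation (the question is prefixed with the tokenizer's CLS token and the answer is generated autoregressively):

```python
# Hedged sketch: image VQA with GIT.
import requests
import torch
from PIL import Image
from transformers import AutoProcessor, AutoModelForCausalLM

processor = AutoProcessor.from_pretrained("microsoft/git-large-vqav2")
model = AutoModelForCausalLM.from_pretrained("microsoft/git-large-vqav2")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
pixel_values = processor(images=image, return_tensors="pt").pixel_values

question = "how many cats are there?"
input_ids = processor(text=question, add_special_tokens=False).input_ids
input_ids = torch.tensor([[processor.tokenizer.cls_token_id] + input_ids])

generated_ids = model.generate(pixel_values=pixel_values, input_ids=input_ids, max_length=50)
print(processor.batch_decode(generated_ids, skip_special_tokens=True))
```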
bert-base-cased-finetuned-mrpc
[ "pytorch", "tf", "jax", "bert", "fill-mask", "transformers", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11,644
2023-01-02T11:09:46Z
---
tags:
- generated_from_trainer
datasets:
- samsum
model-index:
- name: pegasus-samsum
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# pegasus-samsum

This model is a fine-tuned version of [google/pegasus-cnn_dailymail](https://huggingface.co/google/pegasus-cnn_dailymail) on the samsum dataset.
It achieves the following results on the evaluation set:
- Loss: 1.4812

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 1
- eval_batch_size: 1
- seed: 42
- gradient_accumulation_steps: 16
- total_train_batch_size: 16
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- num_epochs: 1

### Training results

| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 1.6928        | 0.54  | 500  | 1.4812          |

### Framework versions

- Transformers 4.25.1
- Pytorch 1.13.0+cu116
- Datasets 2.8.0
- Tokenizers 0.13.2
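Since the card is otherwise a stub, here is a hedged inference sketch for a PEGASUS model fine-tuned on SAMSum-style dialogues. The model path is a placeholder for wherever this checkpoint is saved; the sample dialogue is illustrative.

```python
# Hedged sketch: dialogue summarization with the fine-tuned checkpoint.
# "pegasus-samsum" below is a placeholder path, not a confirmed Hub repo id.
from transformers import pipeline

summarizer = pipeline("summarization", model="pegasus-samsum")

dialogue = (
    "Hannah: Hey, do you have Betty's number?\n"
    "Amanda: Lemme check.\n"
    "Amanda: Sorry, can't find it. Ask Larry, he called her last time we were at the park.\n"
    "Hannah: OK, thanks!"
)
print(summarizer(dialogue, max_length=60)[0]["summary_text"])
```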
bert-base-cased
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "exbert", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8,621,271
2023-01-02T11:10:56Z
---
tags:
- FrozenLake-v1-4x4-no_slippery
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-FrozenLake-v1-4x4-noSlippery
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: FrozenLake-v1-4x4-no_slippery
      type: FrozenLake-v1-4x4-no_slippery
    metrics:
    - type: mean_reward
      value: 1.00 +/- 0.00
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **FrozenLake-v1**

This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**.

## Usage

```python
import gym

# `load_from_hub` is the Deep RL course helper; a hedged sketch of it follows this card.
model = load_from_hub(repo_id="eyechen/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
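The snippet above relies on the course's `load_from_hub` utility. A minimal sketch of such a helper, assuming the repository stores the agent as a pickled dictionary (with keys like `env_id` and the Q-table), could look like this:

```python
import pickle

from huggingface_hub import hf_hub_download


def load_from_hub(repo_id: str, filename: str) -> dict:
    """Download a pickled Q-Learning agent from the Hugging Face Hub and unpickle it."""
    pickle_path = hf_hub_download(repo_id=repo_id, filename=filename)
    with open(pickle_path, "rb") as f:
        return pickle.load(f)
```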
bert-base-chinese
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "zh", "arxiv:1810.04805", "transformers", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3,377,486
2023-01-02T11:12:15Z
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: insertion-prop-05-correct-data
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# insertion-prop-05-correct-data

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0794
- Precision: 0.9284
- Recall: 0.9056
- F1: 0.9169
- Accuracy: 0.9689

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 64
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1

### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1     | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| 0.1815        | 0.32  | 500  | 0.0982          | 0.9159    | 0.8802 | 0.8977 | 0.9619   |
| 0.1113        | 0.64  | 1000 | 0.0833          | 0.9257    | 0.9018 | 0.9136 | 0.9676   |
| 0.1018        | 0.96  | 1500 | 0.0794          | 0.9284    | 0.9056 | 0.9169 | 0.9689   |

### Framework versions

- Transformers 4.25.1
- Pytorch 1.13.0+cu116
- Datasets 2.8.0
- Tokenizers 0.13.2
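The seqeval-style metrics (precision/recall/F1 plus accuracy) suggest a token-classification head; assuming that is the case, a hedged inference sketch, with a placeholder model path, could look like this:

```python
# Hedged sketch: tagging tokens with the fine-tuned checkpoint.
# "insertion-prop-05-correct-data" is a placeholder path to the saved model,
# and the token-classification task itself is an assumption based on the metrics.
from transformers import pipeline

tagger = pipeline("token-classification", model="insertion-prop-05-correct-data", aggregation_strategy="simple")
print(tagger("One of these words may have been inserted into this sentence."))
```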
bert-base-german-cased
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "de", "transformers", "exbert", "license:mit", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
175,983
2023-01-02T11:12:32Z
---
library_name: stable-baselines3
tags:
- SpaceInvadersNoFrameskip-v4
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: DQN
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: SpaceInvadersNoFrameskip-v4
      type: SpaceInvadersNoFrameskip-v4
    metrics:
    - type: mean_reward
      value: 659.00 +/- 173.14
      name: mean_reward
      verified: false
---

# **DQN** Agent playing **SpaceInvadersNoFrameskip-v4**

This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo).

The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included.

## Usage (with SB3 RL Zoo)

RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/>
SB3: https://github.com/DLR-RM/stable-baselines3<br/>
SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib

```
# Download model and save it into the logs/ folder
python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga joheras -f logs/
python enjoy.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/
```

If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do:

```
python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga joheras -f logs/
rl_zoo3 enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/
```

## Training (with the RL Zoo)

```
python train.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/
# Upload the model and generate video (when possible)
python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga joheras
```

## Hyperparameters

```python
OrderedDict([('batch_size', 32),
             ('buffer_size', 100000),
             ('env_wrapper',
              ['stable_baselines3.common.atari_wrappers.AtariWrapper']),
             ('exploration_final_eps', 0.01),
             ('exploration_fraction', 0.1),
             ('frame_stack', 4),
             ('gradient_steps', 1),
             ('learning_rate', 0.0001),
             ('learning_starts', 100000),
             ('n_timesteps', 1000000.0),
             ('optimize_memory_usage', False),
             ('policy', 'CnnPolicy'),
             ('target_update_interval', 1000),
             ('train_freq', 4),
             ('normalize', False)])
```
bert-base-german-dbmdz-cased
[ "pytorch", "jax", "bert", "fill-mask", "de", "transformers", "license:mit", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,814
2023-01-02T11:14:16Z
---
tags:
- Taxi-v3
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-Taxi-v3
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: Taxi-v3
      type: Taxi-v3
    metrics:
    - type: mean_reward
      value: 7.50 +/- 2.72
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **Taxi-v3**

This is a trained model of a **Q-Learning** agent playing **Taxi-v3**.

## Usage

```python
import gym

# `load_from_hub` is the Deep RL course helper; see the hedged sketch earlier in this document.
model = load_from_hub(repo_id="eyechen/q-Taxi-v3", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
bert-base-german-dbmdz-uncased
[ "pytorch", "jax", "safetensors", "bert", "fill-mask", "de", "transformers", "license:mit", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
68,305
2023-01-02T11:18:10Z
---
language: en
license: mit
tags:
- vision
model_name: microsoft/git-large-textvqa
inference: false
pipeline_tag: visual-question-answering
---

# GIT (GenerativeImage2Text), large-sized, fine-tuned on TextVQA

GIT (short for GenerativeImage2Text) model, large-sized version, fine-tuned on TextVQA. It was introduced in the paper [GIT: A Generative Image-to-text Transformer for Vision and Language](https://arxiv.org/abs/2205.14100) by Wang et al. and first released in [this repository](https://github.com/microsoft/GenerativeImage2Text).

Disclaimer: The team releasing GIT did not write a model card for this model, so this model card has been written by the Hugging Face team.

## Model description

GIT is a Transformer decoder conditioned on both CLIP image tokens and text tokens. The model is trained using "teacher forcing" on many (image, text) pairs.

The goal for the model is simply to predict the next text token, given the image tokens and the previous text tokens.

The model has full access to (i.e. a bidirectional attention mask is used for) the image patch tokens, but only has access to the previous text tokens (i.e. a causal attention mask is used for the text tokens) when predicting the next text token.

![GIT architecture](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/git_architecture.jpg)

This allows the model to be used for tasks like:

- image and video captioning
- visual question answering (VQA) on images and videos
- even image classification (by simply conditioning the model on the image and asking it to generate a class for it in text).

## Intended uses & limitations

You can use the raw model for visual question answering (VQA). See the [model hub](https://huggingface.co/models?search=microsoft/git) to look for fine-tuned versions on a task that interests you.

### How to use

For official code examples, we refer to the [documentation](https://huggingface.co/transformers/main/model_doc/git.html); a hedged usage sketch also follows this card.

## Training data

From the paper:

> We collect 0.8B image-text pairs for pre-training, which include COCO (Lin et al., 2014), Conceptual Captions (CC3M) (Sharma et al., 2018), SBU (Ordonez et al., 2011), Visual Genome (VG) (Krishna et al., 2016), Conceptual Captions (CC12M) (Changpinyo et al., 2021), ALT200M (Hu et al., 2021a), and an extra 0.6B data following a similar collection procedure in Hu et al. (2021a).

Note, however, that this is for the model referred to as "GIT" in the paper, which is not open-sourced.

This checkpoint is "GIT-large", which is a smaller variant of GIT trained on 20 million image-text pairs.

Next, the model was fine-tuned on TextVQA.

See table 11 in the [paper](https://arxiv.org/abs/2205.14100) for more details.

### Preprocessing

We refer to the original repo regarding details for preprocessing during training.

During validation, one resizes the shorter edge of each image, after which center cropping is performed to a fixed-size resolution. Next, frames are normalized across the RGB channels with the ImageNet mean and standard deviation.

## Evaluation results

For evaluation results, we refer readers to the [paper](https://arxiv.org/abs/2205.14100).
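Usage mirrors the hedged VQAv2 sketch earlier in this document; only the checkpoint and the (scene-text oriented) question change. The image URL and question are illustrative only:

```python
# Hedged sketch: scene-text VQA with GIT, same pattern as the VQAv2 example above.
import requests
import torch
from PIL import Image
from transformers import AutoProcessor, AutoModelForCausalLM

processor = AutoProcessor.from_pretrained("microsoft/git-large-textvqa")
model = AutoModelForCausalLM.from_pretrained("microsoft/git-large-textvqa")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
pixel_values = processor(images=image, return_tensors="pt").pixel_values

question = "what is written on the remote control?"
input_ids = processor(text=question, add_special_tokens=False).input_ids
input_ids = torch.tensor([[processor.tokenizer.cls_token_id] + input_ids])

generated_ids = model.generate(pixel_values=pixel_values, input_ids=input_ids, max_length=50)
print(processor.batch_decode(generated_ids, skip_special_tokens=True))
```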
bert-base-uncased
[ "pytorch", "tf", "jax", "rust", "safetensors", "bert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "exbert", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
59,663,489
2023-01-02T11:27:10Z
---
license: openrail
---

```python
from transformers import pipeline

text = """Dear Amazon, last week I ordered an Optimus Prime action figure from \
your online store in Germany. Unfortunately, when I opened the package, I \
discovered to my horror that I had been sent an action figure of Megatron \
instead! As a lifelong enemy of the Decepticons, I hope you can understand my \
dilemma. To resolve the issue, I demand an exchange of Megatron for the \
Optimus Prime figure I ordered. Enclosed are copies of my records concerning \
this purchase. I expect to hear from you soon. Sincerely, Bumblebee."""

classifier = pipeline("text-classification")
outputs = classifier(text)
print(outputs)
```
bert-large-cased-whole-word-masking-finetuned-squad
[ "pytorch", "tf", "jax", "rust", "safetensors", "bert", "question-answering", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8,214
2023-01-02T11:33:19Z
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- f1
model-index:
- name: text_classification_model
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# text_classification_model

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.3686
- F1: 0.8968

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2

### Training results

| Training Loss | Epoch | Step  | Validation Loss | F1     |
|:-------------:|:-----:|:-----:|:---------------:|:------:|
| 0.2356        | 1.0   | 7215  | 0.3704          | 0.8946 |
| 0.2011        | 2.0   | 14430 | 0.3686          | 0.8968 |

### Framework versions

- Transformers 4.26.0
- Pytorch 1.13.1+cu116
- Datasets 2.9.0
- Tokenizers 0.13.2
bert-large-uncased-whole-word-masking
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
76,685
2023-01-02T11:46:41Z
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: insertion-prop-015-correct-data
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# insertion-prop-015-correct-data

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0497
- Precision: 0.8907
- Recall: 0.8518
- F1: 0.8708
- Accuracy: 0.9816

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 64
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1

### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1     | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| 0.0978        | 0.32  | 500  | 0.0581          | 0.8730    | 0.8300 | 0.8509 | 0.9787   |
| 0.0633        | 0.64  | 1000 | 0.0515          | 0.8867    | 0.8447 | 0.8652 | 0.9807   |
| 0.0588        | 0.96  | 1500 | 0.0497          | 0.8907    | 0.8518 | 0.8708 | 0.9816   |

### Framework versions

- Transformers 4.25.1
- Pytorch 1.13.0+cu116
- Datasets 2.8.0
- Tokenizers 0.13.2
camembert-base
[ "pytorch", "tf", "safetensors", "camembert", "fill-mask", "fr", "dataset:oscar", "arxiv:1911.03894", "transformers", "license:mit", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "CamembertForMaskedLM" ], "model_type": "camembert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,440,898
2023-01-02T11:48:08Z
---
language: en
license: mit
tags:
- vision
inference: false
model_name: microsoft/git-large-vatex
---

# GIT (GenerativeImage2Text), large-sized, fine-tuned on VATEX

GIT (short for GenerativeImage2Text) model, large-sized version, fine-tuned on VATEX. It was introduced in the paper [GIT: A Generative Image-to-text Transformer for Vision and Language](https://arxiv.org/abs/2205.14100) by Wang et al. and first released in [this repository](https://github.com/microsoft/GenerativeImage2Text).

Disclaimer: The team releasing GIT did not write a model card for this model, so this model card has been written by the Hugging Face team.

## Model description

GIT is a Transformer decoder conditioned on both CLIP image tokens and text tokens. The model is trained using "teacher forcing" on many (image, text) pairs.

The goal for the model is simply to predict the next text token, given the image tokens and the previous text tokens.

The model has full access to (i.e. a bidirectional attention mask is used for) the image patch tokens, but only has access to the previous text tokens (i.e. a causal attention mask is used for the text tokens) when predicting the next text token.

![GIT architecture](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/git_architecture.jpg)

This allows the model to be used for tasks like:

- image and video captioning
- visual question answering (VQA) on images and videos
- even image classification (by simply conditioning the model on the image and asking it to generate a class for it in text).

## Intended uses & limitations

You can use the raw model for video captioning. See the [model hub](https://huggingface.co/models?search=microsoft/git) to look for fine-tuned versions on a task that interests you.

### How to use

For official code examples, we refer to the [documentation](https://huggingface.co/transformers/main/model_doc/git.html); a hedged usage sketch also follows this card.

## Training data

From the paper:

> We collect 0.8B image-text pairs for pre-training, which include COCO (Lin et al., 2014), Conceptual Captions (CC3M) (Sharma et al., 2018), SBU (Ordonez et al., 2011), Visual Genome (VG) (Krishna et al., 2016), Conceptual Captions (CC12M) (Changpinyo et al., 2021), ALT200M (Hu et al., 2021a), and an extra 0.6B data following a similar collection procedure in Hu et al. (2021a).

Note, however, that this is for the model referred to as "GIT" in the paper, which is not open-sourced.

This checkpoint is "GIT-large", which is a smaller variant of GIT trained on 20 million image-text pairs.

Next, the model was fine-tuned on VATEX.

See table 11 in the [paper](https://arxiv.org/abs/2205.14100) for more details.

### Preprocessing

We refer to the original repo regarding details for preprocessing during training.

During validation, one resizes the shorter edge of each image, after which center cropping is performed to a fixed-size resolution. Next, frames are normalized across the RGB channels with the ImageNet mean and standard deviation.

## Evaluation results

For evaluation results, we refer readers to the [paper](https://arxiv.org/abs/2205.14100).
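A minimal video-captioning sketch, not an official example: it assumes six evenly sampled frames from a clip, with random dummy frames generated below so the snippet runs end to end.

```python
# Hedged sketch: video captioning with GIT. The random "frames" stand in for
# six frames decoded from a real video clip.
import numpy as np
from PIL import Image
from transformers import AutoProcessor, AutoModelForCausalLM

processor = AutoProcessor.from_pretrained("microsoft/git-large-vatex")
model = AutoModelForCausalLM.from_pretrained("microsoft/git-large-vatex")

# Placeholder frames: replace with frames sampled from an actual video.
frames = [Image.fromarray(np.random.randint(0, 255, (360, 640, 3), dtype=np.uint8)) for _ in range(6)]
# Shape (1, num_frames, 3, H, W): the leading batch dimension marks this as video input.
pixel_values = processor(images=frames, return_tensors="pt").pixel_values.unsqueeze(0)

generated_ids = model.generate(pixel_values=pixel_values, max_length=50)
print(processor.batch_decode(generated_ids, skip_special_tokens=True))
```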
distilbert-base-german-cased
[ "pytorch", "safetensors", "distilbert", "fill-mask", "de", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "DistilBertForMaskedLM" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
43,667
2023-01-02T11:59:29Z
---
tags:
- Taxi-v3
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: taxi_model
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: Taxi-v3
      type: Taxi-v3
    metrics:
    - type: mean_reward
      value: 7.56 +/- 2.71
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **Taxi-v3**

This is a trained model of a **Q-Learning** agent playing **Taxi-v3**.

## Usage

```python
import gym

# `load_from_hub` is the Deep RL course helper; see the hedged sketch earlier in this document.
model = load_from_hub(repo_id="asiaLootus/taxi_model", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
gpt2
[ "pytorch", "tf", "jax", "tflite", "rust", "safetensors", "gpt2", "text-generation", "en", "doi:10.57967/hf/0039", "transformers", "exbert", "license:mit", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
21,488,226
2023-01-02T12:18:41Z
---
tags:
- unity-ml-agents
- ml-agents
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-Pyramids
library_name: ml-agents
---

# **ppo** Agent playing **Pyramids**

This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)

The documentation: https://github.com/huggingface/ml-agents#get-started

We wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub; see the documentation link above.

### Resume the training

```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your Agent play

You can watch your agent **playing directly in your browser**:

1. Go to https://huggingface.co/spaces/unity/ML-Agents-Pyramids
2. Write your model_id: toinsson/testpyramidsrnd
3. Select your *.nn or *.onnx file
4. Click on Watch the agent play 👀
123www/test_model
[ "pytorch", "wav2vec2", "transformers" ]
null
{ "architectures": [ "Wav2Vec2ForSpeechClassification" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
2023-01-02T13:47:50Z
---
language: en
tags:
- exbert
license: mit
---

# ColD Fusion BERT uncased model

Finetuned model that aims to be a great base model. It improves over the BERT base model (uncased), and was trained on 35 datasets. Full details are in [this paper](https://arxiv.org/abs/2212.01378).

## Paper Abstract:

Pretraining has been shown to scale well with compute, data size and data diversity. Multitask learning trains on a mixture of supervised datasets and produces improved performance compared to self-supervised pretraining. Until now, massively multitask learning required simultaneous access to all datasets in the mixture and heavy compute resources that are only available to well-resourced teams.

In this paper, we propose ColD Fusion, a method that provides the benefits of multitask learning but leverages distributed computation and requires limited communication and no sharing of data. Consequently, ColD Fusion can create a synergistic loop, where finetuned models can be recycled to continually improve the pretrained model they are based on. We show that ColD Fusion yields comparable benefits to multitask pretraining by producing a model that (a) attains strong performance on all of the datasets it was multitask trained on and (b) is a better starting point for finetuning on unseen datasets. We find ColD Fusion outperforms RoBERTa and even previous multitask models. Specifically, when training and testing on 35 diverse datasets, a ColD Fusion-based model outperforms RoBERTa by 2.45 points on average without any changes to the architecture.

### How to use

The best way to use the model is to finetune it on your own task, but you can also extract features directly. To get the features of a given text in PyTorch:

```python
from transformers import RobertaTokenizer, RobertaModel
tokenizer = RobertaTokenizer.from_pretrained('ibm/ColD-Fusion')
model = RobertaModel.from_pretrained('ibm/ColD-Fusion')
text = "Replace me by any text you'd like."
encoded_input = tokenizer(text, return_tensors='pt')
output = model(**encoded_input)
```

and in TensorFlow:

```python
from transformers import RobertaTokenizer, TFRobertaModel
tokenizer = RobertaTokenizer.from_pretrained('ibm/ColD-Fusion')
model = TFRobertaModel.from_pretrained('ibm/ColD-Fusion')
text = "Replace me by any text you'd like."
encoded_input = tokenizer(text, return_tensors='tf')
output = model(encoded_input)
```

## Evaluation results

See full evaluation results of this model and many more [here](https://ibm.github.io/model-recycling/roberta-base_table.html). When fine-tuned on downstream tasks, this model achieves the following results:

### BibTeX entry and citation info

```bibtex
@article{ColDFusion,
  author = {Shachar Don-Yehiya and Elad Venezian and Colin Raffel and Noam Slonim and Yoav Katz and Leshem Choshen},
  title = {ColD Fusion: Collaborative Descent for Distributed Multitask Finetuning},
  journal = {CoRR},
  volume = {abs/2212.01378},
  year = {2022},
  url = {https://arxiv.org/abs/2212.01378},
  archivePrefix = {arXiv},
  eprint = {2212.01378},
}
```

<a href="https://huggingface.co/exbert/?model=ibm/ColD-Fusion">
	<img width="300px" src="https://cdn-media.huggingface.co/exbert/button.png">
</a>
13on/kw2t-wishes
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
2023-01-02T13:50:35Z
---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: LunarLander-v2
      type: LunarLander-v2
    metrics:
    - type: mean_reward
      value: 262.95 +/- 13.99
      name: mean_reward
      verified: false
---

# **PPO** Agent playing **LunarLander-v2**

This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

## Usage (with Stable-baselines3)

A hedged sketch of how such a checkpoint is typically loaded (the repo id and filename are placeholders; check the repository's files for the actual names):

```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# Placeholder repo_id/filename: replace with this model's actual repository and checkpoint name.
checkpoint = load_from_hub(repo_id="<user>/<repo-name>", filename="<model-name>.zip")
model = PPO.load(checkpoint)
```
AdapterHub/bert-base-uncased-pf-winogrande
[ "bert", "en", "dataset:winogrande", "arxiv:2104.08247", "adapter-transformers", "adapterhub:comsense/winogrande" ]
null
{ "architectures": null, "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
---
tags:
- FrozenLake-v1-4x4
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-FrozenLake-v1-4x4-Slippery
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: FrozenLake-v1-4x4
      type: FrozenLake-v1-4x4
    metrics:
    - type: mean_reward
      value: 0.74 +/- 0.44
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **FrozenLake-v1**

This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**.

## Usage

```python
import gym

# `load_from_hub` is the Deep RL course helper; see the hedged sketch earlier in this document.
model = load_from_hub(repo_id="0xid/q-FrozenLake-v1-4x4-Slippery", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
AdapterHub/bert-base-uncased-pf-yelp_polarity
[ "bert", "en", "dataset:yelp_polarity", "arxiv:2104.08247", "adapter-transformers", "text-classification" ]
text-classification
{ "architectures": null, "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
---
tags:
- Taxi-v3
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-Taxi-v3
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: Taxi-v3
      type: Taxi-v3
    metrics:
    - type: mean_reward
      value: 7.56 +/- 2.71
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **Taxi-v3**

This is a trained model of a **Q-Learning** agent playing **Taxi-v3**.

## Usage

```python
import gym

# `load_from_hub` is the Deep RL course helper; see the hedged sketch earlier in this document.
model = load_from_hub(repo_id="tkurtulus/q-Taxi-v3", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
AdapterHub/narrativeqa
[ "bart", "dataset:narrativeqa", "adapter-transformers", "adapterhub:qa/narrativeqa" ]
null
{ "architectures": null, "model_type": "bart", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
23
null
---
language: en
tags:
- financial
- stocks
- topic
datasets:
- Jean-Baptiste/financial_news_sentiment_mixte_with_phrasebank_75
widget:
- text: "LexaGene Receives Signed Quote from Large Biopharma Company to Purchase a MiQLab System -- LexaGene Holdings, Inc., (OTCQB: LXXGF; TSX-V: LXG) (“LexaGene” or the “Company”), an innovative, molecular diagnostics company that has commercialized the MiQLab® System for automated, genetic testing, is pleased to announce that it has received an indication that a major biopharma company intends to purchase its technology."
- text: "Melcor REIT (TSX: MR.UN) today announced results for the third quarter ended September 30, 2022. Revenue was stable in the quarter and year-to-date. Net operating income was down 3% in the quarter at $11.61 million due to the timing of operating expenses and inflated costs including utilities like gas/heat and power"
- text: "Badger Infrastructure Solutions Ltd. Announces Resignation of Chief Financial Officer and Appointment of Interim Chief Financial Officer -- Badger Infrastructure Solutions Ltd. (“Badger” or the “Company”) (TSX:BDGI) announced today the resignation of Mr. Darren Yaworsky, Senior Vice President, Finance & Chief Financial Officer and the appointment of Mr. Pramod Bhatia as interim Chief Financial Officer. Mr. Yaworsky will remain with the Company until December 31, 2022 to facilitate an orderly transition."
license: mit
---

# Model fine-tuned from roberta-large for topic classification of financial news (emphasis on Canadian news)

### Introduction

This model was trained on the topic column of the financial_news_sentiment_mixte_with_phrasebank_75 dataset. The topic column was generated using a zero-shot classification model on 11 topics. There was no manual review of the generated topics, so we should expect misclassifications in the dataset, and the trained model might therefore reproduce the same errors.

### Training data

Training data was classified as follows:

class | Description
-|-
0 | acquisition
1 | other
2 | quaterly financial release
3 | appointment to new position
4 | dividend
5 | corporate update
6 | drillings results
7 | conference
8 | share repurchase program
9 | grant of stocks

### How to use roberta-large-financial-news-topics-en with HuggingFace

##### Load roberta-large-financial-news-topics-en and its sub-word tokenizer:

```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("Jean-Baptiste/roberta-large-financial-news-topics-en")
model = AutoModelForSequenceClassification.from_pretrained("Jean-Baptiste/roberta-large-financial-news-topics-en")
```

##### Process a text sample:

```python
from transformers import pipeline

pipe = pipeline("text-classification", model=model, tokenizer=tokenizer)
pipe("Melcor REIT (TSX: MR.UN) today announced results for the third quarter ended September 30, 2022. Revenue was stable in the quarter and year-to-date. Net operating income was down 3% in the quarter at $11.61 million due to the timing of operating expenses and inflated costs including utilities like gas/heat and power")

[{'label': 'quaterly financial release', 'score': 0.8829097151756287}]
```

### Model performances

Overall f1 score (average macro):

precision | recall | f1
-|-|-
0.7533 | 0.7629 | 0.7499
AdapterHub/roberta-base-pf-emotion
[ "roberta", "en", "dataset:emotion", "arxiv:2104.08247", "adapter-transformers", "text-classification" ]
text-classification
{ "architectures": null, "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
---
tags:
- FrozenLake-v1-8x8-no_slippery
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-FrozenLake-v1-8x8-noSlippery
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: FrozenLake-v1-8x8-no_slippery
      type: FrozenLake-v1-8x8-no_slippery
    metrics:
    - type: mean_reward
      value: 1.00 +/- 0.00
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **FrozenLake-v1**

This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**.

## Usage

```python
import gym

# `load_from_hub` is the Deep RL course helper; see the hedged sketch earlier in this document.
model = load_from_hub(repo_id="0xid/q-FrozenLake-v1-8x8-noSlippery", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
AdapterHub/roberta-base-pf-hotpotqa
[ "roberta", "en", "dataset:hotpot_qa", "arxiv:2104.08247", "adapter-transformers", "question-answering" ]
question-answering
{ "architectures": null, "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
35
null
---
library_name: stable-baselines3
tags:
- SpaceInvadersNoFrameskip-v4
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: DQN
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: SpaceInvadersNoFrameskip-v4
      type: SpaceInvadersNoFrameskip-v4
    metrics:
    - type: mean_reward
      value: 818.00 +/- 306.11
      name: mean_reward
      verified: false
---

# **DQN** Agent playing **SpaceInvadersNoFrameskip-v4**

This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo).

The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included.

## Usage (with SB3 RL Zoo)

RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/>
SB3: https://github.com/DLR-RM/stable-baselines3<br/>
SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib

```
# Download model and save it into the logs/ folder
python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga sliu -f logs/
python enjoy.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/
```

If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do:

```
python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga sliu -f logs/
rl_zoo3 enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/
```

## Training (with the RL Zoo)

```
python train.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/
# Upload the model and generate video (when possible)
python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga sliu
```

## Hyperparameters

```python
OrderedDict([('batch_size', 32),
             ('buffer_size', 100000),
             ('env_wrapper',
              ['stable_baselines3.common.atari_wrappers.AtariWrapper']),
             ('exploration_final_eps', 0.01),
             ('exploration_fraction', 0.1),
             ('frame_stack', 4),
             ('gradient_steps', 1),
             ('learning_rate', 0.0001),
             ('learning_starts', 100000),
             ('n_timesteps', 10000000.0),
             ('optimize_memory_usage', False),
             ('policy', 'CnnPolicy'),
             ('target_update_interval', 1000),
             ('train_freq', 4),
             ('normalize', False)])
```
AdapterHub/roberta-base-pf-imdb
[ "roberta", "en", "dataset:imdb", "arxiv:2104.08247", "adapter-transformers", "text-classification", "adapterhub:sentiment/imdb" ]
text-classification
{ "architectures": null, "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: LunarLander-v2
      type: LunarLander-v2
    metrics:
    - type: mean_reward
      value: 275.15 +/- 21.31
      name: mean_reward
      verified: false
---

# **PPO** Agent playing **LunarLander-v2**

This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

## Usage (with Stable-baselines3)

A hedged sketch of how such a checkpoint is typically loaded (the repo id and filename are placeholders; check the repository's files for the actual names):

```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# Placeholder repo_id/filename: replace with this model's actual repository and checkpoint name.
checkpoint = load_from_hub(repo_id="<user>/<repo-name>", filename="<model-name>.zip")
model = PPO.load(checkpoint)
```
AdapterHub/roberta-base-pf-mit_movie_trivia
[ "roberta", "en", "arxiv:2104.08247", "adapter-transformers", "token-classification", "adapterhub:ner/mit_movie_trivia" ]
token-classification
{ "architectures": null, "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
license: creativeml-openrail-m
tags:
- text-to-image
- stable-diffusion
---

### Barb2000 Dreambooth model trained by asfdsadsada with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook

Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb)

Sample pictures of this concept:
AdapterHub/roberta-base-pf-record
[ "roberta", "en", "arxiv:2104.08247", "adapter-transformers", "text-classification", "adapterhub:rc/record" ]
text-classification
{ "architectures": null, "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
tags:
- generated_from_trainer
metrics:
- accuracy
- f1
model-index:
- name: beto-sentiment-analysis-finetuned
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# beto-sentiment-analysis-finetuned

This model is a fine-tuned version of [finiteautomata/beto-sentiment-analysis](https://huggingface.co/finiteautomata/beto-sentiment-analysis) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 1.4406
- Accuracy: 0.7757
- F1: 0.7773

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 3380
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 35

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1     |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|
| 2.5384        | 1.45  | 100  | 2.1387          | 0.2831   | 0.3049 |
| 2.1562        | 2.9   | 200  | 1.6375          | 0.4596   | 0.4873 |
| 1.5805        | 4.35  | 300  | 1.4332          | 0.5993   | 0.6377 |
| 1.4242        | 5.8   | 400  | 1.3355          | 0.6544   | 0.6565 |
| 1.1192        | 7.25  | 500  | 1.2845          | 0.6765   | 0.6854 |
| 0.9617        | 8.7   | 600  | 1.1512          | 0.6912   | 0.7167 |
| 0.829         | 10.14 | 700  | 1.0676          | 0.6801   | 0.7079 |
| 0.6889        | 11.59 | 800  | 1.0715          | 0.7022   | 0.7323 |
| 0.59          | 13.04 | 900  | 1.1065          | 0.7316   | 0.7392 |
| 0.5129        | 14.49 | 1000 | 1.1585          | 0.7059   | 0.7382 |
| 0.4278        | 15.94 | 1100 | 1.1106          | 0.75     | 0.7582 |
| 0.3728        | 17.39 | 1200 | 1.1561          | 0.7537   | 0.7679 |
| 0.3142        | 18.84 | 1300 | 1.1755          | 0.7537   | 0.7667 |
| 0.275         | 20.29 | 1400 | 1.2095          | 0.7574   | 0.7707 |
| 0.2251        | 21.74 | 1500 | 1.3647          | 0.7574   | 0.7674 |
| 0.2175        | 23.19 | 1600 | 1.3127          | 0.7537   | 0.7635 |
| 0.1923        | 24.64 | 1700 | 1.3494          | 0.7794   | 0.7760 |
| 0.1753        | 26.09 | 1800 | 1.4221          | 0.7684   | 0.7658 |
| 0.1484        | 27.54 | 1900 | 1.3572          | 0.7684   | 0.7727 |
| 0.1455        | 28.99 | 2000 | 1.4063          | 0.7757   | 0.7747 |
| 0.131         | 30.43 | 2100 | 1.3754          | 0.7721   | 0.7730 |
| 0.1125        | 31.88 | 2200 | 1.4302          | 0.7757   | 0.7740 |
| 0.1203        | 33.33 | 2300 | 1.4146          | 0.7684   | 0.7714 |
| 0.1083        | 34.78 | 2400 | 1.4406          | 0.7757   | 0.7773 |

### Framework versions

- Transformers 4.26.0
- Pytorch 1.13.1+cu116
- Datasets 2.8.0
- Tokenizers 0.13.2
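As a hedged illustration for this otherwise stubbed card: the checkpoint path below is a placeholder, and the actual label set depends on the unknown fine-tuning dataset.

```python
# Hedged sketch: Spanish text classification with the fine-tuned BETO checkpoint.
# "beto-sentiment-analysis-finetuned" is a placeholder path to the saved model.
from transformers import pipeline

classifier = pipeline("text-classification", model="beto-sentiment-analysis-finetuned")
print(classifier("Me encantó la película, la recomiendo totalmente."))
```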
AdapterHub/roberta-base-pf-rotten_tomatoes
[ "roberta", "en", "dataset:rotten_tomatoes", "arxiv:2104.08247", "adapter-transformers", "text-classification", "adapterhub:sentiment/rotten_tomatoes" ]
text-classification
{ "architectures": null, "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: ppo results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 251.81 +/- 46.92 name: mean_reward verified: false --- # **ppo** Agent playing **LunarLander-v2** This is a trained model of a **ppo** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch; the repo id and filename below are placeholders to replace with the repository that actually hosts this model:
```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# Placeholder repo id and filename; replace with this model's actual Hub repository
checkpoint = load_from_hub(repo_id="<user>/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)
```
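Once loaded, the policy can be rolled out; a short sketch using the classic Gym `reset`/`step` API (pre-0.26 signatures) with the env id from the card:
```python
import gym

env = gym.make("LunarLander-v2")
obs = env.reset()
for _ in range(1000):
    action, _states = model.predict(obs, deterministic=True)  # greedy action from the policy
    obs, reward, done, info = env.step(action)
    if done:
        obs = env.reset()
env.close()
```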
AdapterHub/roberta-base-pf-scitail
[ "roberta", "en", "dataset:scitail", "arxiv:2104.08247", "adapter-transformers", "text-classification", "adapterhub:nli/scitail" ]
text-classification
{ "architectures": null, "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- language: es license: gpl-3.0 tags: - spacy - token-classification widget: - text: "Fue antes de llegar a Sigüeiro, en el Camino de Santiago." - text: "El proyecto lo financia el Ministerio de Industria y Competitividad." model-index: - name: es_spacy_ner_cds results: - task: name: NER type: token-classification metrics: - name: NER Precision type: precision value: 0.9648998822 - name: NER Recall type: recall value: 0.9603751465 - name: NER F Score type: f_score value: 0.9626321974 --- # Introduction spaCy NER model for Spanish trained on interviews in the tourism domain related to the Way of Saint James (Camino de Santiago). It recognizes four entity types: locations (LOC), organizations (ORG), persons (PER), and miscellaneous (MISC). | Feature | Description | | --- | --- | | **Name** | `es_spacy_ner_cds` | | **Version** | `0.0.1a` | | **spaCy** | `>=3.4.3,<3.5.0` | | **Default Pipeline** | `tok2vec`, `ner` | | **Components** | `tok2vec`, `ner` | ### Label Scheme <details> <summary>View label scheme (4 labels for 1 component)</summary> | Component | Labels | | --- | --- | | **`ner`** | `LOC`, `MISC`, `ORG`, `PER` | </details> ## Usage You can use this model with the spaCy *pipeline* for NER. ```python import spacy from spacy.pipeline import merge_entities nlp = spacy.load("es_spacy_ner_cds") nlp.add_pipe('sentencizer') example = "Fue antes de llegar a Sigüeiro, en el Camino de Santiago. El proyecto lo financia el Ministerio de Industria y Competitividad." ner_pipe = nlp(example) print(ner_pipe.ents) for token in merge_entities(ner_pipe): print(token.text, token.ent_type_) ``` ## Dataset ToDo ### Accuracy | Type | Score | | --- | --- | | `ENTS_F` | 96.26 | | `ENTS_P` | 96.49 | | `ENTS_R` | 96.04 | | `TOK2VEC_LOSS` | 62780.17 | | `NER_LOSS` | 34006.41 |
AdapterHub/roberta-base-pf-sick
[ "roberta", "en", "dataset:sick", "arxiv:2104.08247", "adapter-transformers", "text-classification", "adapterhub:nli/sick" ]
text-classification
{ "architectures": null, "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
21
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # {MODEL_NAME} **This model was fine-tuned with SetFit using 1 utterance per intent and is used for a university project on intent detection. Other uses are untested.** This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 384 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you apply the right pooling operation on top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}') model = AutoModel.from_pretrained('{MODEL_NAME}') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 30 with parameters: ``` {'batch_size': 16, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss` Parameters of the fit()-Method: ``` { "epochs": 1, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": 30, "warmup_steps": 3, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: BertModel (1): Pooling({'word_embedding_dimension': 384, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
AdapterHub/roberta-base-pf-stsb
[ "roberta", "en", "arxiv:2104.08247", "adapter-transformers", "text-classification", "adapterhub:sts/sts-b" ]
text-classification
{ "architectures": null, "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
Fusion-in-Decoder (FiD) is a model described in the following paper: > Izacard, Gautier, and Édouard Grave. [Leveraging Passage Retrieval with Generative Models for Open Domain Question Answering](https://aclanthology.org/2021.eacl-main.74/). _Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume_. 2021. We have replicated FiD training with our Wikipedia corpus variants and incorporated the model into our [PyGaggle](https://github.com/castorini/pygaggle) neural text ranking library. Our own efforts are described in the paper entitled: > Pre-Processing Matters! Improved Wikipedia Corpora for Open-Domain Question Answering. This is a FiD-large reader model for the wiki-all-8-4 corpus variant trained on the TriviaQA dataset.
AdapterHub/roberta-base-pf-swag
[ "roberta", "en", "dataset:swag", "arxiv:2104.08247", "adapter-transformers" ]
null
{ "architectures": null, "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
Fusion-in-Decoder (FiD) is a model described in the following paper: > Izacard, Gautier, and Édouard Grave. [Leveraging Passage Retrieval with Generative Models for Open Domain Question Answering](https://aclanthology.org/2021.eacl-main.74/). _Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume_. 2021. We have replicated FiD training with our Wikipedia corpus variants and incorporated the model into our [PyGaggle](https://github.com/castorini/pygaggle) neural text ranking library. Our own efforts are described in the paper entitled: > Pre-Processing Matters! Improved Wikipedia Corpora for Open-Domain Question Answering. This is a FiD-large reader model for the wiki-text-8-4 corpus variant trained on the Natural Questions dataset.
Adarsh123/distilbert-base-uncased-finetuned-ner
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-01-02T21:25:34Z
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity --- # {MODEL_NAME} **This model was fine-tuned with SetFit using 1 utterance per intent and is used for a university project on intent detection. Other uses are untested.** This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 384 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 30 with parameters: ``` {'batch_size': 16, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss` Parameters of the fit()-Method: ``` { "epochs": 1, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": 30, "warmup_steps": 3, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 256, 'do_lower_case': False}) with Transformer model: BertModel (1): Pooling({'word_embedding_dimension': 384, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) (2): Normalize() ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
Adharsh2608/DialoGPT-small-harrypotter
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity --- # {MODEL_NAME} **This model was fine-tuned with SetFit using 5 utterances per intent and is used for a university project on intent detection. Other uses are untested.** This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 384 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 150 with parameters: ``` {'batch_size': 16, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss` Parameters of the fit()-Method: ``` { "epochs": 1, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": 150, "warmup_steps": 15, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 256, 'do_lower_case': False}) with Transformer model: BertModel (1): Pooling({'word_embedding_dimension': 384, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) (2): Normalize() ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
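For readers wondering what the SetFit fine-tuning described above looks like in code, here is a hedged sketch following the original `setfit` release's `SetFitTrainer` API; the base checkpoint, example intents, and hyperparameters are illustrative assumptions, not the exact recipe behind this model:
```python
from datasets import Dataset
from sentence_transformers.losses import CosineSimilarityLoss
from setfit import SetFitModel, SetFitTrainer

# Illustrative few-shot data: 5 utterances per intent, labels as integer ids
train_ds = Dataset.from_dict({
    "text": ["turn on the lights", "switch the lamp on", "lights on please",
             "brighten the room", "make it lighter in here",
             "play some jazz", "put on music", "start my playlist",
             "play a song", "turn the radio on"],
    "label": [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
})

# Base checkpoint is an assumption; any sentence-transformers model works
model = SetFitModel.from_pretrained("sentence-transformers/paraphrase-MiniLM-L6-v2")
trainer = SetFitTrainer(
    model=model,
    train_dataset=train_ds,
    loss_class=CosineSimilarityLoss,
    batch_size=16,
    num_epochs=1,
)
trainer.train()
print(model.predict(["dim the lights", "play something relaxing"]))
```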
Adinda/Adinda
[ "license:artistic-2.0" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-01-02T21:42:52Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 291.75 +/- 16.72 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch; the repo id and filename below are placeholders to replace with the repository that actually hosts this model:
```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# Placeholder repo id and filename; replace with this model's actual Hub repository
checkpoint = load_from_hub(repo_id="<user>/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)
```
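A short follow-up showing how a mean reward like the one reported above can be measured, assuming `model` was loaded as in the sketch:
```python
import gym
from stable_baselines3.common.evaluation import evaluate_policy

env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10, deterministic=True)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```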
Adityanawal/testmodel_1
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: openrail --- A minimal fine-tuning sketch with the Transformers `Trainer` API (the datasets are left as placeholders, and the model must be instantiated before being passed to `Trainer`):
```bash
pip install transformers
```
```python
from transformers import AutoModelForSequenceClassification, Trainer, TrainingArguments

# Load the training and validation data
train_data = ...
validation_data = ...

# Define the model architecture and hyperparameters
model_name = "bert-base-cased"
num_labels = 2
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=num_labels)

# Define the training arguments
training_args = TrainingArguments(
    output_dir="./output",           # directory to save the trained model
    num_train_epochs=3,              # number of training epochs
    per_device_train_batch_size=32,  # batch size
    per_device_eval_batch_size=64,   # batch size for evaluation
    warmup_steps=500,                # number of warmup steps
    weight_decay=0.01,               # L2 regularization coefficient
    learning_rate=3e-5,              # learning rate
    adam_epsilon=1e-8,               # epsilon for the Adam optimizer
    max_grad_norm=1.0,               # maximum gradient norm for gradient clipping
    save_steps=1000,                 # number of steps after which to save the model
    save_total_limit=2,              # maximum number of checkpoints to keep
)

# Initialize the trainer (Trainer takes a model instance and the datasets directly)
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_data,
    eval_dataset=validation_data,
)

# Train the model
trainer.train()
```
Advertisement/FischlUWU
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - whisper-event - generated_from_trainer datasets: - data/copas metrics: - wer model-index: - name: Whisper Small dysarthric Dutch results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: data/copas copas-full type: data/copas config: copas-full split: test args: copas-full metrics: - name: Wer type: wer value: 22.163827473722364 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Small dysarthric Dutch This model is a fine-tuned version of [qmeeus/whisper-small-nl](https://huggingface.co/qmeeus/whisper-small-nl) on the data/copas copas-full dataset. It achieves the following results on the evaluation set: - Loss: 0.4702 - Wer: 22.1638 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 10000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:-----:|:---------------:|:-------:| | 0.1618 | 0.05 | 500 | 0.3787 | 28.9235 | | 0.0583 | 1.05 | 1000 | 0.3732 | 25.7702 | | 0.0382 | 2.05 | 1500 | 0.4001 | 25.4621 | | 0.0316 | 3.05 | 2000 | 0.4081 | 24.7010 | | 0.0169 | 4.05 | 2500 | 0.4325 | 24.1935 | | 0.0153 | 5.05 | 3000 | 0.4325 | 33.4179 | | 0.0074 | 6.05 | 3500 | 0.4367 | 23.9398 | | 0.0096 | 7.05 | 4000 | 0.4390 | 23.3055 | | 0.0054 | 8.05 | 4500 | 0.4441 | 23.7042 | | 0.0032 | 9.04 | 5000 | 0.4493 | 23.2693 | | 0.004 | 10.04 | 5500 | 0.4524 | 23.3418 | | 0.0048 | 11.04 | 6000 | 0.4498 | 23.7224 | | 0.001 | 12.04 | 6500 | 0.4577 | 22.8887 | | 0.0002 | 13.04 | 7000 | 0.4577 | 22.0913 | | 0.0001 | 14.04 | 7500 | 0.4616 | 22.1276 | | 0.0001 | 15.04 | 8000 | 0.4639 | 22.2726 | | 0.0001 | 16.04 | 8500 | 0.4662 | 22.1095 | | 0.0001 | 17.04 | 9000 | 0.4684 | 22.1457 | | 0.0001 | 18.04 | 9500 | 0.4697 | 22.1457 | | 0.0001 | 19.04 | 10000 | 0.4702 | 22.1638 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.12.1+cu116 - Datasets 2.4.0 - Tokenizers 0.12.1
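Since the usage sections above are unfilled, here is a hedged transcription sketch via the ASR pipeline; the repo id is a placeholder for wherever this fine-tuned checkpoint is hosted:
```python
from transformers import pipeline

# Placeholder repo id; point at the repository hosting this fine-tuned checkpoint
asr = pipeline("automatic-speech-recognition", model="<user>/whisper-small-dysarthric-nl", chunk_length_s=30)
print(asr("sample.wav")["text"])
```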
Aeroxas/Botroxas-small
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity --- # {MODEL_NAME} **This model was fine-tuned with SetFit using 5 utterances and is used for a university project on intent detection. Other uses are untested.** This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 150 with parameters: ``` {'batch_size': 16, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss` Parameters of the fit()-Method: ``` { "epochs": 1, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": 150, "warmup_steps": 15, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 384, 'do_lower_case': False}) with Transformer model: MPNetModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) (2): Normalize() ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
Ahmadatiya97/Alannah
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - generated_from_trainer metrics: - recall - precision - f1 model-index: - name: t5-base-extraction-cnndm_fs0.02-c results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-base-extraction-cnndm_fs0.02-c This model was trained from scratch on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.8400 - Recall: 35.4852 - Precision: 40.9499 - F1: 36.9238 - Gen Len: 19.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-06 - train_batch_size: 32 - eval_batch_size: 32 - seed: 1799 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Recall | Precision | F1 | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-------:|:---------:|:-------:|:-------:| | 2.7966 | 1.14 | 200 | 2.0568 | 17.0907 | 45.1725 | 21.4724 | 12.7061 | | 2.1271 | 2.29 | 400 | 1.8400 | 35.4852 | 40.9499 | 36.9238 | 19.0 | | 1.9831 | 3.43 | 600 | 1.7756 | 35.0259 | 39.8685 | 36.1824 | 18.9962 | | 1.9025 | 4.57 | 800 | 1.7365 | 34.9077 | 39.2092 | 35.8205 | 19.0 | | 1.8564 | 5.71 | 1000 | 1.7075 | 33.8282 | 38.141 | 34.765 | 19.0 | | 1.8164 | 6.86 | 1200 | 1.6898 | 34.6927 | 38.999 | 35.5568 | 19.0 | | 1.7929 | 8.0 | 1400 | 1.6753 | 34.9922 | 39.2711 | 35.8318 | 19.0 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.10.0+cu111 - Datasets 2.8.0 - Tokenizers 0.13.2
Ahmed59/Demo-Team-5-SIAD
[ "tf", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
null
--- inference: false tags: - onnx - text-classification - bert - adapterhub:qa/boolq - adapter-transformers datasets: - boolq language: - en --- # ONNX export of Adapter `AdapterHub/bert-base-uncased-pf-boolq` for bert-base-uncased ## Conversion of [AdapterHub/bert-base-uncased-pf-boolq](https://huggingface.co/AdapterHub/bert-base-uncased-pf-boolq) for UKP SQuARE ## Usage
```python
import numpy as np
from huggingface_hub import hf_hub_download
from onnxruntime import InferenceSession
from transformers import AutoTokenizer

onnx_path = hf_hub_download(repo_id='UKP-SQuARE/bert-base-uncased-pf-boolq-onnx', filename='model.onnx')  # or model_quant.onnx for quantization
onnx_model = InferenceSession(onnx_path, providers=['CPUExecutionProvider'])

context = 'English orthography typically represents vowel sounds with the five conventional vowel letters ⟨a, e, i, o, u⟩, as well as ⟨y⟩, which may also be a consonant depending on context. However, outside of abbreviations, there are a handful of words in English that do not have vowels, either because the vowel sounds are not written with vowel letters or because the words themselves are pronounced without vowel sounds.'
question = 'can there be a word without a vowel'

tokenizer = AutoTokenizer.from_pretrained('UKP-SQuARE/bert-base-uncased-pf-boolq-onnx')
inputs = tokenizer(question, context, padding=True, truncation=True, return_tensors='np')
inputs = {key: np.array(inputs[key], dtype=np.int64) for key in inputs}
outputs = onnx_model.run(input_feed=dict(inputs), output_names=None)
```
## Architecture & Training The training code for this adapter is available at https://github.com/adapter-hub/efficient-task-transfer. In particular, training configurations for all tasks can be found [here](https://github.com/adapter-hub/efficient-task-transfer/tree/master/run_configs). ## Evaluation results Refer to [the paper](https://arxiv.org/pdf/2104.08247) for more information on results. ## Citation If you use this adapter, please cite our paper ["What to Pre-Train on? Efficient Intermediate Task Selection"](https://arxiv.org/pdf/2104.08247):
```bibtex
@inproceedings{poth-etal-2021-pre,
    title = "{W}hat to Pre-Train on? {E}fficient Intermediate Task Selection",
    author = {Poth, Clifton and Pfeiffer, Jonas and R{\"u}ckl{\'e}, Andreas and Gurevych, Iryna},
    booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing",
    month = nov,
    year = "2021",
    address = "Online and Punta Cana, Dominican Republic",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.emnlp-main.827",
    pages = "10585--10605",
}
```
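Continuing the snippet above, the run returns classification logits; one way to read off the boolean answer is sketched below — the label order (index 0 = False, index 1 = True) is an assumption to verify against the adapter's label map:
```python
logits = outputs[0]
answer = bool(np.argmax(logits, axis=-1)[0])  # label order is an assumption
print(answer)
```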
AhmedHassan19/model
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- inference: false tags: - onnx - text-classification - roberta - adapterhub:qa/boolq - adapter-transformers datasets: - boolq language: - en --- # ONNX export of Adapter `AdapterHub/roberta-base-pf-boolq` for roberta-base ## Conversion of [AdapterHub/roberta-base-pf-boolq](https://huggingface.co/AdapterHub/roberta-base-pf-boolq) for UKP SQuARE ## Usage
```python
import numpy as np
from huggingface_hub import hf_hub_download
from onnxruntime import InferenceSession
from transformers import AutoTokenizer

onnx_path = hf_hub_download(repo_id='UKP-SQuARE/roberta-base-pf-boolq-onnx', filename='model.onnx')  # or model_quant.onnx for quantization
onnx_model = InferenceSession(onnx_path, providers=['CPUExecutionProvider'])

context = 'English orthography typically represents vowel sounds with the five conventional vowel letters ⟨a, e, i, o, u⟩, as well as ⟨y⟩, which may also be a consonant depending on context. However, outside of abbreviations, there are a handful of words in English that do not have vowels, either because the vowel sounds are not written with vowel letters or because the words themselves are pronounced without vowel sounds.'
question = 'can there be a word without a vowel'

tokenizer = AutoTokenizer.from_pretrained('UKP-SQuARE/roberta-base-pf-boolq-onnx')
inputs = tokenizer(question, context, padding=True, truncation=True, return_tensors='np')
inputs = {key: np.array(inputs[key], dtype=np.int64) for key in inputs}
outputs = onnx_model.run(input_feed=dict(inputs), output_names=None)
```
## Architecture & Training The training code for this adapter is available at https://github.com/adapter-hub/efficient-task-transfer. In particular, training configurations for all tasks can be found [here](https://github.com/adapter-hub/efficient-task-transfer/tree/master/run_configs). ## Evaluation results Refer to [the paper](https://arxiv.org/pdf/2104.08247) for more information on results. ## Citation If you use this adapter, please cite our paper ["What to Pre-Train on? Efficient Intermediate Task Selection"](https://arxiv.org/pdf/2104.08247):
```bibtex
@inproceedings{poth-etal-2021-pre,
    title = "{W}hat to Pre-Train on? {E}fficient Intermediate Task Selection",
    author = {Poth, Clifton and Pfeiffer, Jonas and R{\"u}ckl{\'e}, Andreas and Gurevych, Iryna},
    booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing",
    month = nov,
    year = "2021",
    address = "Online and Punta Cana, Dominican Republic",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.emnlp-main.827",
    pages = "10585--10605",
}
```
Ahmedahmed/Wewe
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: sklearn tags: - sklearn - skops - tabular-regression model_file: umit_regress.pkl widget: structuredData: AGE: - 92.7 - 97.4 - 18.5 B: - 395.09 - 302.76 - 392.33 CHAS: - 0 - 0 - 0 CRIM: - 0.15086 - 6.39312 - 0.07244 DIS: - 1.8209 - 2.206 - 10.7103 INDUS: - 27.74 - 18.1 - 1.69 LSTAT: - 18.06 - 24.1 - 7.79 NOX: - 0.609 - 0.584 - 0.411 PTRATIO: - 20.1 - 20.2 - 18.3 RAD: - 4 - 24 - 4 RM: - 5.454 - 6.162 - 5.884 TAX: - 711.0 - 666.0 - 411.0 ZN: - 0.0 - 0.0 - 60.0 --- # Model description [More Information Needed] ## Intended uses & limitations [More Information Needed] ## Training Procedure ### Hyperparameters The model is trained with the hyperparameters below. <details> <summary> Click to expand </summary> | Hyperparameter | Value | |------------------|---------| | copy_X | True | | fit_intercept | True | | n_jobs | | | positive | False | </details> ### Model Plot The model plot is below. ``` LinearRegression() ``` ## Evaluation Results Details of the evaluation process and results are below. | Metric | Value | |--------------------|-----------| | Mean Squared Error | 23.7928 | | R-Squared | 0.751045 | # How to Get Started with the Model [More Information Needed] # Model Card Authors This model card is written by the following authors: [More Information Needed] # Model Card Contact You can contact the model card authors through the following channels: [More Information Needed] # Citation Below you can find information related to citation. **BibTeX:** ``` [More Information Needed] ``` # limitations This model is not ready to be used in production. # model_description More info on me [umit isikdag](https://isikdag.com).
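Since "How to Get Started with the Model" is unfilled, a hedged loading sketch: the repo id is a placeholder, the filename comes from the card's `model_file` field, and the feature order (classic Boston-housing order) is an assumption to match against the training pipeline.
```python
import pickle

from huggingface_hub import hf_hub_download

# Placeholder repo id; the filename is taken from the card's model_file field
path = hf_hub_download(repo_id="<user>/<repo>", filename="umit_regress.pkl")
with open(path, "rb") as f:
    model = pickle.load(f)

# One row of the 13 features (CRIM, ZN, INDUS, CHAS, NOX, RM, AGE, DIS, RAD,
# TAX, PTRATIO, B, LSTAT) -- this feature order is an assumption
row = [[0.15086, 0.0, 27.74, 0, 0.609, 5.454, 92.7, 1.8209, 4, 711.0, 20.1, 395.09, 18.06]]
print(model.predict(row))
```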
Ahren09/distilbert-base-uncased-finetuned-cola
[ "pytorch", "tensorboard", "distilbert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
33
null
--- license: apache-2.0 --- # Classifier architecture The classifier uses DenseNet161 as the encoder with a small stack of linear layers as the classifier head. # Model accuracy: The model achieves 91.3% accuracy on the validation set. \ F1-score per class: {'digital': 0.9873773235685747, 'hard': 0.9338602782753218, 'soft': 0.8444277483052108} \ Mean F1-score: 0.9218884500497024 \ Accuracy: 0.913 # Training dataset metadata: 1. Dataset classes: ['soft', 'digital', 'hard'] 2. Number of classes: 3 3. Total number of images: 18415 # Number of images per class: - soft: 5482 - digital: 1206 - hard: 11727 # Classes description: 1. The **hard** class denotes a group of scenes to which a coarser background removal method should be applied, intended for objects whose edges lack fine detail. The hard class contains the following categories of objects: object, laptop, charger, pc mouse, pc, rocks, table, bed, box, sneakers, ship, wire, guitar, fork, spoon, plate, keyboard, car, bus, screwdriver, ball, door, flower, clocks, fruit, food, robot. 2. The **soft** class denotes a group of scenes to which a soft background removal method should be applied, intended for people, hair, clothes, and other similar types of objects. The soft class contains the following categories of objects: animal, people, human, man, woman, t-shirt, hairs, hair, dog, cat, monkey, cow, medusa, clothes. 3. The **digital** class denotes a group of images with digital graphics, such as screenshots, logos, and so on. The digital class contains the following categories of scenes: screenshot.
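The card doesn't include code, so here is a hypothetical sketch of the described layout — a DenseNet161 encoder feeding a small linear classifier head for the three classes; the head width and dropout rate are assumptions:
```python
import torch.nn as nn
from torchvision import models

class BackgroundRemovalClassifier(nn.Module):
    """DenseNet161 encoder with a linear classifier head (sketch)."""

    def __init__(self, num_classes: int = 3):
        super().__init__()
        encoder = models.densenet161(weights="DEFAULT")
        in_features = encoder.classifier.in_features  # 2208 for DenseNet161
        encoder.classifier = nn.Identity()            # keep only the encoder features
        self.encoder = encoder
        self.head = nn.Sequential(                    # head sizes are assumptions
            nn.Linear(in_features, 512),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(512, num_classes),              # soft, digital, hard
        )

    def forward(self, x):
        return self.head(self.encoder(x))
```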
Akari/albert-base-v2-finetuned-squad
[ "pytorch", "tensorboard", "albert", "question-answering", "dataset:squad_v2", "transformers", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible" ]
question-answering
{ "architectures": [ "AlbertForQuestionAnswering" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 630.00 +/- 204.11 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga armargolis -f logs/ python enjoy.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga armargolis -f logs/ rl_zoo3 enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python train.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga armargolis ``` ## Hyperparameters ```python OrderedDict([('batch_size', 32), ('buffer_size', 100000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.0001), ('learning_starts', 100000), ('n_timesteps', 10000000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ```
AkshatSurolia/ViT-FaceMask-Finetuned
[ "pytorch", "safetensors", "vit", "image-classification", "dataset:Face-Mask18K", "transformers", "license:apache-2.0", "autotrain_compatible" ]
image-classification
{ "architectures": [ "ViTForImageClassification" ], "model_type": "vit", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
40
null
--- license: creativeml-openrail-m language: - en tags: - text-to-image - midjourney - stable-diffusion - disco-diffusion - art - arxiv:2208.12242 inference: true library_name: diffusers --- ## Paint Journey V2 Paint Journey V2 is [V1](https://huggingface.co/FredZhang7/paint-journey-v1) fine-tuned on 768x768 oil paintings by Midjourney V4, Open Journey V2, Disco Diffusion, and artists who gave permission. Begin the prompt with **((oil painting))** to add the oil paint effect. For digital and other painting styles, use similar prompts as you would for Midjourney V4 (with some tweaks), Stable Diffusion v1.5 (add more styles), Open Journey V2, or Disco Diffusion. [![Open with Camenduru's WebUI in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/AMLA-UBC/100-Exploring-the-World-of-Modern-Machine-Learning/blob/main/assets/PaintJourneyV2.ipynb) ## Examples *All examples were generated using Camenduru's WebUI (see the Colab file)* ![](./assets/characters.png) *⬆️ 768x1136 portraits, generated using descriptive prompts and without face restoration, [generation parameters](https://huggingface.co/FredZhang7/paint-journey-v2/raw/main/assets/character_settings.txt)* ![](./assets/nature.png) *⬆️ 1280x768 (mostly) natural landscapes, using shorter prompts, [generation parameters](https://huggingface.co/FredZhang7/paint-journey-v2/raw/main/assets/nature_settings.txt)* ![](./assets/outerspace.png) *⬆️ 1152x768 outer-space landscapes, using descriptive prompts, [generation parameters](https://huggingface.co/FredZhang7/paint-journey-v2/raw/main/assets/outerspace_settings.txt)* ![](./assets/lamborghini.png) *⬆️ 1280x768 Lamborghini, [generation parameters](https://huggingface.co/FredZhang7/paint-journey-v2/raw/main/assets/lamborghini_settings.txt)* ![](./assets/eevee.png) *⬆️ 960x768 Eevee, [generation parameters](https://huggingface.co/FredZhang7/paint-journey-v2/raw/main/assets/eevee_settings.txt)* ## Comparisons Paint Journey V2's paintings are closer to human-drawn art than Open Journey V2's. Compared to models like Dreamlike Diffusion 1.0, PJ V2 tends to generate 768x768 or higher-resolution images with reduced noise levels. This model is also capable of generating stunning portraits at 768x1136 resolution without duplicated faces (with [Camenduru's WebUI](https://github.com/camenduru/stable-diffusion-webui)), a difficult task for models like DreamShaper 3.3. At lower resolutions, DreamShaper 3.3 tends to generate higher-quality portraits than PJ V2 in terms of noise levels, given the same (short) positive and negative prompts. However, PJ V2 can craft more stunning masterpieces with more descriptive positive and negative prompts, and can still generate beautiful landscapes with shorter prompts. ## Training Instead of solely fine-tuning its U-Net, Paint Journey V2 focuses on fine-tuning its text encoder with a diverse range of prompts. This allows for a seamless blend of the digital and oil painting styles into various other types of prompts, resulting in more natural and dynamic output. This model was trained on a curated dataset of roughly 300 images hand-picked from Midjourney, [Prompt Hero](https://prompthero.com/), [PixaBay](https://pixabay.com/images/search/paintings/), Open Journey V2, and Reddit. Before training, I used R-ESRGAN 4x on many images to increase their resolution and reduce noise. ## Running out of prompts? 
Useful resources: [Lexica.art](https://lexica.art/), [Fast GPT PromptGen](https://huggingface.co/FredZhang7/distilgpt2-stable-diffusion-v2), [Prompt Hero](https://prompthero.com/) ## Output Dimensions Portrait sizes include, but are not limited to, `512x768`, `768x768`, and `768x1136`. Landscape sizes include, but are not limited to, `768x512`, `768x768`, `1152x768`, and `1280x768`. ## Camenduru's WebUI ``` git clone -b v1.6 https://github.com/camenduru/stable-diffusion-webui ``` <details> <summary> Click to use Automatic1111's WebUI instead; it may not output images that are as artistic </summary> ``` git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git ``` </details> Download the [checkpoint](./paint_journey_v2.ckpt) and [vae](./paint_journey_v2.vae.pt) to the `./stable-diffusion-webui/models/Stable-diffusion` folder. Run `webui-user.bat`. ## 🧨 Diffusers *Tip: using double, triple, or quadruple brackets around a word (e.g. "((WORD))") puts an 'emphasis' on that word* ```bash pip install --upgrade diffusers transformers ```
```python
# see more sampling algorithms at https://huggingface.co/docs/diffusers/using-diffusers/schedulers#changing-the-scheduler
from datetime import datetime

import torch, random
from diffusers import StableDiffusionPipeline, EulerAncestralDiscreteScheduler

pipe = StableDiffusionPipeline.from_pretrained("FredZhang7/paint-journey-v2")
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")

def random_seed():
    return random.randint(0, 2**32 - 1)

prompt = "((oil painting)), gentle waves, bright blue sky, white sails billowing, sun glistening on the surface, salty sea air, distant horizon, calm breeze, birds soaring overhead, vibrant colors, artstation digital painting, high resolution, uhd, 4 k, 8k wallpaper"  # what you want to see
negative_prompt = "low-res, blurry, haze, dark clouds looming, choppy waves, engine failing, sails tattered, stormy winds"  # what you don't want to see

seed = random_seed()  # replace with the desired seed if needed
width, height = 1280, 768  # width and height of the generated image
cfg_scale = 7.5  # classifier-free guidance scale; smaller means more creative, 7 to 11 is usually a good range
num_inference_steps = 40  # sampling steps, 30 to 40 is usually good for Euler Ancestral

generator = torch.Generator("cuda").manual_seed(seed)
with torch.autocast("cuda"):
    image = pipe(prompt=prompt, negative_prompt=negative_prompt, num_inference_steps=num_inference_steps,
                 width=width, height=height, generator=generator, guidance_scale=cfg_scale).images[0]

def generate_filename(string, seed):
    invalid_chars = ["<", ">", ":", '"', "/", "\\", "|", "?", "*"]
    for char in invalid_chars:
        string = string.replace(char, "")
    return f"{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}_{seed}_{string}"

image.save(f"./{generate_filename(prompt, seed)}.png")
```
## Safety Checker V2 The official [stable diffusion safety checker](https://huggingface.co/CompVis/stable-diffusion-safety-checker) uses up 1.22GB VRAM. I recommend using [Google Safesearch Mini V2](https://huggingface.co/FredZhang7/google-safesearch-mini-v2) (220MB) to save 1.0GB of VRAM.
AlErysvi/Erys
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit tags: - generated_from_trainer datasets: - xtreme metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-de results: - task: name: Token Classification type: token-classification dataset: name: xtreme type: xtreme config: PAN-X.de split: train args: PAN-X.de metrics: - name: F1 type: f1 value: 0.8697972857872921 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-de This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset. It achieves the following results on the evaluation set: - Loss: 0.1727 - F1: 0.8698 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.2544 | 1.0 | 787 | 0.1789 | 0.8115 | | 0.1391 | 2.0 | 1574 | 0.1601 | 0.8223 | | 0.0929 | 3.0 | 2361 | 0.1497 | 0.8586 | | 0.0591 | 4.0 | 3148 | 0.1528 | 0.8673 | | 0.0368 | 5.0 | 3935 | 0.1727 | 0.8698 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.8.0 - Tokenizers 0.13.2
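A minimal inference sketch for this checkpoint (the model id below is assumed from the card title; prepend the owner's namespace or adjust it to the actual repository path):

```python
from transformers import pipeline

# Model id assumed from the card title; this is not guaranteed to be the full repo path.
ner = pipeline(
    "token-classification",
    model="xlm-roberta-base-finetuned-panx-de",
    aggregation_strategy="simple",  # merge word pieces back into whole entities
)
print(ner("Jeff Dean arbeitet bei Google in Kalifornien."))
```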
Aleksandar/distilbert-srb-ner-setimes-lr
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit --- ## What is it? Just a mirror of a model from https://github.com/isl-org/MiDaS, to allow downloading with Huggingface Hub tools ## Citation ```bibtex @ARTICLE {Ranftl2022, author = "Ren\'{e} Ranftl and Katrin Lasinger and David Hafner and Konrad Schindler and Vladlen Koltun", title = "Towards Robust Monocular Depth Estimation: Mixing Datasets for Zero-Shot Cross-Dataset Transfer", journal = "IEEE Transactions on Pattern Analysis and Machine Intelligence", year = "2022", volume = "44", number = "3" } ``` ```bibtex @article{Ranftl2021, author = {Ren\'{e} Ranftl and Alexey Bochkovskiy and Vladlen Koltun}, title = {Vision Transformers for Dense Prediction}, journal = {ICCV}, year = {2021}, } ```
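A minimal download sketch with the Hub tools mentioned above; both the repository id and the filename are placeholders, since the card does not spell them out:

```python
from huggingface_hub import hf_hub_download

# Placeholders: substitute the mirror's repo id and the MiDaS weight file you need.
weights_path = hf_hub_download(repo_id="<mirror-repo-id>", filename="<midas-weights>.pt")
print("weights downloaded to", weights_path)
```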
Aleksandar/distilbert-srb-ner
[ "pytorch", "distilbert", "token-classification", "sr", "dataset:wikiann", "transformers", "generated_from_trainer", "autotrain_compatible" ]
token-classification
{ "architectures": [ "DistilBertForTokenClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### ssaassaaddoo Dreambooth model trained by sasa30 with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb) Sample pictures of this concept:
Aleksandra/distilbert-base-uncased-finetuned-squad
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
tags:
- text-classification
widget:
- text: "Take out the trash."
  example_title: "Example 1"
- text: "Cut the tomato."
  example_title: "Example 2"
---

# Temporal Action Prediction

Prediction of action effect time from simple sentences.
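A minimal inference sketch, assuming the checkpoint carries a standard sequence-classification head (the repository id below is a placeholder, since the card does not name one); the two inputs are the widget examples above:

```python
from transformers import pipeline

# "<repo-id>" is a placeholder; point it at the actual checkpoint.
clf = pipeline("text-classification", model="<repo-id>")

print(clf("Take out the trash."))
print(clf("Cut the tomato."))
```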
AlexaMerens/Owl
[ "license:cc" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - generated_from_trainer model-index: - name: T5-asr-corrector results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # T5-asr-corrector This model is a fine-tuned version of [flax-community/bengali-t5-base](https://huggingface.co/flax-community/bengali-t5-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.4683 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 6 - total_train_batch_size: 48 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 1.6804 | 0.15 | 500 | 0.8576 | | 0.792 | 0.31 | 1000 | 0.6556 | | 0.6553 | 0.46 | 1500 | 0.5640 | | 0.5901 | 0.62 | 2000 | 0.5114 | | 0.5454 | 0.77 | 2500 | 0.4815 | | 0.53 | 0.93 | 3000 | 0.4683 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.8.0 - Tokenizers 0.13.2
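A minimal correction sketch using the standard seq2seq API (the repository id is a placeholder, and the input is whatever noisy Bengali ASR transcript you want to clean up):

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_id = "<repo-id>"  # placeholder; point at the fine-tuned checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

asr_transcript = "..."  # a noisy ASR hypothesis goes here
inputs = tokenizer(asr_transcript, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64, num_beams=4)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```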
Alexander-Learn/bert-finetuned-ner
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
---
license: cc-by-4.0
metrics:
- bleu4
- meteor
- rouge-l
- bertscore
- moverscore
language: fr
datasets:
- lmqg/qag_frquad
pipeline_tag: text2text-generation
tags:
- questions and answers generation
widget:
- text: "Créateur » (Maker), lui aussi au singulier, « le Suprême Berger » (The Great Shepherd) ; de l'autre, des réminiscences de la théologie de l'Antiquité : le tonnerre, voix de Jupiter, « Et souvent ta voix gronde en un tonnerre terrifiant », etc."
  example_title: "Questions & Answers Generation Example 1"
model-index:
- name: lmqg/mbart-large-cc25-frquad-qag
  results:
  - task:
      name: Text2text Generation
      type: text2text-generation
    dataset:
      name: lmqg/qag_frquad
      type: default
      args: default
    metrics:
    - name: QAAlignedF1Score-BERTScore (Question & Answer Generation)
      type: qa_aligned_f1_score_bertscore_question_answer_generation
      value: 77.75
    - name: QAAlignedRecall-BERTScore (Question & Answer Generation)
      type: qa_aligned_recall_bertscore_question_answer_generation
      value: 79.45
    - name: QAAlignedPrecision-BERTScore (Question & Answer Generation)
      type: qa_aligned_precision_bertscore_question_answer_generation
      value: 76.19
    - name: QAAlignedF1Score-MoverScore (Question & Answer Generation)
      type: qa_aligned_f1_score_moverscore_question_answer_generation
      value: 53.5
    - name: QAAlignedRecall-MoverScore (Question & Answer Generation)
      type: qa_aligned_recall_moverscore_question_answer_generation
      value: 54.55
    - name: QAAlignedPrecision-MoverScore (Question & Answer Generation)
      type: qa_aligned_precision_moverscore_question_answer_generation
      value: 52.57
---

# Model Card of `lmqg/mbart-large-cc25-frquad-qag`

This model is a fine-tuned version of [facebook/mbart-large-cc25](https://huggingface.co/facebook/mbart-large-cc25) for the question & answer pair generation task on the [lmqg/qag_frquad](https://huggingface.co/datasets/lmqg/qag_frquad) (dataset_name: default) via [`lmqg`](https://github.com/asahi417/lm-question-generation).
### Overview - **Language model:** [facebook/mbart-large-cc25](https://huggingface.co/facebook/mbart-large-cc25) - **Language:** fr - **Training data:** [lmqg/qag_frquad](https://huggingface.co/datasets/lmqg/qag_frquad) (default) - **Online Demo:** [https://autoqg.net/](https://autoqg.net/) - **Repository:** [https://github.com/asahi417/lm-question-generation](https://github.com/asahi417/lm-question-generation) - **Paper:** [https://arxiv.org/abs/2210.03992](https://arxiv.org/abs/2210.03992) ### Usage - With [`lmqg`](https://github.com/asahi417/lm-question-generation#lmqg-language-model-for-question-generation-) ```python from lmqg import TransformersQG # initialize model model = TransformersQG(language="fr", model="lmqg/mbart-large-cc25-frquad-qag") # model prediction question_answer_pairs = model.generate_qa("Créateur » (Maker), lui aussi au singulier, « le Suprême Berger » (The Great Shepherd) ; de l'autre, des réminiscences de la théologie de l'Antiquité : le tonnerre, voix de Jupiter, « Et souvent ta voix gronde en un tonnerre terrifiant », etc.") ``` - With `transformers` ```python from transformers import pipeline pipe = pipeline("text2text-generation", "lmqg/mbart-large-cc25-frquad-qag") output = pipe("Créateur » (Maker), lui aussi au singulier, « le Suprême Berger » (The Great Shepherd) ; de l'autre, des réminiscences de la théologie de l'Antiquité : le tonnerre, voix de Jupiter, « Et souvent ta voix gronde en un tonnerre terrifiant », etc.") ``` ## Evaluation - ***Metric (Question & Answer Generation)***: [raw metric file](https://huggingface.co/lmqg/mbart-large-cc25-frquad-qag/raw/main/eval/metric.first.answer.paragraph.questions_answers.lmqg_qag_frquad.default.json) | | Score | Type | Dataset | |:--------------------------------|--------:|:--------|:-------------------------------------------------------------------| | QAAlignedF1Score (BERTScore) | 77.75 | default | [lmqg/qag_frquad](https://huggingface.co/datasets/lmqg/qag_frquad) | | QAAlignedF1Score (MoverScore) | 53.5 | default | [lmqg/qag_frquad](https://huggingface.co/datasets/lmqg/qag_frquad) | | QAAlignedPrecision (BERTScore) | 76.19 | default | [lmqg/qag_frquad](https://huggingface.co/datasets/lmqg/qag_frquad) | | QAAlignedPrecision (MoverScore) | 52.57 | default | [lmqg/qag_frquad](https://huggingface.co/datasets/lmqg/qag_frquad) | | QAAlignedRecall (BERTScore) | 79.45 | default | [lmqg/qag_frquad](https://huggingface.co/datasets/lmqg/qag_frquad) | | QAAlignedRecall (MoverScore) | 54.55 | default | [lmqg/qag_frquad](https://huggingface.co/datasets/lmqg/qag_frquad) | ## Training hyperparameters The following hyperparameters were used during fine-tuning: - dataset_path: lmqg/qag_frquad - dataset_name: default - input_types: ['paragraph'] - output_types: ['questions_answers'] - prefix_types: None - model: facebook/mbart-large-cc25 - max_length: 512 - max_length_output: 256 - epoch: 14 - batch: 2 - lr: 0.0001 - fp16: False - random_seed: 1 - gradient_accumulation_steps: 64 - label_smoothing: 0.0 The full configuration can be found at [fine-tuning config file](https://huggingface.co/lmqg/mbart-large-cc25-frquad-qag/raw/main/trainer_config.json). 
## Citation ``` @inproceedings{ushio-etal-2022-generative, title = "{G}enerative {L}anguage {M}odels for {P}aragraph-{L}evel {Q}uestion {G}eneration", author = "Ushio, Asahi and Alva-Manchego, Fernando and Camacho-Collados, Jose", booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing", month = dec, year = "2022", address = "Abu Dhabi, U.A.E.", publisher = "Association for Computational Linguistics", } ```
Aliraza47/BERT
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 259.59 +/- 22.35 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
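As a starting point for the TODO above, a minimal sketch with `huggingface_sb3` (the repository id and filename are placeholders for wherever this checkpoint was pushed; assumes SB3 >= 2.0 with Gymnasium):

```python
import gymnasium as gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

# Placeholders for the actual upload location of this checkpoint.
checkpoint = load_from_hub(repo_id="<user>/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)

# Evaluate for a few episodes with SB3's built-in helper.
env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10, deterministic=True)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```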
Amro-Kamal/gpt
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - "ain" tags: - "ainu" - "token-classification" - "pos" - "dependency-parsing" license: "cc-by-sa-4.0" pipeline_tag: "token-classification" widget: - text: "itak=as awa pon rupne aynu ene itaki" - text: "イタカㇱ アワ ポン ルㇷ゚ネ アイヌ エネ イタキ" - text: "итакас ава пон рубне айну эне итакі" --- # deberta-base-ainu-upos ## Model Description This is a DeBERTa(V2) model pre-trained on Ainu texts (in カタカナ, Roman, and Кириллица) for POS-tagging and dependency-parsing, derived from [deberta-base-ainu](https://huggingface.co/KoichiYasuoka/deberta-base-ainu). Every word is tagged by [UPOS](https://universaldependencies.org/u/pos/) (Universal Part-Of-Speech). ## How to Use ```py from transformers import AutoTokenizer,AutoModelForTokenClassification tokenizer=AutoTokenizer.from_pretrained("KoichiYasuoka/deberta-base-ainu-upos") model=AutoModelForTokenClassification.from_pretrained("KoichiYasuoka/deberta-base-ainu-upos") ``` or ```py import esupar nlp=esupar.load("KoichiYasuoka/deberta-base-ainu-upos","ainu") ``` ## Reference 安岡孝一: [ローマ字・カタカナ・キリル文字併用アイヌ語RoBERTa・DeBERTaモデルの開発](http://id.nii.ac.jp/1001/00224072/), 情報処理学会研究報告, Vol.2023-CH-131『人文科学とコンピュータ』, No.7 (2023年2月18日), pp.1-7. ## See Also [esupar](https://github.com/KoichiYasuoka/esupar): Tokenizer POS-tagger and Dependency-parser with BERT/RoBERTa/DeBERTa models
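For quick tagging without esupar, a minimal sketch with the stock token-classification pipeline (standard transformers API; the sentence is the card's widget example):

```python
from transformers import pipeline

upos = pipeline(
    "token-classification",
    model="KoichiYasuoka/deberta-base-ainu-upos",
    aggregation_strategy="simple",  # merge subwords into whole tokens
)
print(upos("itak=as awa pon rupne aynu ene itaki"))
```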
Amrrs/wav2vec2-large-xlsr-53-tamil
[ "pytorch", "jax", "wav2vec2", "automatic-speech-recognition", "ta", "dataset:common_voice", "transformers", "audio", "speech", "xlsr-fine-tuning-week", "license:apache-2.0", "model-index", "has_space" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
31
null
--- language: - "ain" tags: - "ainu" - "token-classification" - "pos" - "dependency-parsing" license: "cc-by-sa-4.0" pipeline_tag: "token-classification" widget: - text: "itak=as awa pon rupne aynu ene itaki" - text: "イタカㇱ アワ ポン ルㇷ゚ネ アイヌ エネ イタキ" - text: "итакас ава пон рубне айну эне итакі" --- # deberta-base-ainu-ud-goeswith ## Model Description This is a DeBERTa(V2) model pre-trained on Ainu texts (in カタカナ, Roman, and Кириллица) for POS-tagging and dependency-parsing (using `goeswith` for subwords), derived from [deberta-base-ainu-upos](https://huggingface.co/KoichiYasuoka/deberta-base-ainu-upos). ## How to Use ```py class UDgoeswith(object): def __init__(self,bert): from transformers import AutoTokenizer,AutoModelForTokenClassification self.tokenizer=AutoTokenizer.from_pretrained(bert) self.model=AutoModelForTokenClassification.from_pretrained(bert) def __call__(self,text): import numpy,torch,ufal.chu_liu_edmonds w=self.tokenizer(text,return_offsets_mapping=True) v=w["input_ids"] x=[v[0:i]+[self.tokenizer.mask_token_id]+v[i+1:]+[j] for i,j in enumerate(v[1:-1],1)] with torch.no_grad(): e=self.model(input_ids=torch.tensor(x)).logits.numpy()[:,1:-2,:] r=[1 if i==0 else -1 if j.endswith("|root") else 0 for i,j in sorted(self.model.config.id2label.items())] e+=numpy.where(numpy.add.outer(numpy.identity(e.shape[0]),r)==0,0,numpy.nan) g=self.model.config.label2id["X|_|goeswith"] r=numpy.tri(e.shape[0]) for i in range(e.shape[0]): for j in range(i+2,e.shape[1]): r[i,j]=r[i,j-1] if numpy.nanargmax(e[i,j-1])==g else 1 e[:,:,g]+=numpy.where(r==0,0,numpy.nan) m=numpy.full((e.shape[0]+1,e.shape[1]+1),numpy.nan) m[1:,1:]=numpy.nanmax(e,axis=2).transpose() p=numpy.zeros(m.shape) p[1:,1:]=numpy.nanargmax(e,axis=2).transpose() for i in range(1,m.shape[0]): m[i,0],m[i,i],p[i,0]=m[i,i],numpy.nan,p[i,i] h=ufal.chu_liu_edmonds.chu_liu_edmonds(m)[0] if [0 for i in h if i==0]!=[0]: m[:,0]+=numpy.where(m[:,0]==numpy.nanmax(m[[i for i,j in enumerate(h) if j==0],0]),0,numpy.nan) m[[i for i,j in enumerate(h) if j==0]]+=[0 if i==0 or j==0 else numpy.nan for i,j in enumerate(h)] h=ufal.chu_liu_edmonds.chu_liu_edmonds(m)[0] u="# text = "+text+"\n" v=[(s,e) for s,e in w["offset_mapping"] if s<e] for i,(s,e) in enumerate(v,1): q=self.model.config.id2label[p[i,h[i]]].split("|") u+="\t".join([str(i),text[s:e],"_",q[0],"|".join(q[1:-1]),"_",str(h[i]),q[-1],"_","_" if i<len(v) and e<v[i][0] else "SpaceAfter=No"])+"\n" return u+"\n" nlp=UDgoeswith("KoichiYasuoka/deberta-base-ainu-ud-goeswith") print(nlp("itak=as awa pon rupne aynu ene itaki")) ``` with [ufal.chu-liu-edmonds](https://pypi.org/project/ufal.chu-liu-edmonds/). Or without ufal.chu-liu-edmonds: ``` from transformers import pipeline nlp=pipeline("universal-dependencies","KoichiYasuoka/deberta-base-ainu-ud-goeswith",trust_remote_code=True,aggregation_strategy="simple") print(nlp("itak=as awa pon rupne aynu ene itaki")) ``` ## Reference 安岡孝一: [ローマ字・カタカナ・キリル文字併用アイヌ語RoBERTa・DeBERTaモデルの開発](http://id.nii.ac.jp/1001/00224072/), 情報処理学会研究報告, Vol.2023-CH-131『人文科学とコンピュータ』, No.7 (2023年2月18日), pp.1-7.
Ana1315/A
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit tags: - generated_from_keras_callback model-index: - name: W4nkel/distilbertBase128KTrain results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # W4nkel/distilbertBase128KTrain This model is a fine-tuned version of [dbmdz/distilbert-base-turkish-cased](https://huggingface.co/dbmdz/distilbert-base-turkish-cased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.7462 - Validation Loss: 0.5115 - Train Accuracy: 0.7675 - Epoch: 0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': False, 'is_legacy_optimizer': False, 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 1500, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Train Accuracy | Epoch | |:----------:|:---------------:|:--------------:|:-----:| | 0.7462 | 0.5115 | 0.7675 | 0 | ### Framework versions - Transformers 4.25.1 - TensorFlow 2.11.0 - Datasets 2.8.0 - Tokenizers 0.13.2
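For reference, a minimal sketch of rebuilding the optimizer described in the hyperparameters above (values copied from the listing; plain Keras API, nothing specific to this checkpoint):

```python
import tensorflow as tf

# PolynomialDecay exactly as listed: 2e-5 -> 0.0 over 1500 steps, power 1.0, no cycling
schedule = tf.keras.optimizers.schedules.PolynomialDecay(
    initial_learning_rate=2e-5,
    decay_steps=1500,
    end_learning_rate=0.0,
    power=1.0,
    cycle=False,
)
optimizer = tf.keras.optimizers.Adam(
    learning_rate=schedule,
    beta_1=0.9,
    beta_2=0.999,
    epsilon=1e-8,
)
```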
Ana1315/ana
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
tags:
- unity-ml-agents
- ml-agents
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-Huggy
library_name: ml-agents
---

# **ppo** Agent playing **Huggy**

This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)

The documentation: https://github.com/huggingface/ml-agents#get-started

We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub.

### Resume the training

```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your Agent play

You can watch your agent **playing directly in your browser**:

1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy
2. Step 1: Write your model_id: PaulMest/ppo-Huggy
3. Step 2: Select your *.nn /*.onnx file
4. Click on Watch the agent play 👀
AndrewMcDowell/wav2vec2-xls-r-300m-japanese
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "ja", "dataset:mozilla-foundation/common_voice_8_0", "transformers", "generated_from_trainer", "hf-asr-leaderboard", "mozilla-foundation/common_voice_8_0", "robust-speech-event", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- tags: - generated_from_trainer metrics: - f1 model-index: - name: ES_roberta_30_prepro results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # ES_roberta_30_prepro This model is a fine-tuned version of [klue/roberta-large](https://huggingface.co/klue/roberta-large) on the None dataset. It achieves the following results on the evaluation set: - Exact Match: 26.25 - F1: 36.0319 - Loss: 1.2394 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 16 - total_train_batch_size: 256 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3.0 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Exact Match | F1 | Validation Loss | |:-------------:|:-----:|:----:|:-----------:|:-------:|:---------------:| | No log | 1.0 | 305 | 22.9167 | 34.1584 | 1.0608 | | 0.7921 | 2.0 | 610 | 25.0 | 35.1179 | 1.0869 | | 0.7921 | 3.0 | 915 | 26.25 | 36.0319 | 1.2394 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu117 - Datasets 2.7.1 - Tokenizers 0.13.2
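A minimal extractive-QA sketch (the repository id is a placeholder; the Korean example is illustrative, since the base model klue/roberta-large is Korean):

```python
from transformers import pipeline

qa = pipeline("question-answering", model="<repo-id>")  # placeholder repo id
result = qa(
    question="이 모델의 기반 모델은 무엇인가?",
    context="이 모델은 klue/roberta-large를 미세 조정하여 만들어졌다.",
)
print(result["answer"], result["score"])
```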
Andrey1989/mt5-small-finetuned-mlsum-es
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
tags:
- unity-ml-agents
- ml-agents
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-Huggy
library_name: ml-agents
---

# **ppo** Agent playing **Huggy**

This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)

The documentation: https://github.com/huggingface/ml-agents#get-started

We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub.

### Resume the training

```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your Agent play

You can watch your agent **playing directly in your browser**:

1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy
2. Step 1: Write your model_id: jsalvador/ppo-Huggy
3. Step 2: Select your *.nn /*.onnx file
4. Click on Watch the agent play 👀
Andrey78/my_nlp_test_model
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - conversational --- # Peter Griffin DialoGPT Model
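A minimal chat loop using the standard DialoGPT decoding pattern (the repository id is a placeholder, since the card does not name one):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "<repo-id>"  # placeholder; point at the DialoGPT checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

chat_history_ids = None
for step in range(3):
    # Append the end-of-sequence token so the model knows the user turn is over.
    user_ids = tokenizer.encode(input(">> You: ") + tokenizer.eos_token, return_tensors="pt")
    bot_input_ids = torch.cat([chat_history_ids, user_ids], dim=-1) if chat_history_ids is not None else user_ids
    chat_history_ids = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)
    # Decode only the newly generated tokens, not the whole history.
    print("Bot:", tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True))
```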
Andrija/RobertaFastBPE
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### Ayaka_DB Dreambooth model trained by Falon with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb) Sample pictures of this concept:
Andrija/SRoBERTa-F
[ "pytorch", "tensorboard", "roberta", "fill-mask", "hr", "sr", "multilingual", "dataset:oscar", "dataset:srwac", "dataset:leipzig", "dataset:cc100", "dataset:hrwac", "transformers", "masked-lm", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
59
null
# WARNING: NOT ORIGINAL MODEL

This repository and its model are NOT the ORIGINAL ones published by the author. This is just a copy in diffusers format, kept so that Stable Diffusion DreamBooth training has something to link to.

So, thank you for your merge requests, but you probably need to submit them to the author's repository, if one exists. At the time of writing, Hugging Face unfortunately does not host the author's own repository. The closest one is:

https://huggingface.co/johnslegers/hasdx
Andrija/SRoBERTa-NER
[ "pytorch", "roberta", "token-classification", "hr", "sr", "multilingual", "dataset:hr500k", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "RobertaForTokenClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
---
license: creativeml-openrail-m
tags:
- stable-diffusion
- stable-diffusion-diffusers
- text-to-image
inference: true
extra_gated_prompt: |-
  This model is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage.
  The CreativeML OpenRAIL License specifies:

  1. You can't use the model to deliberately produce nor share illegal or harmful outputs or content
  2. CompVis claims no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license
  3. You may re-distribute the weights and use the model commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully)
  Please read the full license carefully here: https://huggingface.co/spaces/CompVis/stable-diffusion-license
---

[![Example][1]][1]

## Why Epic Diffusion

Epîc Diffusion is a general purpose model based on Stable Diffusion 1.x, intended to replace the official SD releases as your default model. It is focused on providing high quality output in a wide range of different styles, with support for NSFW content.

Epîc Diffusion 1.0 is a heavily calibrated merge of SD 1.4, SD 1.5, Analog Diffusion, Wavy Diffusion, Openjourney Diffusion, Samdoesarts Ultramerge, postapocalypse, Elldreth's Dream, Inkpunk Diffusion, Arcane Diffusion & Van Gogh Diffusion, blended and reblended multiple times until I got the quality & consistency I was looking for...

Epic Diffusion is also [available on CivitAI](https://civitai.com/models/3855/epic-diffusion).

## License

This model is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage.
The CreativeML OpenRAIL License specifies:

1. You can't use the model to deliberately produce nor share illegal or harmful outputs or content
2. CompVis claims no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license
3. You may re-distribute the weights and use the model commercially and/or as a service.
If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully) <a href="https://www.buymeacoffee.com/johnslegers" target="_blank"> <img src="https://cdn.buymeacoffee.com/buttons/v2/default-yellow.png" alt="Buy Me A Coffee" style="height: 45px !important;width: 162px !important;" > </a> ## Example prompts <table> <tr style="border: 1px solid;background:#e5e7eb"> <th style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> Prompt </th> <th style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> Parameters </th> <th style="vertical-align:top;padding:.5714286em!important;border: 1px solid;min-width:270px"> Output </th> </tr> <tr> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> scarlett johansson, in the style of Wes Anderson, highly detailed, unreal engine, octane render, 8k </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <b>Steps:</b><br>20<br> <b>Sampler:</b><br>Euler a<br> <b>CFG scale:</b><br>7<br> <b>Seed:</b><br>2263657329<br> <b>Size:</b><br>512x512 </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <img style="vertical-align:top;margin:0;padding:0" src="https://i.stack.imgur.com/0oZij.png"> </td> </tr> <tr> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> sansa angeline jolie gessica chastain mummy, intricate, elegant, highly detailed, digital painting, artstation, concept art, smooth, sharp focus, illustration, art by artgerm and greg rutkowski and alphonse mucha and william - adolphe bouguereau </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <b>Steps:</b><br>20<br> <b>Sampler:</b><br>Euler a<br> <b>CFG scale:</b><br>7<br> <b>Seed:</b><br>1310341382<br> <b>Size:</b><br>512x512 </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <img style="vertical-align:top;margin:0;padding:0" src="https://i.stack.imgur.com/mnnBR.png"> </td> </tr> <tr> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> Pokimane, Feminine, Mercy, Perfect Sexy Symmetrical Face, Detailed Pupils, Pensive Smirk, Look at Viewer, Leaf Armor, Ilya Kuvshinov, Gil Elvgren, Mucha. Intricate, Octane Render, 4KUHD, Centered, Oil Painting, Bokeh, Rim Lighting. 
</td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <b>Steps:</b><br>20<br> <b>Sampler:</b><br>Euler a<br> <b>CFG scale:</b><br>7<br> <b>Seed:</b><br>4142902194<br> <b>Size:</b><br>512x512 </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <img style="vertical-align:top;margin:0;padding:0" src="https://i.stack.imgur.com/v9NoC.png"> </td> </tr> <tr> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> Mature babe,artgerm Style, gerald brom, atey ghailan, mike mignola, short cut off shirt knot, wide hips, showing off, exposing herself vulnerable, blushing, exited, confident, demanding, joyful, trending on artstation, double split complementary colors, intricate details, highly detailed, </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <b>Steps:</b><br>20<br> <b>Sampler:</b><br>Euler a<br> <b>CFG scale:</b><br>7<br> <b>Seed:</b><br>3954688283<br> <b>Size:</b><br>512x512 </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <img style="vertical-align:top;margin:0;padding:0" src="https://i.stack.imgur.com/vl0bc.png"> </td> </tr> <tr> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> planet base, windows, night, ground level, no man's sky, digital art, highly detailed, intricate, sharp focus, Trending on Artstation HQ, deviantart, unreal engine 5, 4K UHD image </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <b>Steps:</b><br>20<br> <b>Sampler:</b><br>Euler a<br> <b>CFG scale:</b><br>7<br> <b>Seed:</b><br>895811336<br> <b>Size:</b><br>512x512 </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <img style="vertical-align:top;margin:0;padding:0" src="https://i.stack.imgur.com/D2GNK.png"> </td> </tr> <tr> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> berchtesgaden, hyperdetailed, detailed faces, artgerm, wolfenstein, portal 2, Leartes Studios, assassin's creed, alphonse mucha, bouguereau, edmund blair leighton, greg kadel, dynamic lighting, delicate, unreal engine, octane render, 8k </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <b>Steps:</b><br>20<br> <b>Sampler:</b><br>Euler a<br> <b>CFG scale:</b><br>7<br> <b>Seed:</b><br>1172925287<br> <b>Size:</b><br>512x512 </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <img style="vertical-align:top;margin:0;padding:0" src="https://i.stack.imgur.com/m7Xkb.png"> </td> </tr> <tr> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> princess, detailed portrait, hyperdetailed, detailed faces, irakli nadar, magali villeneuve, Assassin's Creed, Tim Hildebrandt, Ilya Kuvshinov, artgem, greg kadel, dynamic lighting, delicate, unreal engine, octane render, 8k </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <b>Steps:</b><br>20<br> <b>Sampler:</b><br>Euler a<br> <b>CFG scale:</b><br>7<br> <b>Seed:</b><br>2096567313<br> <b>Size:</b><br>512x512 </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <img style="vertical-align:top;margin:0;padding:0" src="https://i.stack.imgur.com/LwPPa.png"> </td> </tr> <tr> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> a Photorealistic dramatic hyperrealistic bright blue eyes, African American elegant girl, black hair, white veil,by WLOP,Artgerm,Greg 
Rutkowski,Alphonse Mucha, Beautiful dynamic dramatic bright sunset lighting,shadows,cinematic atmosphere,Artstation,concept design art,Octane render,8k </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <b>Steps:</b><br>20<br> <b>Sampler:</b><br>Euler a<br> <b>CFG scale:</b><br>7<br> <b>Seed:</b><br>2999946689<br> <b>Size:</b><br>512x512 </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <img style="vertical-align:top;margin:0;padding:0" src="https://i.stack.imgur.com/1nH9c.png"> </td> </tr> <tr> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> cutest girl in the world outside, (detailed portrait), in the style of fernanda suarez and simon stalenhag and Ilya Kuvshinov and Wlop and Artgerm and Chie Yoshii and Greg Rutkowski and Waking Life, trending on artstation, featured on pixiv, dynamic lighting, highly detailed, ambient lighting, octane render, 8k </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <b>Steps:</b><br>20<br> <b>Sampler:</b><br>Euler a<br> <b>CFG scale:</b><br>7<br> <b>Seed:</b><br>2249388004<br> <b>Size:</b><br>512x512 </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <img style="vertical-align:top;margin:0;padding:0" src="https://i.stack.imgur.com/uNux1.png"> </td> </tr> <tr> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> military academy, (detailed portrait), steampunk, in the style of arcane and fernanda suarez and dishonored and bioshock and simon stalenhag and Ilya Kuvshinov and Wlop and Artgerm, trending on artstation, featured on pixiv, dynamic lighting, highly detailed, ambient lighting, octane render, 8k </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <b>Steps:</b><br>20<br> <b>Sampler:</b><br>Euler a<br> <b>CFG scale:</b><br>7<br> <b>Seed:</b><br>3877530043<br> <b>Size:</b><br>512x512 </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <img style="vertical-align:top;margin:0;padding:0" src="https://i.stack.imgur.com/sFXCi.png"> </td> </tr> <tr> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> beautiful female assassin wearing cyberpunk clothing, respirator, cybernetic respirator, (detailed portrait), cell shaded, 4 k, vivid colours, photorealistic concept art by wlop, ilya kuvshinov, artgerm, krenz cushart, greg rutkowski, pixiv. 
cinematic dramatic atmosphere, sharp focus, volumetric lighting, cinematic lighting, studio quality </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <b>Steps:</b><br>20<br> <b>Sampler:</b><br>Euler a<br> <b>CFG scale:</b><br>7<br> <b>Seed:</b><br>3388890157<br> <b>Size:</b><br>512x512 </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <img style="vertical-align:top;margin:0;padding:0" src="https://i.stack.imgur.com/14iZS.png"> </td> </tr> <tr> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> cemetary, pen and ink, in the style of gustave dore highly detailed, octane render, 8k, trending on artstation, sharp focus, studio photo, intricate details, highly detailed, by greg rutkowski </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <b>Steps:</b><br>20<br> <b>Sampler:</b><br>Euler a<br> <b>CFG scale:</b><br>7<br> <b>Seed:</b><br>568457114<br> <b>Size:</b><br>512x512 </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <img style="vertical-align:top;margin:0;padding:0" src="https://i.stack.imgur.com/D1hsN.png"> </td> </tr> <tr> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> dubai, hyperdetailed, detailed faces, artgem, irakli nadar, mass effect, Tim Hildebrandt, Ilya Kuvshinov, liam wong, greg rutkowski, greg kadel, dynamic lighting, delicate, unreal engine, octane render, 8k, centered, symmetry, painted, intricate, volumetric lighting, beautiful, rich deep colors masterpiece, sharp focus, ultra detailed, in the style of dan mumford and marc simonetti, astrophotography </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <b>Steps:</b><br>20<br> <b>Sampler:</b><br>DPM++ SDE<br> <b>CFG scale:</b><br>7<br> <b>Seed:</b><br>4262868463<br> <b>Size:</b><br>512x512 </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <img style="vertical-align:top;margin:0;padding:0" src="https://i.stack.imgur.com/4uPzr.png"> </td> </tr> <tr> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> Little cute forest fluffy chibi cuteness overload, sunny magical background, ultra precious details, intricate details, volumetric lighting, photo realistic, lifelike, photography, digital art, 8k, trending on artstation, sharp focus, studio photo, intricate details, highly detailed, by greg rutkowski, sharp focus, emitting diodes, smoke, artillery, sparks, racks, system unit, motherboard, by pascal blanche rutkowski repin artstation hyperrealism painting concept art of detailed character design matte painting, 4 k resolution blade runner </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <b>Steps:</b><br>20<br> <b>Sampler:</b><br>DPM++ SDE Karras<br> <b>CFG scale:</b><br>7<br> <b>Seed:</b><br>3849507891<br> <b>Size:</b><br>512x512 </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <img style="vertical-align:top;margin:0;padding:0" src="https://i.stack.imgur.com/4yTQP.png"> </td> </tr> <tr> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> 15 year old schoolgirl with short straight hair, blue eyes, cute, friendly, round face, cottagecore, intricate, enlightened, highly detailed, digital painting, artstation, concept art, smooth, sharp focus, illustration, art by artgerm and greg rutkowski and alphonse mucha </td> <td 
style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <b>Steps:</b><br>20<br> <b>Sampler:</b><br>Euler a<br> <b>CFG scale:</b><br>7<br> <b>Seed:</b><br>2276800560<br> <b>Size:</b><br>512x512 </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <img style="vertical-align:top;margin:0;padding:0" src="https://i.stack.imgur.com/gqynB.png"> </td> </tr> <tr> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> extreme wide shot a futuristic containment building in a rainforest valley with a city in the distance, national geographic, hyper realistic, 4 k, harsh light </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <b>Steps:</b><br>20<br> <b>Sampler:</b><br>Euler a<br> <b>CFG scale:</b><br>7<br> <b>Seed:</b><br>3260458902<br> <b>Size:</b><br>512x512 </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <img style="vertical-align:top;margin:0;padding:0" src="https://i.stack.imgur.com/8qH9Y.png"> </td> </tr> <tr> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> portrait of a middle - eastern female cleric with straight black hair wearing blue and yellow vestments casting fireball, fantasy, highly detailed, digital painting, artstation, concept art, character art, art by greg rutkowski and tyler jacobson and alphonse mucha </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <b>Steps:</b><br>20<br> <b>Sampler:</b><br>Euler a<br> <b>CFG scale:</b><br>7<br> <b>Seed:</b><br>1379894453<br> <b>Size:</b><br>512x512 </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <img style="vertical-align:top;margin:0;padding:0" src="https://i.stack.imgur.com/BP98Y.png"> </td> </tr> <tr> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> aSnowshoe Siamese Cat as the doomslayer, realistic scifi cyberpunk power armor robot, closeup portrait art by donato giancola and greg rutkowski, vintage retro scifi, realistic face, digital art, trending on artstation, symmetry </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <b>Steps:</b><br>20<br> <b>Sampler:</b><br>Euler a<br> <b>CFG scale:</b><br>7<br> <b>Seed:</b><br>2122325442<br> <b>Size:</b><br>512x512 </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <img style="vertical-align:top;margin:0;padding:0" src="https://i.stack.imgur.com/GYdOS.png"> </td> </tr> <tr> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> Beautiful boy by René Magritte </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <b>Steps:</b><br>20<br> <b>Sampler:</b><br>Euler a<br> <b>CFG scale:</b><br>7<br> <b>Seed:</b><br>1753689226<br> <b>Size:</b><br>512x512 </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <img style="vertical-align:top;margin:0;padding:0" src="https://i.stack.imgur.com/vP9sv.png"> </td> </tr> <tr> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> portrait of a dark god, copper wires, visible scars and nerves, intricate, headshot, highly detailed, digital painting, artstation, concept art, sharp focus, cinematic lighting, illustration, art by artgerm and greg rutkowski, alphonse mocha, cgsociety, Olivia </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <b>Steps:</b><br>20<br> <b>Sampler:</b><br>Euler 
a<br> <b>CFG scale:</b><br>7<br> <b>Seed:</b><br>3355776798<br> <b>Size:</b><br>512x512 </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <img style="vertical-align:top;margin:0;padding:0" src="https://i.stack.imgur.com/A94Gg.png"> </td> </tr> <tr> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> knight warrior helmet skyrim mask elder scrolls v nordic armor bethesda adam adamowicz illustration character design concept, unreal 5, daz, hyperrealistic, octane render, cosplay, rpg portrait, dynamic lighting, intricate detail, harvest fall vibrancy, cinematic volume inner glowing aura global illumination ray tracing hdr </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <b>Steps:</b><br>20<br> <b>Sampler:</b><br>Euler a<br> <b>CFG scale:</b><br>7<br> <b>Seed:</b><br>1938574287<br> <b>Size:</b><br>512x512 </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <img style="vertical-align:top;margin:0;padding:0" src="https://i.stack.imgur.com/efGrz.png"> </td> </tr> <tr> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> berserker portrait, d&d style, fantasy, photorealistic, highly detailed, artstation, smooth, sharp focus, art by michael whelan, artgerm, greg rutkowski and alphonse mucha </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <b>Steps:</b><br>20<br> <b>Sampler:</b><br>DPM++ SDE Karras<br> <b>CFG scale:</b><br>7<br> <b>Seed:</b><br>156077154<br> <b>Size:</b><br>512x512 </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <img style="vertical-align:top;margin:0;padding:0" src="https://i.stack.imgur.com/Wbjgp.png"> </td> </tr> <tr> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> symmetry product render poster vivid colors classical proportion car, glowing fog intricate, elegant, highly detailed, digital painting, art station, concept art, smooth, sharp focus, illustration, </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <b>Steps:</b><br>20<br> <b>Sampler:</b><br>DPM++ SDE Karras<br> <b>CFG scale:</b><br>7<br> <b>Seed:</b><br>4294525772<br> <b>Size:</b><br>512x512 </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <img style="vertical-align:top;margin:0;padding:0" src="https://i.stack.imgur.com/sMMpR.png"> </td> </tr> <tr> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> Futuristic Vintage Medium Shot 1920's Poster with Cyberpunk, ovni, tron biker with helmet bike, black in color, with a cyberpunk city background, futuristic lighting, cinematic lighting, cozy lighting, 8k, cinematic poster vintage 1800s </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <b>Steps:</b><br>20<br> <b>Sampler:</b><br>Euler a<br> <b>CFG scale:</b><br>7<br> <b>Seed:</b><br>1229558409<br> <b>Size:</b><br>512x512 </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <img style="vertical-align:top;margin:0;padding:0" src="https://i.stack.imgur.com/0Gojz.png"> </td> </tr> <tr> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> beautiful, young woman, cybernetic, cyberpunk, detailed gorgeous face, flowing hair, vaporwave aesthetic, synthwave , digital painting, artstation, concept art, smooth, sharp focus, illustration, art by artgerm and greg rutkowski and alphonse mucha 
</td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <b>Steps:</b><br>20<br> <b>Sampler:</b><br>Euler a<br> <b>CFG scale:</b><br>7<br> <b>Seed:</b><br>264509871<br> <b>Size:</b><br>512x512 </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <img style="vertical-align:top;margin:0;padding:0" src="https://i.stack.imgur.com/zFdjj.png"> </td> </tr> <tr> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> strong warrior princess| centered| key visual| intricate| highly detailed| breathtaking beauty| precise lineart| vibrant| comprehensive cinematic| Carne Griffiths| Conrad Roset </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <b>Steps:</b><br>20<br> <b>Sampler:</b><br>Euler a<br> <b>CFG scale:</b><br>7<br> <b>Seed:</b><br>16<br> <b>Size:</b><br>512x512 </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <img style="vertical-align:top;margin:0;padding:0" src="https://i.stack.imgur.com/aGuIL.png"> </td> </tr> <tr> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> portrait of a rugged 19th century man with mutton chops in a jacket, victorian, concept art, detailed face, fantasy, close up face, highly detailed, cinematic lighting, digital art painting by greg rutkowski </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <b>Steps:</b><br>20<br> <b>Sampler:</b><br>Euler a<br> <b>CFG scale:</b><br>7<br> <b>Seed:</b><br>16<br> <b>Size:</b><br>512x512 </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <img style="vertical-align:top;margin:0;padding:0" src="https://i.stack.imgur.com/6sKW6.png"> </td> </tr> <tr> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> side profile of cyberpunk body with cyborg skull | cyberpunk | styled in Art Nouveau | insanely detailed | embellishments | high definition | concept art | digital art | vibrant </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <b>Steps:</b><br>20<br> <b>Sampler:</b><br>Euler a<br> <b>CFG scale:</b><br>7<br> <b>Seed:</b><br>16<br> <b>Size:</b><br>512x512 </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <img style="vertical-align:top;margin:0;padding:0" src="https://i.stack.imgur.com/N7kSu.png"> </td> </tr> <tr> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> a cute little matte low poly isometric cherry blossom forest island, pink waterfalls, mist, lat lighting, soft shadows, trending on artstation, 3d render, monument valley, fez video game, </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <b>Steps:</b><br>20<br> <b>Sampler:</b><br>Euler a<br> <b>CFG scale:</b><br>7<br> <b>Seed:</b><br>16<br> <b>Size:</b><br>512x512 </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <img style="vertical-align:top;margin:0;padding:0" src="https://i.stack.imgur.com/fVj9N.png"> </td> </tr> <tr> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> high resolution concept art of an apartment living room overlooking a large futuristic city with floor to ceiling windows and mid century modern furniture cinematic lighting cgsociety </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <b>Steps:</b><br>20<br> <b>Sampler:</b><br>Euler a<br> <b>CFG scale:</b><br>7<br> 
<b>Seed:</b><br>850995814<br> <b>Size:</b><br>512x512 </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <img style="vertical-align:top;margin:0;padding:0" src="https://i.stack.imgur.com/jkpgU.png"> </td> </tr> <tr> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> hyperrealistic full length portrait of gorgeous watson from apex legends | blonde | detailed gorgeous face!! | full body!! | armor | intricate | elegant | realistic | hyperrealistic | cinematic | character design | concept art | highly detailed | illustration | digital art | digital painting | depth of field | illustrated by tim brown lee </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <b>Steps:</b><br>20<br> <b>Sampler:</b><br>Euler a<br> <b>CFG scale:</b><br>7<br> <b>Seed:</b><br>3002798343<br> <b>Size:</b><br>512x512 </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <img style="vertical-align:top;margin:0;padding:0" src="https://i.stack.imgur.com/hMsH2.png"> </td> </tr> <tr> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> Chibi spiderman, high redolution, 3D rendering, octane rendering, modern Disney style </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <b>Steps:</b><br>20<br> <b>Sampler:</b><br>Euler a<br> <b>CFG scale:</b><br>7<br> <b>Seed:</b><br>3232863832<br> <b>Size:</b><br>512x512 </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <img style="vertical-align:top;margin:0;padding:0" src="https://i.stack.imgur.com/zl18l.png"> </td> </tr> <tr> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> photo of the most beautiful artwork in the world featuring soft lustrous, industrial mechanic real world, fantastic location, working environment, rugged harsh situation worker, full body 8k unity render, action shot, skin pores, detailed intricate iris, very dark lighting, heavy shadows, detailed, detailed face, (vibrant, photo realistic, realistic, dramatic, dark, sharp focus, 8k), (weathered greasy dirty damaged old worn technician worker outfit:1.1), (intricate:1.1), (highly detailed:1.1), digital painting, octane render, artstation, concept art, smooth, sharp focus, illustration, art by artgerm, (loish:0.23), wlop ilya kuvshinov., (global illumination, studio light, volumetric light)<br><br> <b>Negative prompt:</b> Asian, black and white, close up, cartoon, 3d, denim, (disfigured), (deformed), (poorly drawn), (extra limbs), blurry, boring, sketch, lackluster, signature, letters, watermark, low res , horrific , mutated , artifacts , bad art , gross , b&w , poor quality , low quality , cropped </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <b>Steps:</b><br>30<br> <b>Sampler:</b><br>DPM++ SDE Karras<br> <b>CFG scale:</b><br>10<br> <b>Seed:</b><br>169686802<br> <b>Size:</b><br>512x640 </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <img style="vertical-align:top;margin:0;padding:0" src="https://i.stack.imgur.com/dPnAA.png"> </td> </tr> <tr> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> photo of the most beautiful artwork in the world featuring soft lustrous, industrial mechanic real world, fantastic location, working environment, rugged harsh situation worker, full body 8k unity render, action shot, skin pores, detailed intricate iris, very dark lighting, heavy 
shadows, detailed, detailed face, (vibrant, photo realistic, realistic, dramatic, dark, sharp focus, 8k), (weathered greasy dirty damaged old worn technician worker outfit:1.1), (intricate:1.1), (highly detailed:1.1), digital painting, octane render, artstation, concept art, smooth, sharp focus, illustration, art by artgerm, (loish:0.23), wlop ilya kuvshinov., (global illumination, studio light, volumetric light)<br><br> <b>Negative prompt:</b> Asian, black and white, close up, cartoon, 3d, denim, (disfigured), (deformed), (poorly drawn), (extra limbs), blurry, boring, sketch, lackluster, signature, letters, watermark, low res , horrific , mutated , artifacts , bad art , gross , b&w , poor quality , low quality , cropped </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <b>Steps:</b><br>30<br> <b>Sampler:</b><br>DPM++ SDE Karras<br> <b>CFG scale:</b><br>10<br> <b>Seed:</b><br>169686796<br> <b>Size:</b><br>512x640<br> <b>Denoising strength:</b><br>0.7<br> <b>Hires upscale:</b><br>2<br> <b>Hires upscaler:</b><br>Latent </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <img style="vertical-align:top;margin:0;padding:0" src="https://i.imgur.com/ktLu2Tl.png"> </td> </tr> <tr> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> dark and gloomy full body 8k unity render, female teen cyborg, Blue yonder hair, wearing broken battle armor, at cluttered and messy shack , action shot, tattered torn shirt, porcelain cracked skin, skin pores, detailed intricate iris, very dark lighting, heavy shadows, detailed, detailed face, (vibrant, photo realistic, realistic, dramatic, dark, sharp focus, 8k)<br><br> <b>Negative prompt:</b> nude, Asian, black and white, close up, cartoon, 3d, denim, (disfigured), (deformed), (poorly drawn), (extra limbs), blurry, boring, sketch, lackluster, signature, letters, watermark, low res , horrific , mutated , artifacts , bad art , gross , b&w , poor quality , low quality , cropped </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <b>Steps:</b><br>26<br> <b>Sampler:</b><br>DPM++ SDE Karras<br> <b>CFG scale:</b><br>7.5<br> <b>Seed:</b><br>2388736888<br> <b>Size:</b><br>768x1024 </td> <td style="vertical-align:top;padding:.5714286em!important;border: 1px solid"> <img style="vertical-align:top;margin:0;padding:0" src="https://i.stack.imgur.com/GnUuV.jpg"> </td> </tr> </table> [1]: https://i.stack.imgur.com/wkK2b.png
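The rows above pair each prompt with the exact generation settings used. For readers who want to reproduce a row programmatically rather than through a web UI, here is a minimal `diffusers` sketch mapping those columns onto pipeline arguments. The checkpoint id is an assumption (the table does not name the model), and A1111-style UIs sample initial noise differently from `diffusers`, so the same seed will not yield a pixel-identical image.

```python
# Minimal sketch: reproduce one table row with diffusers.
# The checkpoint id below is an assumption -- the card does not name the model used.
import torch
from diffusers import StableDiffusionPipeline, EulerAncestralDiscreteScheduler

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # assumed checkpoint, for illustration only
    torch_dtype=torch.float16,
).to("cuda")
# "Sampler: Euler a" in the table corresponds to the Euler Ancestral scheduler.
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)

generator = torch.Generator("cuda").manual_seed(16)  # "Seed" column
image = pipe(
    "strong warrior princess| centered| key visual| intricate| highly detailed| "
    "breathtaking beauty| precise lineart| vibrant| comprehensive cinematic| "
    "Carne Griffiths| Conrad Roset",
    num_inference_steps=20,  # "Steps" column
    guidance_scale=7,        # "CFG scale" column
    width=512, height=512,   # "Size" column
    generator=generator,
).images[0]
image.save("preview.png")
```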
Andrija/SRoBERTa
[ "pytorch", "roberta", "fill-mask", "hr", "sr", "multilingual", "dataset:leipzig", "transformers", "masked-lm", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
88
2023-01-03T10:10:46Z
---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
- transformers
---

# {MODEL_NAME}

**This model was fine-tuned with SetFit based on 1 utterance and is used for a university project on intent detection. Other usage has not been tested.**

This is a [sentence-transformers](https://www.SBERT.net) model: it maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for tasks like clustering or semantic search.

<!--- Describe your model here -->

## Usage (Sentence-Transformers)

Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:

```
pip install -U sentence-transformers
```

Then you can use the model like this:

```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]

model = SentenceTransformer('{MODEL_NAME}')
embeddings = model.encode(sentences)
print(embeddings)
```

## Usage (HuggingFace Transformers)

Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: first, pass your input through the transformer model, then apply the right pooling operation on top of the contextualized word embeddings.

```python
from transformers import AutoTokenizer, AutoModel
import torch


def cls_pooling(model_output, attention_mask):
    return model_output[0][:, 0]


# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']

# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}')
model = AutoModel.from_pretrained('{MODEL_NAME}')

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)

# Perform pooling. In this case, CLS pooling.
sentence_embeddings = cls_pooling(model_output, encoded_input['attention_mask'])

print("Sentence embeddings:")
print(sentence_embeddings)
```

## Evaluation Results

<!--- Describe how your model was evaluated -->

For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME})

## Training

The model was trained with the parameters:

**DataLoader**:

`torch.utils.data.dataloader.DataLoader` of length 30 with parameters:

```
{'batch_size': 16, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```

**Loss**:

`sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss`

Parameters of the fit()-Method:

```
{
    "epochs": 1,
    "evaluation_steps": 0,
    "evaluator": "NoneType",
    "max_grad_norm": 1,
    "optimizer_class": "<class 'torch.optim.adamw.AdamW'>",
    "optimizer_params": {
        "lr": 2e-05
    },
    "scheduler": "WarmupLinear",
    "steps_per_epoch": 30,
    "warmup_steps": 3,
    "weight_decay": 0.01
}
```

## Full Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: MPNetModel
  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': True, 'pooling_mode_mean_tokens': False, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
)
```

## Citing & Authors

<!--- Describe where people can find more information -->
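The fit()-parameters listed under Training above map directly onto a sentence-transformers training loop. Here is a minimal sketch of what such a run could look like; the backbone name and the training pairs are placeholders, since the card does not include the actual SetFit training data.

```python
# Minimal sketch of the training setup described in the card.
# Backbone name and training pairs are placeholders, not from the original card.
from torch.utils.data import DataLoader
from sentence_transformers import SentenceTransformer, InputExample, losses

model = SentenceTransformer('sentence-transformers/all-mpnet-base-v2')  # assumed MPNet backbone

# Cosine-similarity training pairs: label 1.0 = same intent, 0.0 = different intent.
train_examples = [
    InputExample(texts=['turn on the lights', 'switch the lights on'], label=1.0),
    InputExample(texts=['turn on the lights', 'what time is it'], label=0.0),
]
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=16)
train_loss = losses.CosineSimilarityLoss(model)

# Mirrors the fit() parameters listed in the card.
model.fit(
    train_objectives=[(train_dataloader, train_loss)],
    epochs=1,
    warmup_steps=3,
    optimizer_params={'lr': 2e-05},
    weight_decay=0.01,
    max_grad_norm=1,
)
```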
Andrija/SRoBERTaFastBPE
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: LunarLander-v2
      type: LunarLander-v2
    metrics:
    - type: mean_reward
      value: -575.20 +/- 517.74
      name: mean_reward
      verified: false
---

# **PPO** Agent playing **LunarLander-v2**
This is a trained model of a **PPO** agent playing **LunarLander-v2**
using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

## Usage (with Stable-baselines3)
TODO: Add your code

```python
from stable_baselines3 import ...
from huggingface_sb3 import load_from_hub

...
```
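The usage section above is left as a TODO by the card template. A minimal sketch of what it typically expands to follows; the `repo_id` and `filename` are hypothetical placeholders, since the card does not name the uploaded artifact.

```python
# Minimal sketch: load a PPO checkpoint from the Hub and roll out one episode.
# repo_id and filename are hypothetical -- the card does not specify them.
import gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

checkpoint = load_from_hub(repo_id="user/ppo-LunarLander-v2",
                           filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)

env = gym.make("LunarLander-v2")
obs = env.reset()
done = False
while not done:
    action, _ = model.predict(obs, deterministic=True)
    obs, reward, done, info = env.step(action)
```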
Ani123/Ani
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
- transformers
---

# {MODEL_NAME}

**This model was fine-tuned with SetFit based on 5 utterances and is used for a university project on intent detection. Other usage has not been tested.**

This is a [sentence-transformers](https://www.SBERT.net) model: it maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for tasks like clustering or semantic search.

<!--- Describe your model here -->

## Usage (Sentence-Transformers)

Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:

```
pip install -U sentence-transformers
```

Then you can use the model like this:

```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]

model = SentenceTransformer('{MODEL_NAME}')
embeddings = model.encode(sentences)
print(embeddings)
```

## Usage (HuggingFace Transformers)

Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: first, pass your input through the transformer model, then apply the right pooling operation on top of the contextualized word embeddings.

```python
from transformers import AutoTokenizer, AutoModel
import torch


def cls_pooling(model_output, attention_mask):
    return model_output[0][:, 0]


# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']

# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}')
model = AutoModel.from_pretrained('{MODEL_NAME}')

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)

# Perform pooling. In this case, CLS pooling.
sentence_embeddings = cls_pooling(model_output, encoded_input['attention_mask'])

print("Sentence embeddings:")
print(sentence_embeddings)
```

## Evaluation Results

<!--- Describe how your model was evaluated -->

For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME})

## Training

The model was trained with the parameters:

**DataLoader**:

`torch.utils.data.dataloader.DataLoader` of length 150 with parameters:

```
{'batch_size': 16, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```

**Loss**:

`sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss`

Parameters of the fit()-Method:

```
{
    "epochs": 1,
    "evaluation_steps": 0,
    "evaluator": "NoneType",
    "max_grad_norm": 1,
    "optimizer_class": "<class 'torch.optim.adamw.AdamW'>",
    "optimizer_params": {
        "lr": 2e-05
    },
    "scheduler": "WarmupLinear",
    "steps_per_epoch": 150,
    "warmup_steps": 15,
    "weight_decay": 0.01
}
```

## Full Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: MPNetModel
  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': True, 'pooling_mode_mean_tokens': False, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
)
```

## Citing & Authors

<!--- Describe where people can find more information -->
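Since the card states the model is used for intent detection, a natural inference pattern is nearest-intent matching by cosine similarity over a few prototype utterances. A minimal sketch follows; the intent labels and example utterances are invented for illustration and are not part of the original card.

```python
# Minimal sketch: intent detection by cosine similarity over intent prototypes.
# The intents and utterances below are invented for illustration.
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer('{MODEL_NAME}')

intent_examples = {
    'greeting': 'hello there',
    'weather': 'what is the weather like today',
}
labels = list(intent_examples)
prototypes = model.encode(list(intent_examples.values()), convert_to_tensor=True)

query = model.encode('hi, good morning', convert_to_tensor=True)
scores = util.cos_sim(query, prototypes)[0]
print(labels[int(scores.argmax())])  # prints the highest-scoring intent label
```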
Ankitha/DialoGPT-small-harrypottery
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: unknown --- https://perchance.org/9898-mtg-card-generator-v3 --- background = {import:background-image-plugin} commentsPlugin = {import:comments-plugin} o = [output] ocn = [output_card_name.selectUnique(1)] tCT = [thisCardType] ocm = [output_card_mana] oct = [output_card_type] octst = [output_cardtype_subtype] octxt = [output_card_text] octxtkact = [output_card_text_keyword_action] octxtkab = [output_card_text_keyword_ability] ocr = [output_card_rarity] ocsc = [output_card_set_code] ocpt = [output_card_power_toughness] c = [colors] s = [scryfall.selectUnique(1).sentenceCase] r = <b>[ocn.selectUnique(1).titleCase]<p>[ocm.selectUnique(1)]<br>[tCT.selectUnique(1).titleCase]<br>[ocsc.selectUnique(1)] • [ocr.selectUnique(1)]<br>{{[scry_keyword_abilities.selectUnique(1)]|[scry_keyword_actions.selectUnique(1)]|[scry_ability_words.selectUnique(1)]}|[octxt.selectUnique(1)]|[octxtkact.selectUnique(1)]|[octxtkab.selectUnique(1)]|{[scry_keyword_abilities.selectUnique(1)]|[scry_keyword_actions.selectUnique(1)]|[scry_ability_words.selectUnique(1)]} [octxt.selectUnique(1)]|{[scry_keyword_abilities.selectUnique(1)]|[scry_keyword_actions.selectUnique(1)]|[scry_ability_words.selectUnique(1)]}|[octxt.selectUnique(1)] [octxtkact.selectUnique(1)]|{[scry_keyword_abilities.selectUnique(1)]|[scry_keyword_actions.selectUnique(1)]|[scry_ability_words.selectUnique(1)]}|[octxtkact.selectUnique(1)] [octxtkab.selectUnique(1)]}<br><b>[ocpt.selectUnique(1)] <br><br>— — — — — — — — — — — — — — — — — —<br> emo = {import:emotion} pageTitle = <u>9898-MTG Card Generator V3</u> pageSubtitle = 2023 © 9898-MTG ocbl = ocstbl = output_card_subtype_basic_land ocnbl = ocstnbl = output_card_subtype_nonbasic_land commentsOptions width = 400 title 9898-MTG Card Generator V3 $output = <b>[ocn.selectUnique(1).titleCase]<p>[ocm.selectUnique(1)]<br>[tCT.selectUnique(1).titleCase]<br>[ocsc.selectUnique(1)] • [ocr.selectUnique(1)]<br>{{[scry_keyword_abilities.selectUnique(1)]|[scry_keyword_actions.selectUnique(1)]|[scry_ability_words.selectUnique(1)]}|[octxt.selectUnique(1)]|[octxtkact.selectUnique(1)]|[octxtkab.selectUnique(1)]|{[scry_keyword_abilities.selectUnique(1)]|[scry_keyword_actions.selectUnique(1)]|[scry_ability_words.selectUnique(1)]} [octxt.selectUnique(1)]|{[scry_keyword_abilities.selectUnique(1)]|[scry_keyword_actions.selectUnique(1)]|[scry_ability_words.selectUnique(1)]}|[octxt.selectUnique(1)] [octxtkact.selectUnique(1)]|{[scry_keyword_abilities.selectUnique(1)]|[scry_keyword_actions.selectUnique(1)]|[scry_ability_words.selectUnique(1)]}|[octxtkact.selectUnique(1)] [octxtkab.selectUnique(1)]}<br><b>[ocpt.selectUnique(1)] <br>— — — — — — — — — — — — — — — — — —<br> output1 title = Name buttonText = Generate content = <b>[ocn.selectUnique(1).titleCase] output2 title = Cost buttonText = Generate content = <b>[ocm.selectUnique(1)] output3 title = Type — Subtype buttonText = Generate content = <b>{[tCT.selectUnique(1)]|[oct.selectUnique(1)] — [octst.selectUnique(1)]} output4 title = Set Code • Rartity buttonText = Generate content = <b>[ocsc.selectUnique(1)] • [ocr.selectUnique(1)] output5 title = Effects buttonText = Generate content = <b>{[octxt.selectUnique(1)]|[octxtkact.selectUnique(1)]|[octxtkab.selectUnique(1)]|[octxt.selectUnique(1)]|[octxt.selectUnique(1)], [octxtkact.selectUnique(1)]|[octxtkact.selectUnique(1)], [octxtkab.selectUnique(1)]} output6 title = Power/Toughness buttonText = Generate content = [ocpt.selectUnique(1)] output7 title = Results buttonText = Generate content = [r] output_card_name 
{{import:adjective}|{import:verb}} {{import:word}|{import:noun}} {import:adjective} {import:verb} {{import:word}|{import:noun}} {{import:adjective}|{import:verb}} {import:word} {import:noun} {import:adjective} {import:verb} {import:word} {import:noun} thisCardType [thisCardType = output_card_type] — [specificType] specificType [output_card_subtype_basic_land] ^[thisCardType == "Basic"] [output_card_subtype_nonbasic_land] ^[thisCardType == "NonBasic"] [output_card_subtype_creature] ^[thisCardType == "Creature"] [output_card_subtype_artifact] ^[thisCardType == "Artifact"] [output_card_subtype_enchantment] ^[thisCardType == "Enchantment"] [output_card_subtype_planeswalker] ^[thisCardType == "Planeswalker"] [output_card_subtype_instant] ^[thisCardType == "Instant"] [output_card_subtype_sorcery] ^[thisCardType == "Sorcery"] [output_card_subtype_creature] ^[thisCardType == "Creature"] [output_card_subtype_plane] ^[thisCardType == "Plane"] output_card_mana {{{0-12}|X}|{0-12}|X {0-6 [basic_mana]|[hybrid_mana]|[tri_hybrid_mana]|[four_color_mana]|[multicolor_mana]|[phyrexian_mana]|[prismatic_mana]}} basic_mana {W|U|B|R|G|C|X|S} hybrid_mana {{1-2}/{W|U|B|R|G}|{W|U|B|R|G}} tri_hybrid_mana {W/B/G|W/U/G|W/U/B|U/B/R|W/U/R|B/R/G|W/B/R|W/R/G|U/B/G|U/R/G} four_color_mana {U/B/R/G|W/B/R/G|W/U/B/G|W/U/B/R} multicolor_mana {BR|UB|BG|RG|GU|UR|WB|GW|RW|WU} phyrexian_mana -2 Life/{W|U|B|R|G|C|X|S} prismatic_mana WUBRG out [thisCardType = output_card_type] ocbl [ocbl = ocstbl = output_card_subtype_basic_land] ocnbl [ocnbl = ocstnbl = output_card_subtype_nonbasic_land] ocleg [ocleg = ocstleg = output_card_subtype_legendary] output_card_type Basic [if (output_card_type = "Basic") {output_card_subtype_basic_land} else {output_card_type}|{ocbl}] NonBasic [if (output_card_type = "NonBasic") {output_card_subtype_nonbasic_land} else {output_card_type}|{ocnbl}] Legendary [if (output_card_type = "Legendary") {output_card_subtype_legendary} else {output_card_type}|{ocstleg}] Token Tribal World Conspiracy Creature [if (output_card_type = "Creature") {output_card_subtype_creature} else {output_card_type}] Advertisement Artifact [if (output_card_type = "Artifact") {output_card_subtype_artifact} else {output_card_type}] Artifact Creature Artifact Land Enchantment [if (output_card_type = "Enchantment") {output_card_subtype_enchantment} else {output_card_type}] Enchantment Creature Instant [if (output_card_type = "Instant") {output_card_subtype_instant} else {output_card_type}] Sorcery [if (output_card_type = "Sorcery") {output_card_subtype_sorcery} else {output_card_type}] Land [if (output_card_type = "Land") {output_card_subtype_basic_land} else {output_card_subtype_basic_land} {output_card_type}] [if (output_card_type = "land") output_card_mana = ""] Planeswalker [if (output_card_type = "Planeswalker") {{{[scry_keyword_abilities.selectUnique(1)]|[scry_keyword_actions.selectUnique(1)]|[scry_ability_words.selectUnique(1)]}|[octxt.selectUnique(1)]|[octxtkact.selectUnique(1)]|[octxtkab.selectUnique(1)]|{[scry_keyword_abilities.selectUnique(1)]|[scry_keyword_actions.selectUnique(1)]|[scry_ability_words.selectUnique(1)]} [octxt.selectUnique(1)]|{[scry_keyword_abilities.selectUnique(1)]|[scry_keyword_actions.selectUnique(1)]|[scry_ability_words.selectUnique(1)]}|[octxt.selectUnique(1)] [octxtkact.selectUnique(1)]|{[scry_keyword_abilities.selectUnique(1)]|[scry_keyword_actions.selectUnique(1)]|[scry_ability_words.selectUnique(1)]}|[octxtkact.selectUnique(1)] [octxtkab.selectUnique(1)]}}[if (output_card_type = "Planeswalker") 
{output_card_subtype_planeswalker} else {output_card_type}] Emblem Phenemonom Plane [if (output_card_type = "Plane") {output_card_subtype_plane} else {output_card_type}] Dungeon Scheme Vanguard output_cardtype_subtype {[output_card_subtype_artifact]|[output_card_subtype_enchantment]|[output_card_subtype_basic_land]|[output_card_subtype_nonbasic_land]|[output_card_subtype_planeswalker]|[output_card_subtype_instant]|[output_card_subtype_sorcery]|[output_card_subtype_creature]|[output_card_subtype_plane]} output_card_subtype_artifact Blood Clue Contraption Equipment Food Fortification Gold Powerstone Treasure Vehicle output_card_subtype_enchantment Aura Background Cartouche Class Curse Rune Saga Shard Shrine output_card_subtype_basic_land Plains Island Swamp Mountain Forest Waste Snow-Covered Plains Snow-Covered Island Snow-Covered Swamp Snow-Covered Mountain Snow-Covered Forest output_card_subtype_nonbasic_land Desert Gate Lair Locus Mine Power-Plant Tower Urza’s output_card_subtype_planeswalker Ajani Aminatou Angrath Arlinn Ashiok Bahamut Basri Bolas Calix Chandra Dack Dakkon Daretti Davriel Dihada Domri Dovin Ellywick Elminster Elspeth Estrid Freyalise Garruk Gideon Grist Huatli Jace Jaya Jeska Kaito Karn Kasmina Kaya Kiora Koth Liliana Lolth Lukka Minsc Mordenkainen Nahiri Narset Niko Nissa Nixilis Oko Ral Rowan Saheeli Samut Sarkhan Serra Sivitri Sorin Szat Tamiyo Tasha Teferi Teyo Tezzeret Tibalt Tyvar Ugin Venser Vivien Vraska Will Windgrace Wrenn Xenagos Yanggu Yanling Zariel output_card_subtype_instant Adventure Arcane Lesson Trap output_card_subtype_sorcery Adventure Arcane Lesson Trap output_card_subtype_creature Advisor Aetherborn Ally Angel Antelope Ape Archer Archon Army Artificer Assassin Assembly-Worker Atog Aurochs Avatar Azra Badger Barbarian Bard Basilisk Bat Bear Beast Beeble Beholder Berserker Bird, Blinkmoth Boar Bringer Brushwagg Camarid Camel Caribou Carrier Cat Centaur Cephalid Chimera Citizen Cleric Cockatrice Construct Coward Crab Crocodile Cyclops Dauthi Demigod Demon Deserter Devil Dinosaur Djinn Dog Dragon Drake Dreadnought Drone Druid Dryad Dwarf Efreet Egg Elder Eldrazi Elemental Elephant Elf Elk Eye Faerie Ferret Fish Flagbearer Fox Fractal Frog Fungus Gargoyle Germ Giant Gith Gnoll Gnome Goat Goblin God Golem Gorgon Graveborn Gremlin Griffin Hag Halfling Hamster Harpy Hellion Hippo Hippogriff Homarid Homunculus Horror Horse Human Hydra Hyena Illusion Imp Incarnation Inkling Insect Jackal Jellyfish Juggernaut Kavu Kirin Kithkin Knight Kobold Kor Kraken Lamia Lammasu Leech Leviathan Lhurgoyf Licid Lizard Manticore Masticore Mercenary Merfolk Metathran Minion Minotaur Mole Monger Mongoose Monk Monkey Moonfolk Mouse Mutant Myr Mystic Naga Nautilus Nephilim Nightmare Nightstalker Ninja Noble Noggle Nomad Nymph Octopus Ogre Ooze Orb Orc Orgg Otter Ouphe Ox Oyster Pangolin Peasant Pegasus Pentavite Pest Phelddagrif Phoenix Phyrexian Pilot Pincher Pirate Plant Praetor Prism Processor Rabbit Raccoon Ranger Rat Rebel Reflection Rhino Rigger Rogue Sable Salamander Samurai Sand Saproling Satyr Scarecrow Scion Scorpion Scout Sculpture Serf Serpent Servo Shade Shaman Shapeshifter Shark Sheep Siren Skeleton Slith Sliver Slug Snake Soldier Soltari Spawn Specter Spellshaper Sphinx Spider Spike Spirit Splinter Sponge Squid Squirrel Starfish Surrakar Survivor Tentacle Tetravite Thalakos Thopter Thrull Tiefling Treefolk Trilobite Triskelavite Troll Turtle Unicorn Vampire Vedalken Viashino Volver Wall Walrus Warlock Warrior Weird Werewolf Whale Wizard Wolf Wolverine Wombat Worm 
Wraith Wurm Yeti Zombie Zubera output_card_subtype_plane Alara Arkhos Azgol Belenon Bolas’s Meditation Realm Dominaria Equilor Ergamon Fabacin Innistrad Iquatana Ir Kaldheim Kamigawa Karsus Kephalai Kinshala Kolbahan Kyneth Lorwyn Luvion Mercadia Mirrodin Moag Mongseng Muraganda New Phyrexia Phyrexia Pyrulea Rabiah Rath Ravnica Regatha Segovia Serra’s Realm Shadowmoor Shandalar Ulgrotha Valla Vryn Wildfire Xerex Zendikar output_card_subtype_legendary Artifact Creature Enchantment Land Planeswalker Instant Sorcery Artifact Land Artifact Creature Enchantment Artifact Enchantment Artifact Creature Enchantment Creature Enchantment Land Instant Creature Land Creature output_card_set_code {A-Z}{A-Z}{A-Z} output_card_rarity {Common|Uncommon|Rare|Mythic Rare|Special|Masterpiece} output_card_text [output_card_text_keyword_action] [output_card_text_keyword_ability] [output_card_text_keyword_action] [output_card_text_keyword_ability] When ~this enters the battlefield, {[output_card_text_keyword_action]|[output_card_text_keyword_ability]|[output_card_text_keyword_action], [output_card_text_keyword_ability]} Whenever ~this enters the battlefield or attacks, {[output_card_text_keyword_action]|[output_card_text_keyword_ability]|[output_card_text_keyword_action], [output_card_text_keyword_ability]} When ~this dies, {[output_card_text_keyword_action]|[output_card_text_keyword_ability]|[output_card_text_keyword_action], [output_card_text_keyword_ability]} Whenever a card from [output_game_zones] is put into [output_game_zones], [output_card_text_keyword_action] When ~this is put into [output_game_zones], [output_card_text_keyword_action] target {[output_card_type]|[output_cardtype_subtype]|[output_card_type] [output_cardtype_subtype]} Whenever ~this deals damage to a player, {[output_card_text_keyword_action]|[output_card_text_keyword_ability]|[output_card_text_keyword_action], [output_card_text_keyword_ability]} Whenever ~this deals damage to a player, create a token thats a copy of ~this. Whenever ~this deals damage to a player, exile target {[output_card_type]|[output_cardtype_subtype]} Whenever you {[output_card_text_keyword_action]|[output_card_text_keyword_ability]|[output_card_text_keyword_action], [output_card_text_keyword_ability]}, {[output_card_text_keyword_action]|[output_card_text_keyword_ability]|[output_card_text_keyword_action], [output_card_text_keyword_ability]} Whenever you gain life, create a 1/1 colorless [output_card_subtype_creature] creature token. Whenever you roll a die, create a X/X colorless [output_card_subtype_creature] creature token where X equals the result of the die roll. 
[c] creatures get {+0/+1|+1/+0|+1/+1} [c] creatures you control get {+0/+1|+1+0|+1/+1} output_card_text_keyword_action Abandon Activate Adapt Amass Assemble Attach Bolster Cast Clash Connive Counter Create Destroy Detain Discard Double Exchange Exert Exile Explore Fateseal Fight Goad Investigate Learn Manifest Meld Mill Monstrosity Planeswalk Play Populate Proliferate Regenerate Reveal Sacrifice Scry Search Set in Motion Shuffle Support Surveil Tap Transform Untap Venture into the Dungeon Vote output_card_text_keyword_ability Deathtouch Defender Double Strike Enchant Equip First Strike Flash Flying Haste Hexproof Indestructible Intimidate Landwalk Lifelink Protection Reach Shroud Trample Vigilance Ward Banding Rampage Cumulative Upkeep Flanking Phasing Buyback Shadow Cycling Echo Horsemanship Fading Kicker Flashback Madness Fear Morph Amplify Provoke Storm Affinity Entwine Modular Sunburst Bushido Soulshift Splice Offering Ninjutsu Epic Convoke Dredge Transmute Bloodthirst Haunt Replicate Forecast Graft Recover Ripple Split Second Suspend Vanishing Absorb Aura Swap Delve Fortify Frenzy Gravestorm Poisonous Transfigure Champion Changeling Evoke Hideaway Prowl Reinforce Conspire Persist Wither Retrace Devour Exalted Unearth Cascade Annihilator Level Up Rebound Totem Armor Infect Battle Cry Living Weapon Undying Miracle Soulbond Overload Scavenge Unleash Cipher Evolve Extort Fuse Bestow Tribute Dethrone Hidden Agenda Outlast Prowess Dash Exploit Menace Renown Awaken Devoid Ingest Myriad Surge Skulk Emerge Escalate Melee Crew Fabricate Partner Undaunted Improvise Aftermath Embalm Eternalize Afflict Ascend Assist Jump-Start Mentor Afterlife Riot Spectacle Escape Companion Mutate Encore Boast Foretell Demonstrate Daybound Nightbound Disturb Decayed Cleave Training Compleated Reconfigure Blitz Casualty Enlist Read Ahead output_card_power_toughness {{0-12}/{1-12}|[scry_powers]/[scry_toughness]} output_game_zones Battlefield Command Exile Graveyard Hand Library Sideboard Stack Outside Of The Game word_types {import:noun} {import:pronoun} {import:verb} {import:adjective} {import:adverb} {import:preposition} {import:interjection} noun abbey absence absorption abstinence absurdity abundance acceptance accessibility accommodation accomplice accountability accounting accreditation accuracy acquiescence acreage actress actuality adage adaptation adherence adjustment adoption adultery advancement advert advertisement advertising advice aesthetics affinity aggression agriculture aircraft airtime allegation allegiance allegory allergy allies alligator allocation allotment altercation ambulance ammonia anatomy anemia ankle announcement annoyance annuity anomaly anthropology anxiety apartheid apologise apostle apparatus appeasement appellation appendix applause appointment appraisal archery archipelago architecture ardor arrears arrow artisan artistry ascent assembly assignment association asthma atheism attacker attraction attractiveness auspices authority avarice aversion aviation babbling backlash baker ballet balls banjo baron barrier barrister bases basin basis battery battling bedtime beginner begun bending bicycle billing bingo biography biology birthplace blackberry blather blossom boardroom boasting bodyguard boldness bomber bondage bonding bones bonus bookmark boomer booty bounds bowling brainstorming breadth breaker brewer brightness broccoli broth brotherhood browsing brunch brunt building bullion bureaucracy burglary buyout by-election cabal cabbage calamity campaign canonization captaincy 
carcass carrier cartridge cassette catfish caught celebrity cemetery certainty certification charade chasm check-in cheerleader cheesecake chemotherapy chili china chivalry cholera cilantro circus civilisation civility clearance clearing clerk climber closeness clothing clutches coaster coconut coding collaborator colleague college collision colors combustion comedian comer commander commemoration commenter commissioner commune competition completeness complexity computing comrade concur condominium conduit confidant configuration confiscation conflagration conflict consist consistency consolidation conspiracy constable consul consultancy contentment contents contractor conversation cornerstone corpus correlation councilman counselor countdown countryman coverage covering coyote cracker creator criminality crocodile cropping cross-examination crossover crossroads culprit cumin curator curfew cursor custard cutter cyclist cyclone cylinder cynicism daddy damsel darkness dawning daybreak dealing dedication deduction defection deference deficiency definition deflation degeneration delegation delicacy delirium deliverance demeanor demon demonstration denomination dentist departure depletion depression designation despotism detention developer devolution dexterity diagnosis dialect differentiation digger digress dioxide diploma disability disarmament discord discovery dishonesty dismissal disobedience dispatcher disservice distribution distributor diver diversity docking dollar dominance domination dominion donkey doorstep doorway dossier downside drafting drank drilling driver drumming drunkenness duchess ducking dugout dumps dwelling dynamics eagerness earnestness earnings eater editor effectiveness electricity elements eloquence emancipation embodiment embroidery emperor employment encampment enclosure encouragement endangerment enlightenment enthusiasm environment environs envoy epilepsy equation equator error espionage estimation evacuation exaggeration examination exclamation expediency exploitation extinction eyewitness falls fascism fastball feces feedback ferocity fertilization fetish finale firing fixing flashing flask flora fluke folklore follower foothold footing forefinger forefront forgiveness formality formation formula foyer fragmentation framework fraud freestyle frequency friendliness fries frigate fulfillment function functionality fundraiser fusion futility gallantry gallery genesis genitals girlfriend glamour glitter glucose google grandeur grappling greens gridlock grocer groundwork grouping gunman gusto habitation hacker hallway hamburger hammock handling hands handshake happiness hardship headcount header headquarters heads headset hearth hearts heath hegemony height hello helper helping helplessness hierarchy hoarding hockey homeland homer honesty horror horseman hostility housing humility hurricane iceberg ignition illness illustration illustrator immunity immunization imperialism imprisonment inaccuracy inaction inactivity inauguration indecency indicator inevitability infamy infiltration influx iniquity innocence innovation insanity inspiration instruction instructor insurer interact intercession intercourse intermission interpretation intersection interval intolerance intruder invasion investment involvement irrigation iteration jenny jogging jones joseph juggernaut juncture jurisprudence juror kangaroo kingdom knocking laborer larceny laurels layout leadership leasing legislation leopard liberation licence lifeblood lifeline ligament lighting likeness line-up 
lineage liner lineup liquidation listener literature litigation litre loathing locality lodging logic longevity lookout lordship lustre ma'am machinery madness magnificence mahogany mailing mainframe maintenance majority manga mango manifesto mantra manufacturer maple martin martyrdom mathematician matrix matron mayhem mayor means meantime measurement mechanics mediator medics melodrama memory mentality metaphysics method metre miner mirth misconception misery mishap misunderstanding mobility molasses momentum monarchy monument morale mortality motto mouthful mouthpiece mover movie mowing murderer musician mutation mythology narration narrator nationality negligence neighborhood neighbour nervousness networking nexus nightmare nobility nobody noodle normalcy notification nourishment novella nucleus nuisance nursery nutrition nylon oasis obscenity obscurity observer offense onslaught operation opportunity opposition oracle orchestra organisation organizer orientation originality ounce outage outcome outdoors outfield outing outpost outset overseer owner oxygen pairing panther paradox parliament parsley parson passenger pasta patchwork pathos patriotism pendulum penguin permission persona perusal pessimism peter philosopher phosphorus phrasing physique piles plateau playing plaza plethora plurality pneumonia pointer poker policeman polling poster posterity posting postponement potassium pottery poultry pounding pragmatism precedence precinct preoccupation pretense priesthood prisoner privacy probation proceeding proceedings processing processor progression projection prominence propensity prophecy prorogation prospectus protein prototype providence provider provocation proximity puberty publicist publicity publisher pundit putting quantity quart quilting quorum racism radiance ralph rancher ranger rapidity rapport ratification rationality reaction reader reassurance rebirth receptor recipe recognition recourse recreation rector recurrence redemption redistribution redundancy refinery reformer refrigerator regularity regulator reinforcement reins reinstatement relativism relaxation rendition repayment repentance repertoire repository republic reputation resentment residency resignation restaurant resurgence retailer retention retirement reviewer riches righteousness roadblock robber rocks rubbing runoff saloon salvation sarcasm saucer savior scarcity scenario scenery schism scholarship schoolboy schooner scissors scolding scooter scouring scrimmage scrum seating sediment seduction seeder seizure self-confidence self-control self-respect semicolon semiconductor semifinal senator sending serenity seriousness servitude sesame setup sewing sharpness shaving shoplifting shopping siding simplicity simulation sinking skate sloth slugger snack snail snapshot snark soccer solemnity solicitation solitude somewhere sophistication sorcery souvenir spaghetti specification specimen specs spectacle spectre speculation sperm spoiler squad squid staging stagnation staircase stairway stamina standpoint standstill stanza statement stillness stimulus stocks stole stoppage storey storyteller stylus subcommittee subscription subsidy suburb success sufferer supposition suspension sweater sweepstakes swimmer syndrome synopsis syntax system tablespoon taker tavern technology telephony template tempo tendency tendon terrier terror terry theater theology therapy thicket thoroughfare threshold thriller thunderstorm ticker tiger tights today tossing touchdown tourist tourney toxicity tracing tractor translation 
transmission transmitter trauma traveler treadmill trilogy trout tuning twenties tycoon tyrant ultimatum underdog underwear unhappiness unification university uprising vaccination validity vampire vanguard variation vegetation verification viability vicinity victory viewpoint villa vindication violation vista vocalist vogue volcano voltage vomiting vulnerability waistcoat waitress wardrobe warmth watchdog wealth weariness whereabouts whisky whiteness widget width windfall wiring witchcraft withholding womanhood words workman youngster pronoun all another any anybody anyone anything both each each other either everybody everyone everything few he her hers herself him himself his I it its itself many me mine more most much my myself neither no one nobody none one one another other others our ours ourselves several she some somebody someone something that their theirs them themselves these they this those uswe what whatever which whichever who whoever whom whomever whose you your yours yourself yourselves verb accept pastTense = accepted add pastTense = added admire pastTense = admired admit pastTense = admitted advise pastTense = advised afford pastTense = afforded agree pastTense = agreed alert pastTense = alerted allow pastTense = allowed amuse pastTense = amused analyse pastTense = analysed announce pastTense = announced annoy pastTense = annoyed answer pastTense = answered apologise pastTense = apologised appear pastTense = appeared applaud pastTense = applauded appreciate pastTense = appreciated approve pastTense = approved argue pastTense = argued arrange pastTense = arranged arrest pastTense = arrested arrive pastTense = arrived ask pastTense = asked attach pastTense = attached attack pastTense = attacked attempt pastTense = attempted attend pastTense = attended attract pastTense = attracted avoid pastTense = avoided back pastTense = backed bake pastTense = baked balance pastTense = balanced ban pastTense = banned bang pastTense = banged bare pastTense = bared bat pastTense = batted bathe pastTense = bathed battle pastTense = battled beam pastTense = beamed beg pastTense = begged behave pastTense = behaved belong pastTense = belonged bleach pastTense = bleached bless pastTense = blessed blind pastTense = blinded blink pastTense = blinked blot pastTense = blotted blush pastTense = blushed boast pastTense = boasted boil pastTense = boiled bolt pastTense = bolted bomb pastTense = bombed book pastTense = booked bore pastTense = bored borrow pastTense = borrowed bounce pastTense = bounced bow pastTense = bowed box pastTense = boxed brake pastTense = braked branch pastTense = branched breathe pastTense = breathed bruise pastTense = bruised brush pastTense = brushed bubble pastTense = bubbled bump pastTense = bumped burn pastTense = burned bury pastTense = buried buzz pastTense = buzzed calculate pastTense = calculated call pastTense = called camp pastTense = camped care pastTense = cared carry pastTense = carried carve pastTense = carved cause pastTense = caused challenge pastTense = challenged change pastTense = changed charge pastTense = charged chase pastTense = chased cheat pastTense = cheated check pastTense = checked cheer pastTense = cheered chew pastTense = chewed choke pastTense = choked chop pastTense = chopped claim pastTense = claimed clap pastTense = clapped clean pastTense = cleaned clear pastTense = cleared clip pastTense = clipped close pastTense = closed coach pastTense = coached coil pastTense = coiled collect pastTense = collected colour pastTense = coloured comb 
pastTense = combed command pastTense = commanded communicate pastTense = communicated compare pastTense = compared compete pastTense = competed complain pastTense = complained complete pastTense = completed concentrate pastTense = concentrated concern pastTense = concerned confess pastTense = confessed confuse pastTense = confused connect pastTense = connected consider pastTense = considered consist pastTense = consisted contain pastTense = contained continue pastTense = continued copy pastTense = copied correct pastTense = corrected cough pastTense = coughed count pastTense = counted cover pastTense = covered crack pastTense = cracked crash pastTense = crashed crawl pastTense = crawled cross pastTense = crossed crush pastTense = crushed cry pastTense = cried cure pastTense = cured curl pastTense = curled curve pastTense = curved cycle pastTense = cycled dam pastTense = dammed damage pastTense = damaged dance pastTense = danced dare pastTense = dared decay pastTense = decayed deceive pastTense = deceived decide pastTense = decided decorate pastTense = decorated delay pastTense = delayed delight pastTense = delighted deliver pastTense = delivered depend pastTense = depended describe pastTense = described desert pastTense = deserted deserve pastTense = deserved destroy pastTense = destroyed detect pastTense = detected develop pastTense = developed disagree pastTense = disagreed disappear pastTense = disappeared disapprove pastTense = disapproved disarm pastTense = disarmed discover pastTense = discovered dislike pastTense = disliked divide pastTense = divided double pastTense = doubled doubt pastTense = doubted drag pastTense = dragged drain pastTense = drained dream pastTense = dreamed dress pastTense = dressed drip pastTense = dripped drop pastTense = dropped drown pastTense = drowned drum pastTense = drummed dry pastTense = dried dust pastTense = dusted earn pastTense = earned educate pastTense = educated embarrass pastTense = embarrassed employ pastTense = employed empty pastTense = emptied encourage pastTense = encouraged end pastTense = ended enjoy pastTense = enjoyed enter pastTense = entered entertain pastTense = entertained escape pastTense = escaped examine pastTense = examined excite pastTense = excited excuse pastTense = excused exercise pastTense = exercised exist pastTense = existed expand pastTense = expand expect pastTense = expected explain pastTense = explained explode pastTense = exploded extend pastTense = extended face pastTense = faced fade pastTense = faded fail pastTense = failed fancy pastTense = fancied fasten pastTense = fastened fax pastTense = faxed fear pastTense = feared fence pastTense = fenced fetch pastTense = fetched file pastTense = filed fill pastTense = filled film pastTense = filmed fire pastTense = fired fit pastTense = fitted fix pastTense = fixed flap pastTense = flapped flash pastTense = flashed float pastTense = floated flood pastTense = flooded flow pastTense = flowed flower pastTense = flowered fold pastTense = folded follow pastTense = followed fool pastTense = fooled force pastTense = forced form pastTense = formed found pastTense = founded frame pastTense = framed frighten pastTense = frightened fry pastTense = fried gather pastTense = gathered gaze pastTense = gazed glow pastTense = glowed glue pastTense = glued grab pastTense = grabbed grate pastTense = grated grease pastTense = greased greet pastTense = greeted grin pastTense = grinned grip pastTense = gripped groan pastTense = groaned guarantee pastTense = guaranteed guard pastTense = 
guarded guess pastTense = guessed guide pastTense = guided hammer pastTense = hammered hand pastTense = handed handle pastTense = handled hang pastTense = hung happen pastTense = happened harass pastTense = harassed harm pastTense = harmed hate pastTense = hated haunt pastTense = haunted head pastTense = headed heal pastTense = healed heap pastTense = heaped heat pastTense = heated help pastTense = helped hook pastTense = hooked hop pastTense = hopped hope pastTense = hoped hover pastTense = hovered hug pastTense = hugged hum pastTense = hummed hunt pastTense = hunted hurry pastTense = hurried identify pastTense = identified ignore pastTense = ignored imagine pastTense = imagined impress pastTense = impressed improve pastTense = improved include pastTense = included increase pastTense = increased influence pastTense = influenced inform pastTense = informed inject pastTense = injected injure pastTense = injured instruct pastTense = instructed intend pastTense = intended interest pastTense = interested interfere pastTense = interfered interrupt pastTense = interrupted introduce pastTense = introduced invent pastTense = invented invite pastTense = invited irritate pastTense = irritated itch pastTense = itched jail pastTense = jailed jam pastTense = jammed jog pastTense = jogged join pastTense = joined joke pastTense = joked judge pastTense = judged juggle pastTense = juggled jump pastTense = jumped kick pastTense = kicked kill pastTense = killed kiss pastTense = kissed kneel pastTense = knelt knit pastTense = knitted knock pastTense = knocked knot pastTense = knotted label pastTense = labelled land pastTense = landed last pastTense = lasted laugh pastTense = laughed launch pastTense = launched learn pastTense = learned level pastTense = levelled license pastTense = licensed lick pastTense = licked lie pastTense = lied lighten pastTense = lightened like pastTense = liked list pastTense = listed listen pastTense = listened live pastTense = lived load pastTense = loaded lock pastTense = locked long pastTense = longed look pastTense = look love pastTense = loved man pastTense = manned manage pastTense = managed march pastTense = marched mark pastTense = marked marry pastTense = married match pastTense = matched mate pastTense = mated matter pastTense = mattered measure pastTense = measured meddle pastTense = meddled melt pastTense = melted memorise pastTense = memorised mend pastTense = mended mess up pastTense = messed up milk pastTense = milked mine pastTense = mined miss pastTense = missed mix pastTense = mixed moan pastTense = moaned moor pastTense = moored mourn pastTense = mourned move pastTense = moved muddle pastTense = muddled mug pastTense = mugged multiply pastTense = multiplied murder pastTense = murdered nail pastTense = nailed name pastTense = named need pastTense = needed nest pastTense = nested nod pastTense = nodded note pastTense = noted notice pastTense = noticed number pastTense = numbered obey pastTense = obeyed object pastTense = objected observe pastTense = observed obtain pastTense = obtained occur pastTense = occurred offend pastTense = offended offer pastTense = offered open pastTense = opened order pastTense = ordered overflow pastTense = overflowed owe pastTense = owed own pastTense = owned pack pastTense = packed paddle pastTense = paddled paint pastTense = painted park pastTense = parked part pastTense = parted pass pastTense = passed paste pastTense = pasted pat pastTense = patted pause pastTense = paused peck pastTense = pecked pedal pastTense = pedalled peel 
pastTense = peeled peep pastTense = peeped perform pastTense = performed permit pastTense = permitted phone pastTense = phoned pick pastTense = picked pinch pastTense = pinched pine pastTense = pined place pastTense = placed plan pastTense = planned plant pastTense = planted play pastTense = played please pastTense = pleased plug pastTense = plugged point pastTense = pointed poke pastTense = poked polish pastTense = polished pop pastTense = popped possess pastTense = possessed post pastTense = posted pour pastTense = poured practise pastTense = practised pray pastTense = prayed preach pastTense = preached precede pastTense = preceded prefer pastTense = preferred prepare pastTense = prepared present pastTense = presented preserve pastTense = preserved press pastTense = pressed pretend pastTense = pretended prevent pastTense = prevented prick pastTense = pricked print pastTense = printed produce pastTense = produced program pastTense = programmed promise pastTense = promised protect pastTense = protected provide pastTense = provided pull pastTense = pulled pump pastTense = pumped punch pastTense = punched puncture pastTense = punctured punish pastTense = punished push pastTense = pushed question pastTense = questioned queue pastTense = questioned race pastTense = raced radiate pastTense = radiated rain pastTense = rained raise pastTense = raised reach pastTense = reached realise pastTense = realised receive pastTense = received recognise pastTense = recognised record pastTense = recorded reduce pastTense = reduced reflect pastTense = reflected refuse pastTense = refused regret pastTense = regretted reign pastTense = reigned reject pastTense = rejected rejoice pastTense = rejoiced relax pastTense = relaxed release pastTense = released rely pastTense = relied remain pastTense = remained remember pastTense = remembered remind pastTense = reminded remove pastTense = removed repair pastTense = repaired repeat pastTense = repeated replace pastTense = replaced reply pastTense = replied report pastTense = reported reproduce pastTense = reproduced request pastTense = requested rescue pastTense = rescued retire pastTense = retired return pastTense = returned rhyme pastTense = rhyme rinse pastTense = rinsed risk pastTense = risked rob pastTense = robbed rock pastTense = rocked roll pastTense = rolled rot pastTense = rotted rub pastTense = rubbed ruin pastTense = ruined rule pastTense = ruled rush pastTense = rushed sack pastTense = sacked sail pastTense = sailed satisfy pastTense = satisfied save pastTense = saved saw pastTense = sawed scare pastTense = scared scatter pastTense = scattered scold pastTense = scolded scorch pastTense = scorched scrape pastTense = scraped scratch pastTense = scratched scream pastTense = screamed screw pastTense = screwed scribble pastTense = scribbled scrub pastTense = scrubbed seal pastTense = sealed search pastTense = searched separate pastTense = separate serve pastTense = served settle pastTense = settled shade pastTense = shaded share pastTense = shared shave pastTense = shaved shelter pastTense = sheltered shiver pastTense = shivered shock pastTense = shocked shop pastTense = shopped shrug pastTense = shrugged sigh pastTense = sighed sign pastTense = signed signal pastTense = signalled sin pastTense = sinned sip pastTense = sipped ski pastTense = skied skip pastTense = skipped slap pastTense = slapped slip pastTense = slipped slow pastTense = slowed smash pastTense = smashed smell pastTense = smelled smile pastTense = smiled smoke pastTense = smoked snatch pastTense 
= snatched sneeze pastTense = sneezed sniff pastTense = sniffed snore pastTense = snored snow pastTense = snowed soak pastTense = soaked soothe pastTense = soothed sound pastTense = sounded spare pastTense = spared spark pastTense = sparked sparkle pastTense = sparkled spell pastTense = spelled spill pastTense = spilled spoil pastTense = spoiled spot pastTense = spotted spray pastTense = sprayed sprout pastTense = sprouted squash pastTense = squashed squeak pastTense = squeaked squeal pastTense = squealed squeeze pastTense = squeezed stain pastTense = stained stamp pastTense = stamped stare pastTense = stared start pastTense = started stay pastTense = stayed steer pastTense = steered step pastTense = stepped stir pastTense = stirred stitch pastTense = stitched stop pastTense = stopped store pastTense = stored strap pastTense = strapped strengthen pastTense = strengthened stretch pastTense = stretched strip pastTense = stripped stroke pastTense = stroked stuff pastTense = stuffed subtract pastTense = subtracted succeed pastTense = succeeded suck pastTense = sucked suffer pastTense = suffered suggest pastTense = suggested suit pastTense = suited supply pastTense = supplied support pastTense = supported suppose pastTense = supposed surprise pastTense = surprised surround pastTense = surrounded suspect pastTense = suspected suspend pastTense = suspended switch pastTense = switched talk pastTense = talked tame pastTense = tamed tap pastTense = tapped taste pastTense = tasted tease pastTense = teased telephone pastTense = telephoned tempt pastTense = tempted terrify pastTense = terrified test pastTense = tested thank pastTense = thanked thaw pastTense = thawed tick pastTense = ticked tickle pastTense = tickled tie pastTense = tied time pastTense = timed tip pastTense = tipped tire pastTense = tired touch pastTense = touched tour pastTense = toured tow pastTense = towed trace pastTense = traced trade pastTense = traded train pastTense = trained transport pastTense = transported trap pastTense = trapped travel pastTense = travelled treat pastTense = treated tremble pastTense = trembled trick pastTense = tricked trip pastTense = tripped trot pastTense = trotted trouble pastTense = troubled trust pastTense = trusted try pastTense = tried tug pastTense = tugged tumble pastTense = tumbled turn pastTense = turned twist pastTense = twisted type pastTense = typed undress pastTense = undressed unfasten pastTense = unfastened unite pastTense = united unlock pastTense = unlocked unpack pastTense = unpacked use pastTense = used vanish pastTense = vanished visit pastTense = visited wail pastTense = wailed wait pastTense = waited walk pastTense = walked wander pastTense = wandered want pastTense = wanted warm pastTense = warmed warn pastTense = warned wash pastTense = washed waste pastTense = wasted watch pastTense = watched water pastTense = watered wave pastTense = waved weigh pastTense = weighed welcome pastTense = welcomed whine pastTense = whined whip pastTense = whipped whirl pastTense = whirled whisper pastTense = whispered whistle pastTense = whistled wink pastTense = winked wipe pastTense = wiped wish pastTense = wished wobble pastTense = wobbled wonder pastTense = wondered work pastTense = worked worry pastTense = worried wrap pastTense = wrapped wreck pastTense = wrecked wrestle pastTense = wrestled wriggle pastTense = wriggled x-ray pastTense = x-rayed yawn pastTense = yawned yell pastTense = yelled zip pastTense = zipped zoom pastTense = zoomed adjective abashed aberrant abhorrent abiding ablaze 
abnormal aboard aboriginal abortive abounding abrasive abrupt absent absolute absorbed absorbing abstracted absurd abundant abusive academic acceptable accessible accidental acclaimed accomplished accurate aching acidic acoustic acrid acrobatic active ad hoc adamant adaptable addicted adept adhesive adjoining admirable admired adolescent adorable adored advanced adventurous affectionate afraid aged aggravating aggressive agile agitated agonizing agreeable ahead ajar alarmed alarming alcoholic alert alienated alive alleged alluring aloof altruistic amazing ambiguous ambitious amiable amuck amused amusing anchored ancient angelic angry anguished animated annoyed annoying annual another antique antsy anxious apathetic appetizing apprehensive appropriate apt aquatic arctic arid aromatic arrogant artistic ashamed aspiring assorted assured astonishing athletic attached attentive attractive auspicious austere authentic authorized automatic available avaricious average awake aware awesome awful awkward axiomatic babyish bad baggy barbarous bare barren bashful basic batty bawdy beautiful beefy befitting belated belligerent beloved beneficial bent berserk better bewildered bewitched big big-hearted billowy biodegradable bite-sized biting bitter bizarre black black-and-white bland blank blaring bleak blind blissful blond bloody blue blue-eyed blushing bogus boiling bold bony boorish bored boring bossy both bouncy boundless bountiful bowed brainy brash brave brawny breakable breezy brief bright brilliant brisk broad broken bronze brown bruised bubbly bulky bumpy buoyant burdensome burly bustling busy buttery buzzing cagey calculating callous calm candid canine capable capital capricious carefree careful careless caring cautious cavernous ceaseless celebrated certain changeable charming cheap cheeky cheerful cheery chemical chief childlike chilly chivalrous chubby chunky circular clammy classic classy clean clear clear-cut clever cloistered closed cloudy clueless clumsy cluttered coarse coherent cold colorful colorless colossal colossal combative comfortable common compassionate competent complete complex complicated composed concerned concrete condemned condescending confused conscious considerate constant contemplative content conventional convincing convoluted cooing cooked cool cooperative coordinated corny corrupt costly courageous courteous cowardly crabby crafty craven crazy creamy creative creepy criminal crisp critical crooked crowded cruel crushing cuddly cultivated cultured cumbersome curious curly curved curvy cute cylindrical cynical daffy damaged damaging damp dangerous dapper dapper daring dark darling dashing dazzling dead deadly deadpan deafening dearest debonair decayed deceitful decent decimal decisive decorous deep defeated defective defenseless defensive defiant deficient definite delayed delectable delicate delicious delightful delirious demanding demonic dense dental dependable dependent depraved depressed deranged descriptive deserted despicable detailed determined devilish devoted didactic different difficult digital dilapidated diligent dim diminutive dimpled dimwitted direct direful dirty disagreeable disastrous discreet discrete disfigured disguised disgusted disgusting dishonest disillusioned disloyal dismal dispensable distant distinct distorted distraught distressed disturbed divergent dizzy domineering dopey doting double doubtful downright drab draconian drafty drained dramatic dreary droopy drunk dry dual dull dusty dutiful dynamic dysfunctional eager early earnest 
earsplitting earthy easy-going economic ecstatic edible educated efficacious efficient elaborate elastic elated elderly electric elegant elementary elfin elite elliptical emaciated embarrassed embellished eminent emotional empty enchanted enchanting encouraging endurable energetic enlightened enormous enraged entertaining enthusiastic entire envious envious equable equatorial erect erratic essential esteemed ethereal ethical euphoric evanescent evasive even evergreen everlasting evil exalted exasperated excellent excitable excited exciting exclusive exemplary exhausted exhilarated exotic expensive experienced expert extensive extra-large extraneous extra-small extroverted exuberant exultant fabulous faded failing faint fair faithful fake fallacious false familiar famous fanatical fancy fantastic faraway far-flung far-off fascinated fast fat fatal fatherly faulty favorable favorite fearful fearless feeble feigned feisty feline female feminine fertile festive fickle fierce filthy fine finicky finished firm first firsthand fitting fixed flagrant flaky flamboyant flashy flat flawed flawless flickering flimsy flippant floppy flowery fluffy fluid flustered fluttering foamy focused fond foolhardy foolish forceful foregoing forgetful forked formal forsaken forthright fortunate fragile fragrant frail frantic frayed free freezing French frequent fresh fretful friendly frightened frightening frigid frilly frivolous frizzy frosty frothy frozen frugal fruitful frustrating full fumbling fumbling functional funny furry furtive fussy future futuristic fuzzy gabby gainful gamy gaping gargantuan garrulous gaseous gaudy generous gentle genuine ghastly giant giddy gifted gigantic giving glamorous glaring gleaming gleeful glib glistening glittering gloomy glorious glossy glum godly golden good good-natured goofy gorgeous graceful gracious grand grandiose grandiose granular grateful grave gray greasy great greedy green gregarious grey grieving grim grimy gripping grizzled groovy gross grotesque grouchy grounded growing growling grown grubby gruesome grumpy guarded guiltless guilty gullible gummy gusty guttural habitual hairy hallowed halting handmade handsome handy hanging hapless happy happy-go-lucky hard hard-to-find harebrained harmful harmless harmonious harsh hasty hateful haunting heady healthy heartbreaking heartfelt hearty heavenly heavy hefty hellish helpful helpless hesitant hidden hideous high highfalutin high-level high-pitched hilarious hissing historical hoarse holistic hollow homeless homely honest honorable honored hopeful horrible horrific hospitable hot huge hulking humble humdrum humiliating humming humongous humorous hungry hurried hurt hurtful hushed husky hypnotic hysterical icky icy ideal ideal idealistic identical idiotic idle idolized ignorant ill illegal ill-fated ill-informed illiterate illustrious imaginary imaginative immaculate immaterial immediate immense imminent impartial impassioned impeccable imperfect imperturbable impish impolite important imported impossible impractical impressionable impressive improbable impure inborn incandescent incomparable incompatible incompetent incomplete inconclusive inconsequential incredible indelible indolent industrious inexpensive inexperienced infamous infantile infatuated inferior infinite informal innate innocent inquisitive insecure insidious insignificant insistent instinctive instructive insubstantial intelligent intentional interesting internal international intrepid intrigued invincible irate ironclad irresponsible irritable 
irritating itchy jaded jagged jam-packed jaunty jazzy jealous jittery jobless jolly jovial joyful joyous jubilant judicious juicy jumbled jumbo jumpy jumpy junior juvenile kaleidoscopic kaput keen kind kindhearted kindly klutzy knobby knotty knowing knowledgeable kooky kosher labored lackadaisical lacking lame lamentable languid lanky large lasting late laughable lavish lawful lazy leading leafy lean learned left legal legitimate lethal level lewd light lighthearted likable likeable likely limited limp limping linear lined liquid literate little live lively livid living loathsome lone lonely long longing long-term loose lopsided lost loud loutish lovable lovely loving low lowly loyal lucky ludicrous lumbering luminous lumpy lush lustrous luxuriant luxurious lying lyrical macabre macho mad maddening made-up magenta magical magnificent majestic major makeshift male malicious mammoth maniacal marked married marvelous masculine massive material materialistic mature meager mealy mean measly meaty medical mediocre medium meek melancholy mellow melodic melted memorable menacing merciful mere merry messy metallic mighty mild military milky mindless miniature minor minty minute miscreant miserable miserly misguided mistaken misty mixed moaning modern modest moist moldy momentous monstrous monumental moody moral mortified motherly motionless mountainous muddled muddy muffled multicolored mundane mundane murky mushy musty mute muted mysterious naive narrow nasty natural naughty nauseating nautical neat nebulous necessary needless needy negative neglected negligible neighboring neighborly nervous nervous new next nice nice nifty nimble nine nippy nocturnal noiseless noisy nonchalant nondescript nonsensical nonstop normal nostalgic nosy notable noted noteworthy novel noxious numb numberless numerous nutritious nutty oafish obedient obeisant obese oblivious oblong obnoxious obscene obsequious observant obsolete obtainable obvious occasional oceanic odd oddball offbeat offensive official oily old old-fashioned omniscient onerous open opposite optimal optimistic opulent orange orderly ordinary organic original ornate ornery ossified outgoing outlandish outlying outrageous outstanding oval overconfident overcooked overdue overjoyed overlooked overrated overt overwrought painful painstaking palatable pale paltry panicky panoramic parallel parched parsimonious partial passionate pastel pastoral pathetic peaceful penitent peppery perfect perfumed periodic perky permissible perpetual perplexed personal pertinent pesky pessimistic petite petty petty phobic phony physical picayune piercing pink piquant pitiful placid plain plaintive plant plastic plausible playful pleasant pleased pleasing plucky plump plush pointed pointless poised polished polite political pompous poor popular portly posh positive possessive possible potable powerful powerless practical precious premium present present prestigious pretty previous pricey prickly primary prime pristine private prize probable productive profitable profuse proper protective proud prudent psychedelic psychotic public puffy pumped punctual pungent puny pure purple purring pushy pushy putrid puzzled puzzling quack quaint quaint qualified quarrelsome quarterly queasy querulous questionable quick quickest quick-witted quiet quintessential quirky quixotic quixotic quizzical rabid racial radiant ragged rainy rambunctious rampant rapid rare rash raspy ratty raw ready real realistic reasonable rebel recent receptive reckless recondite rectangular red redundant reflecting 
reflective regal regular reliable relieved remarkable reminiscent remorseful remote repentant repulsive required resolute resonant respectful responsible responsive revolving rewarding rhetorical rich right righteous rightful rigid ringed ripe ritzy roasted robust romantic roomy rosy rotating rotten rotund rough round rowdy royal rubbery ruddy rude rundown runny rural rustic rusty ruthless sable sad safe salty sandy sane sarcastic sardonic sassy satisfied satisfying savory scaly scandalous scant scarce scared scary scattered scented scholarly scientific scintillating scornful scratchy scrawny screeching secondary second-hand secret secretive sedate seemly selective self-assured selfish self-reliant sentimental separate serene serious serpentine several severe shabby shadowy shady shaggy shaky shallow shameful shameless sharp shimmering shiny shivering shocked shocking shoddy short short-term showy shrill shy sick silent silky silly silver similar simple simplistic sincere sinful single six sizzling skeletal skillful skinny sleepy slight slim slimy slippery sloppy slow slushy small smarmy smart smelly smiling smoggy smooth smug snappy snarling sneaky sniveling snobbish snoopy snotty sociable soft soggy solid somber some sophisticated sordid sore sorrowful soulful soupy sour sour Spanish sparkling sparse special specific spectacular speedy spherical spicy spiffy spiky spirited spiritual spiteful splendid spooky spotless spotted spotty spry spurious squalid square squeaky squealing squeamish squiggly stable staid stained staking stale standard standing starchy stark starry statuesque steadfast steady steel steep stereotyped sticky stiff stimulating stingy stormy stout straight strange strict strident striking striped strong studious stunning stunning stupendous stupid sturdy stylish subdued submissive subsequent substantial subtle suburban successful succinct succulent sudden sugary sulky sunny super superb superficial superior supportive supreme sure-footed surprised suspicious svelte swanky sweaty sweet sweltering swift sympathetic symptomatic synonymous taboo tacit tacky talented talkative tall tame tan tangible tangy tart tasteful tasteless tasty tattered taut tawdry tearful tedious teeming teeny teeny-tiny telling temporary tempting tender tense tenuous tepid terrible terrific tested testy thankful therapeutic thick thin thinkable thirsty thorny thorough thoughtful thoughtless threadbare threatening thrifty thundering thunderous tidy tight tightfisted tinted tiny tired tiresome toothsome torn torpid total tough towering tragic trained tranquil trashy traumatic treasured tremendous triangular tricky trifling trite trivial troubled truculent true trusting trustworthy trusty truthful tubby turbulent twin two typical ubiquitous ugliest ugly ultimate ultra unaccountable unarmed unaware unbecoming unbiased uncomfortable uncommon unconscious uncovered understated understood undesirable unequal unequaled uneven unfinished unfit unfolded unfortunate unhappy unhealthy uniform unimportant uninterested unique united unkempt unknown unlawful unlined unlucky unnatural unpleasant unrealistic unripe unruly unselfish unsightly unsteady unsuitable unsung untidy untried untrue unused unusual unwelcome unwieldy unwitting unwritten upbeat uppity upright upset uptight urban usable used used useful useless utilized utopian utter uttermost vacant vacuous vagabond vague vain valid valuable vapid variable various vast velvety venerated vengeful venomous verdant verifiable versed vexed vibrant vicious victorious 
vigilant vigorous villainous violent violet virtual virtuous visible vital vivacious vivid voiceless volatile voluminous voracious vulgar wacky waggish waiting wakeful wandering wanting warlike warm warmhearted warped wary wasteful watchful waterlogged watery wavy weak wealthy weary webbed wee weekly weepy weighty weird welcome well-documented well-groomed well-informed well-lit well-made well-off well-to-do well-worn wet which whimsical whirlwind whispered whispering white whole wholesale whopping wicked wide wide-eyed wiggly wild willing wilted winding windy winged wiry wise wistful witty wobbly woebegone woeful womanly wonderful wooden woozy wordy workable worldly worn worried worrisome worse worst worthless worthwhile worthy wrathful wretched writhing wrong wry xenophobic yawning yearly yellow yellowish yielding young youthful yummy zany zealous zesty zigzag zippy zonked adverb abnormally absentmindedly accidentally acidly actually adventurously afterwards almost always angrily annually anxiously arrogantly awkwardly badly bashfully beautifully bitterly bleakly blindly blissfully boastfully boldly bravely briefly brightly briskly broadly busily calmly carefully carelessly cautiously certainly cheerfully clearly cleverly closely coaxingly colorfully commonly continually coolly correctly courageously crossly cruelly curiously daily daintily dearly deceivingly deeply defiantly deliberately delightfully diligently dimly doubtfully dreamily easily elegantly energetically enormously enthusiastically equally especially evenly eventually exactly excitedly extremely fairly faithfully famously fatally ferociously fervently fiercely fondly foolishly fortunately frankly frantically freely frenetically frightfully fully furiously generally generously gently gladly gleefully gracefully gratefully greatly greedily happily hastily healthily heavily helpfully helplessly highly honestly hopelessly hourly hungrily immediately innocently inquisitively instantly intensely intently interestingly inwardly irritably jaggedly jealously joshingly jovially joyfully joyously jubilantly judgementally justly keenly kiddingly kindheartedly kindly kissingly knavishly knottily knowingly knowledgeably kookily lazily lightly likely limply lively loftily longingly loosely loudly lovingly loyally madly majestically meaningfully mechanically merrily miserably mockingly monthly mortally mostly mysteriously naturally nearly neatly needily nervously nicely noisily obediently obnoxiously oddly offensively officially often only openly optimistically overconfidently owlishly painfully partially patiently perfectly physically playfully politely poorly positively potentially powerfully promptly properly punctually quaintly quarrelsomely queasily queerly questionably questioningly quicker quickly quietly quirkily quizzically rapidly rarely readily really reassuringly recklessly regularly reluctantly repeatedly reproachfully restfully righteously rightfully rigidly roughly rudely sadly safely scarcely scarily searchingly sedately seemingly seldom selfishly separately seriously shakily sharply sheepishly shrilly shyly silently sleepily slowly smoothly softly solemnly solidly sometimes soon speedily stealthily sternly strictly successfully suddenly surprisingly suspiciously sweetly swiftly sympathetically tenderly tensely terribly thankfully thoroughly thoughtfully tightly tomorrow tremendously triumphantly truly truthfully ultimately unabashedly unaccountably unbearably unethically unexpectedly unfortunately unimpressively 
unnaturally unnecessarily upbeat upliftingly upright upside-down upward upwardly urgently usefully uselessly usually utterly vacantly vaguely vainly valiantly vastly verbally viciously victoriously violently vivaciously voluntarily warmly weakly wearily wetly wholly wildly willfully wisely woefully wonderfully worriedly wrongly yawningly yearly yearningly yesterday yieldingly youthfully preposition as at but by down for from in into like near next of off on onto out over past plus minus since than to up with aboard about above across after against along around before behind below beneath beside between beyond during except following inside minus onto opposite outside round since through toward under underneath unlike until upon without according to along with alongside among apart from as for atop because of by means of concerning despite except for in addition to in back of in case of in front of in place of in spite of instead of on top of out of regarding throughout till up to via within worth interjection aah ack agreed ah aha ahem alas all right amen argh as if aw ay aye bah blast boo hoo bother boy brr by golly bye cheerio cheers chin up come on crikey curses dear me doggone drat duh easy does it eek egads er exactly fair enough fiddle-dee-dee fiddlesticks fie foo fooey gadzooks gah gangway g'day gee gee whiz geez gesundheit get lost get outta here go on good good golly good job gosh gracious great grr gulp ha ha-ha hah hallelujah harrumph haw hee here hey hmm ho hum hoo hooray hot dog how huh hum humbug hurray huzza I say ick is it ixnay jeez just kidding just a sec just wondering kapish la la-di-dah lo look look here long time lordy man meh mmm most certainly my my my my word nah naw never no no can do nooo not no thanks no way nuts oh oho oh-oh oh no okay okey-dokey om oof ooh oopsey over oy oyez peace pff pew phew pish posh psst ptui quite rah rats ready right right on roger roger that rumble say see ya shame shh shoo shucks sigh sleep tight snap sorry sssh sup ta ta-da ta ta take that tally ho tch thanks there there there time out toodles touche tsk tsk-tsk tut tut-tut ugh uh uh-oh um ur urgh very nice very well voila vroom wah well well done well, well what whatever whee when whoa whoo whoopee whoops whoopsey whew why word wow wuzzup ya yea yeah yech yikes yippee yo yoo-hoo you bet you don't say you know yow yum yummy zap zounds zowie zzz colors White Blue Black Red Green Colorless Multi-color scry_powers "-1", "?", "0", "∞", "*", "+0", "*²", ".5", "+1", "1+*", "1", "1.5", "+2", "2", "2+*", "2.5", "3", "+3", "3.5", "4", "+4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "15", "16", "20", "99" scry_toughness "-1", "+0", "*²", "-0", "?", "0", "*+1", "*", ".5", "+1", "1+*", "1", "1.5", "2+*", "+2", "2", "2.5", "+3", "3", "3.5", "4", "+4", "5", "6", "7-*", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "20", "99" scry_keyword_abilities "Living weapon", "Jump-start", "Basic landcycling", "Commander ninjutsu", "Legendary landwalk", "Nonbasic landwalk", "Totem armor", "Megamorph", "Haunt", "Forecast", "Graft", "Fortify", "Frenzy", "Gravestorm", "Hideaway", "Level Up", "Infect", "Reach", "Rampage", "Phasing", "Multikicker", "Morph", "Provoke", "Modular", "Ninjutsu", "Replicate", "Recover", "Poisonous", "Prowl", "Reinforce", "Persist", "Retrace", "Rebound", "Miracle", "Overload", "Outlast", "Prowess", "Renown", "Myriad", "Shroud", "Trample", "Vigilance", "Shadow", "Storm", "Soulshift", "Splice", "Transmute", "Ripple", "Suspend", "Vanishing", "Transfigure", "Wither", 
"Undying", "Soulbond", "Unleash", "Ascend", "Assist", "Afterlife", "Companion", "Fabricate", "Embalm", "Escape", "Fuse", "Menace", "Ingest", "Melee", "Improvise", "Mentor", "Partner", "Mutate", "Scavenge", "Tribute", "Surge", "Skulk", "Undaunted", "Riot", "Spectacle", "Forestwalk", "Islandwalk", "Mountainwalk", "Double strike", "Cumulative upkeep", "First strike", "Encore", "Sunburst", "Deathtouch", "Defender", "Foretell", "Amplify", "Affinity", "Bushido", "Convoke", "Bloodthirst", "Absorb", "Aura Swap", "Changeling", "Conspire", "Cascade", "Annihilator", "Battle Cry", "Cipher", "Bestow", "Dash", "Awaken", "Crew", "Aftermath", "Afflict", "Flanking", "Echo", "Fading", "Fear", "Eternalize", "Entwine", "Epic", "Dredge", "Delve", "Evoke", "Exalted", "Evolve", "Extort", "Dethrone", "Exploit", "Devoid", "Emerge", "Escalate", "Flying", "Haste", "Hexproof", "Indestructible", "Intimidate", "Lifelink", "Horsemanship", "Kicker", "Madness", "Hidden agenda", "Swampwalk", "Desertwalk", "Wizardcycling", "Slivercycling", "Cycling", "Landwalk", "Plainswalk", "Champion", "Enchant", "Plainscycling", "Islandcycling", "Swampcycling", "Mountaincycling", "Forestcycling", "Landcycling", "Typecycling", "Split second", "Flash", "Banding", "Augment", "Double agenda", "Partner with", "Hexproof from", "Boast", "Buyback", "Ward", "Demonstrate", "Devour", "Flashback", "Equip", "Reconfigure", "Compleated", "Daybound", "Nightbound", "Decayed", "Disturb", "Training", "Cleave", "Intensity", "Blitz", "Casualty", "Friends forever", "Protection", "Offering", "Enlist", "Read Ahead", "Squad", "Ravenous", "More Than Meets the Eye", "Living metal", "Unearth", "Prototype" scry_keyword_actions "Meld", "Bolster", "Clash", "Fateseal", "Manifest", "Monstrosity", "Populate", "Proliferate", "Scry", "Support", "Detain", "Explore", "Fight", "Amass", "Adapt", "Assemble", "Abandon", "Activate", "Attach", "Seek", "Cast", "Counter", "Create", "Destroy", "Discard", "Double", "Exchange", "Exile", "Investigate", "Play", "Regenerate", "Reveal", "Sacrifice", "Set in motion", "Shuffle", "Tap", "Untap", "Vote", "Transform", "Surveil", "Goad", "Planeswalk", "Mill", "Learn", "Conjure", "Exert", "Connive", "Venture into the dungeon", "Convert", "Open an Attraction", "Roll to Visit Your Attractions" scry_ability_words "Battalion", "Bloodrush", "Channel", "Chroma", "Cohort", "Constellation", "Converge", "Delirium", "Domain", "Fateful hour", "Ferocious", "Formidable", "Grandeur", "Hellbent", "Heroic", "Imprint", "Inspired", "Join forces", "Kinship", "Landfall", "Lieutenant", "Metalcraft", "Morbid", "Parley", "Radiance", "Raid", "Rally", "Spell mastery", "Strive", "Sweep", "Tempting offer", "Threshold", "Will of the council", "Adamant", "Addendum", "Council's dilemma", "Eminence", "Enrage", "Hero's Reward", "Kinfall", "Landship", "Legacy", "Revolt", "Underdog", "Undergrowth", "Magecraft", "Teamwork", "Pack tactics", "Coven", "Alliance
Ann2020/distilbert-base-uncased-finetuned-ner
[ "pytorch", "tensorboard", "distilbert", "token-classification", "dataset:conll2003", "transformers", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "DistilBertForTokenClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- tags: - conversational --- # Peter GriffinV2 DialoGPT Model
Ann2020/model-finetuned-ner
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - autotrain - vision - image-classification datasets: - molsen/autotrain-data-genderage widget: - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg example_title: Tiger - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg example_title: Teapot - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg example_title: Palace co2_eq_emissions: emissions: 8.240977060159542 --- # Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 2709480568 - CO2 Emissions (in grams): 8.2410 ## Validation Metrics - Loss: 1.277 - Accuracy: 0.560 - Macro F1: 0.560 - Micro F1: 0.560 - Weighted F1: 0.560 - Macro Precision: 0.570 - Micro Precision: 0.560 - Weighted Precision: 0.570 - Macro Recall: 0.560 - Micro Recall: 0.560 - Weighted Recall: 0.560
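The card stops at the validation metrics, so a usage sketch may help; it assumes the checkpoint is a standard `transformers` image-classification model, and the repo id below is inferred from the dataset and model ID above (it may not match the actual repo name):

```python
from transformers import pipeline

# Repo id inferred from the AutoTrain dataset/model IDs above (hypothetical).
classifier = pipeline(
    "image-classification", model="molsen/autotrain-genderage-2709480568"
)

# Accepts a local path, URL, or PIL image.
for pred in classifier("photo.jpg"):
    print(f"{pred['label']}: {pred['score']:.3f}")
```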
Ann2020/rubert-base-cased-finetuned-ner
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Huggy library_name: ml-agents --- # **ppo** Agent playing **Huggy** This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub. ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy 2. Write your model_id: Rschmaelzle/ppo-Huggy 3. Select your *.nn or *.onnx file 4. Click on Watch the agent play 👀
Ann2020/rubert-base-cased-sentence-finetuned-ner
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 567.50 +/- 187.47 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga Augcos -f logs/ python enjoy.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga Augcos -f logs/ rl_zoo3 enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python train.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga Augcos ``` ## Hyperparameters ```python OrderedDict([('batch_size', 32), ('buffer_size', 100000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.0001), ('learning_starts', 100000), ('n_timesteps', 1000000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ```
Ann2020/rubert-base-cased-sentence-finetuned-ner_tags
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional image generation of cute 🦋. ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('toinsson/sd-class-butterflies-32') image = pipeline().images[0] image ```
Anonymous0230/model_name
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - vi license: apache-2.0 tags: - hf-asr-leaderboard - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 model-index: - name: HuyenNguyen results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # HuyenNguyen This model is a fine-tuned version of [Huyen2310/FPT-S15000](https://huggingface.co/Huyen2310/FPT-S15000) on the Common Voice 11.0 dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 450 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0+cu116 - Datasets 2.8.0 - Tokenizers 0.13.2
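The card lists only training details; a hedged inference sketch follows, assuming the checkpoint is a Whisper-style speech-recognition model compatible with the `transformers` pipeline (the repo id below is the base model named in the card — substitute the fine-tuned checkpoint's actual id):

```python
from transformers import pipeline

# Base-model repo id from the card; swap in the fine-tuned checkpoint's id.
asr = pipeline("automatic-speech-recognition", model="Huyen2310/FPT-S15000")

# Path to a Vietnamese audio clip (wav/mp3/flac).
result = asr("sample.wav")
print(result["text"])
```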
AnonymousSub/AR_EManuals-BERT
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- tags: - BeamRiderNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: C51 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: BeamRiderNoFrameskip-v4 type: BeamRiderNoFrameskip-v4 metrics: - type: mean_reward value: 5873.40 +/- 1897.78 name: mean_reward verified: false --- # (CleanRL) **C51** Agent Playing **BeamRiderNoFrameskip-v4** This is a trained model of a C51 agent playing BeamRiderNoFrameskip-v4. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/c51_atari_jax.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[c51_atari_jax]" python -m cleanrl_utils.enjoy --exp-name c51_atari_jax --env-id BeamRiderNoFrameskip-v4 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/kinalmehta/BeamRiderNoFrameskip-v4-c51_atari_jax-seed1/raw/main/c51_atari_jax.py curl -OL https://huggingface.co/kinalmehta/BeamRiderNoFrameskip-v4-c51_atari_jax-seed1/raw/main/pyproject.toml curl -OL https://huggingface.co/kinalmehta/BeamRiderNoFrameskip-v4-c51_atari_jax-seed1/raw/main/poetry.lock poetry install --all-extras python c51_atari_jax.py --save-model --upload-model --hf-entity kinalmehta --env-id BeamRiderNoFrameskip-v4 ``` # Hyperparameters ```python {'batch_size': 32, 'buffer_size': 1000000, 'capture_video': False, 'end_e': 0.01, 'env_id': 'BeamRiderNoFrameskip-v4', 'exp_name': 'c51_atari_jax', 'exploration_fraction': 0.1, 'gamma': 0.99, 'hf_entity': 'kinalmehta', 'learning_rate': 0.00025, 'learning_starts': 80000, 'n_atoms': 51, 'save_model': True, 'seed': 1, 'start_e': 1, 'target_network_frequency': 10000, 'total_timesteps': 10000000, 'track': False, 'train_frequency': 4, 'upload_model': True, 'v_max': 10, 'v_min': -10, 'wandb_entity': None, 'wandb_project_name': 'cleanRL'} ```
AnonymousSub/AR_bert-base-uncased
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
This model classifies the sentiment of scientific text based on its context (i.e., text from scientific journals) into negative (n), positive (p), and neutral (o) classes.
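A minimal usage sketch, assuming the checkpoint is a standard `transformers` text-classification model exposing the n/p/o labels described above; the repo id is a placeholder:

```python
from transformers import pipeline

# Placeholder repo id; replace with the actual checkpoint.
clf = pipeline("text-classification", model="<repo-id>")

sentence = "The proposed method significantly outperforms prior baselines."
print(clf(sentence))  # e.g. [{'label': 'p', 'score': 0.91}]
```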
AnonymousSub/AR_rule_based_hier_triplet_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - rouge model-index: - name: distilbart-podimo-data-eval-2-2e results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbart-podimo-data-eval-2-2e This model is a fine-tuned version of [sshleifer/distilbart-cnn-12-6](https://huggingface.co/sshleifer/distilbart-cnn-12-6) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 3.7374 - Rouge1: 32.9705 - Rouge2: 6.9494 - Rougel: 17.922 - Rougelsum: 29.4629 - Gen Len: 137.5363 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - gradient_accumulation_steps: 64 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-------:|:------:|:-------:|:---------:|:--------:| | 4.1649 | 0.98 | 44 | 3.8138 | 32.12 | 6.544 | 17.5999 | 28.8314 | 136.4553 | | 3.6772 | 1.98 | 88 | 3.7374 | 32.9705 | 6.9494 | 17.922 | 29.4629 | 137.5363 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.11.0 - Datasets 2.2.1 - Tokenizers 0.12.1
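No inference example is given in the card; a hedged sketch follows, assuming the fine-tuned checkpoint is published under a repo id matching the model name above (placeholder below):

```python
from transformers import pipeline

# Placeholder repo id based on the model name in the card.
summarizer = pipeline(
    "summarization", model="<user>/distilbart-podimo-data-eval-2-2e"
)

document = "..."  # a long transcript or article to summarize
summary = summarizer(document, max_length=150, min_length=40, do_sample=False)
print(summary[0]["summary_text"])
```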
AnonymousSub/AR_rule_based_roberta_bert_quadruplet_epochs_1_shard_10
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 622.50 +/- 139.04 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga keshan -f logs/ python enjoy.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga keshan -f logs/ rl_zoo3 enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python train.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga keshan ``` ## Hyperparameters ```python OrderedDict([('batch_size', 32), ('buffer_size', 100000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.0001), ('learning_starts', 100000), ('n_timesteps', 3000000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ```
AnonymousSub/AR_rule_based_twostagetriplet_hier_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- license: mit --- ### egorey on Stable Diffusion This is the `<gorey>` concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the [Stable Conceptualizer](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) notebook. You can also train your own concepts and load them into the concept libraries using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb). Here is the new concept you will be able to use as a `style`: ![<gorey> 0](https://huggingface.co/sd-concepts-library/egorey/resolve/main/concept_images/0.jpeg) ![<gorey> 1](https://huggingface.co/sd-concepts-library/egorey/resolve/main/concept_images/3.jpeg) ![<gorey> 2](https://huggingface.co/sd-concepts-library/egorey/resolve/main/concept_images/2.jpeg) ![<gorey> 3](https://huggingface.co/sd-concepts-library/egorey/resolve/main/concept_images/1.jpeg)
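Beyond the linked notebooks, recent versions of `diffusers` can load a concept directly with `load_textual_inversion`; a sketch assuming the concept lives at `sd-concepts-library/egorey` (inferred from the image URLs above) and that a GPU is available:

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Concept repo inferred from the image URLs in this card.
pipe.load_textual_inversion("sd-concepts-library/egorey")

# The learned token <gorey> can now be used in prompts.
image = pipe("an ink drawing of a haunted house in the style of <gorey>").images[0]
image.save("gorey_style.png")
```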