modelId: string (length 4 to 81)
tags: list
pipeline_tag: string (17 classes)
config: dict
downloads: int64 (0 to 59.7M)
first_commit: timestamp[ns, tz=UTC]
card: string (length 51 to 438k)
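The columns above describe one row per Hub model. As a quick orientation, a minimal sketch of loading and inspecting a dump with this schema is shown below; the dataset path and split name are placeholders, not stated anywhere in this dump.

```python
# Hypothetical example: inspect a dump with the schema listed above.
# "your-username/hub-model-metadata" and the split name are placeholders.
from datasets import load_dataset

ds = load_dataset("your-username/hub-model-metadata", split="train")
row = ds[0]
print(row["modelId"], row["pipeline_tag"], row["downloads"])
print(row["card"][:200])  # "card" holds the raw README text (51 to 438k chars)
```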
bert-base-multilingual-cased
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "multilingual", "af", "sq", "ar", "an", "hy", "ast", "az", "ba", "eu", "bar", "be", "bn", "inc", "bs", "br", "bg", "my", "ca", "ceb", "ce", "zh", "cv", "hr", "cs", "da", "nl", "en", "et", "fi", "fr", "gl", "ka", "de", "el", "gu", "ht", "he", "hi", "hu", "is", "io", "id", "ga", "it", "ja", "jv", "kn", "kk", "ky", "ko", "la", "lv", "lt", "roa", "nds", "lm", "mk", "mg", "ms", "ml", "mr", "mn", "min", "ne", "new", "nb", "nn", "oc", "fa", "pms", "pl", "pt", "pa", "ro", "ru", "sco", "sr", "scn", "sk", "sl", "aze", "es", "su", "sw", "sv", "tl", "tg", "th", "ta", "tt", "te", "tr", "uk", "ud", "uz", "vi", "vo", "war", "cy", "fry", "pnb", "yo", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4,749,504
2022-12-10T11:20:30Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 259.18 +/- 20.19 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
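The usage block in the card above is left as a TODO with placeholder imports. A minimal sketch of what it might look like follows; the `repo_id` and `filename` are placeholders (the card does not state them), and `huggingface_sb3` plus a Box2D-enabled Gymnasium install are assumed.

```python
# Hedged completion of the card's TODO block; repo_id and filename are
# placeholders -- check the actual repository for the real file name.
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.env_util import make_vec_env
from stable_baselines3.common.evaluation import evaluate_policy

checkpoint = load_from_hub(repo_id="your-username/ppo-LunarLander-v2",
                           filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)

eval_env = make_vec_env("LunarLander-v2", n_envs=1)
mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=10)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```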
bert-base-uncased
[ "pytorch", "tf", "jax", "rust", "safetensors", "bert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "exbert", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
59,663,489
2022-12-10T11:36:29Z
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Huggy library_name: ml-agents --- # **ppo** Agent playing **Huggy** This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy 2. Step 1: Write your model_id: HeineKayn/ppo-Huggy 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
bert-large-cased-whole-word-masking-finetuned-squad
[ "pytorch", "tf", "jax", "rust", "safetensors", "bert", "question-answering", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8,214
2022-12-10T11:42:22Z
--- license: mit --- ### ihylc on Stable Diffusion This is the `<ihylc>` concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the [Stable Conceptualizer](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) notebook. You can also train your own concepts and load them into the concept libraries using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb). Here is the new concept you will be able to use as a `style`: ![<ihylc> 0](https://huggingface.co/sd-concepts-library/ihylc/resolve/main/concept_images/1.jpeg) ![<ihylc> 1](https://huggingface.co/sd-concepts-library/ihylc/resolve/main/concept_images/0.jpeg) ![<ihylc> 2](https://huggingface.co/sd-concepts-library/ihylc/resolve/main/concept_images/2.jpeg)
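Beyond the notebooks linked in the card, the learned `<ihylc>` embedding can usually be pulled straight into a diffusers pipeline. The sketch below is an assumption-laden example, not taken from the card: it presumes a diffusers version that provides `load_textual_inversion` and uses `runwayml/stable-diffusion-v1-5` as the base model.

```python
# Hedged sketch: load the <ihylc> textual-inversion concept with diffusers.
# Assumes diffusers >= 0.14 (load_textual_inversion), a CUDA device, and the
# runwayml/stable-diffusion-v1-5 base checkpoint (an assumption, not from the card).
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
pipe.load_textual_inversion("sd-concepts-library/ihylc")

image = pipe("a quiet street corner in the style of <ihylc>").images[0]
image.save("ihylc_example.png")
```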
bert-large-cased-whole-word-masking
[ "pytorch", "tf", "jax", "bert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2,316
2022-12-10T11:43:08Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 253.24 +/- 22.99 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
bert-large-uncased-whole-word-masking-finetuned-squad
[ "pytorch", "tf", "jax", "safetensors", "bert", "question-answering", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
480,510
null
Access to model artinzareie/RikkaDiffusion is restricted and you are not in the authorized list. Visit https://huggingface.co/artinzareie/RikkaDiffusion to ask for access.
camembert-base
[ "pytorch", "tf", "safetensors", "camembert", "fill-mask", "fr", "dataset:oscar", "arxiv:1911.03894", "transformers", "license:mit", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "CamembertForMaskedLM" ], "model_type": "camembert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,440,898
2022-12-10T12:06:59Z
--- license: bigscience-bloom-rail-1.0 tags: - generated_from_trainer model-index: - name: bloom-560m-finetuned-the-stack-prolog results: [] widget: - text: '% Define un hecho que indica que "hello" es un saludo saludo("hello"). % Define una regla que indica que "world" es un objeto objeto("world"). % Define una regla que combina el saludo y el objeto para producir la salida "Hola mundo" hola_mundo :-' --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bloom-560m-finetuned-the-stack-prolog This model is a fine-tuned version of [bigscience/bloom-560m](https://huggingface.co/bigscience/bloom-560m) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.2433 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 1 - eval_batch_size: 2 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 4 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 1.2334 | 0.2 | 200 | 0.9993 | | 0.9174 | 0.4 | 400 | 0.7460 | | 0.7892 | 0.6 | 600 | 0.6046 | | 0.6805 | 0.8 | 800 | 0.4964 | | 0.5898 | 0.99 | 1000 | 0.4283 | | 0.411 | 1.19 | 1200 | 0.3721 | | 0.3705 | 1.39 | 1400 | 0.3182 | | 0.3516 | 1.59 | 1600 | 0.2795 | | 0.3298 | 1.79 | 1800 | 0.2528 | | 0.2721 | 1.99 | 2000 | 0.2433 | ### Framework versions - Transformers 4.24.0 - Pytorch 1.13.0+cu117 - Datasets 2.5.1 - Tokenizers 0.13.0
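The card gives training details and a Prolog widget prompt but no inference snippet. A minimal, hedged sketch with the `transformers` text-generation pipeline is below; the repo id is a placeholder inferred from the model name in the card, so verify it against the actual Hub path.

```python
# Hedged sketch: generate Prolog with the fine-tuned BLOOM-560m checkpoint.
# The repo id is a placeholder inferred from the model name in the card.
from transformers import pipeline

generator = pipeline("text-generation",
                     model="your-username/bloom-560m-finetuned-the-stack-prolog")
prompt = '% Define un hecho que indica que "hello" es un saludo\nsaludo("hello").\n'
print(generator(prompt, max_new_tokens=64)[0]["generated_text"])
```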
ctrl
[ "pytorch", "tf", "ctrl", "en", "arxiv:1909.05858", "arxiv:1910.09700", "transformers", "license:bsd-3-clause", "has_space" ]
null
{ "architectures": null, "model_type": "ctrl", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
17,007
2022-12-10T12:13:22Z
--- license: cc-by-4.0 metrics: - bleu4 - meteor - rouge-l - bertscore - moverscore language: en datasets: - lmqg/qg_squad pipeline_tag: text2text-generation tags: - answer extraction widget: - text: "<hl> Beyonce further expanded her acting career, starring as blues singer Etta James in the 2008 musical biopic, Cadillac Records. <hl> Her performance in the film received praise from critics, and she garnered several nominations for her portrayal of James, including a Satellite Award nomination for Best Supporting Actress, and a NAACP Image Award nomination for Outstanding Supporting Actress." example_title: "Answering Extraction Example 1" - text: "Beyonce further expanded her acting career, starring as blues singer Etta James in the 2008 musical biopic, Cadillac Records. <hl> Her performance in the film received praise from critics, and she garnered several nominations for her portrayal of James, including a Satellite Award nomination for Best Supporting Actress, and a NAACP Image Award nomination for Outstanding Supporting Actress. <hl>" example_title: "Answering Extraction Example 2" model-index: - name: lmqg/bart-large-squad-ae results: - task: name: Text2text Generation type: text2text-generation dataset: name: lmqg/qg_squad type: default args: default metrics: - name: BLEU4 (Answer Extraction) type: bleu4_answer_extraction value: 58.61 - name: ROUGE-L (Answer Extraction) type: rouge_l_answer_extraction value: 68.96 - name: METEOR (Answer Extraction) type: meteor_answer_extraction value: 41.89 - name: BERTScore (Answer Extraction) type: bertscore_answer_extraction value: 91.93 - name: MoverScore (Answer Extraction) type: moverscore_answer_extraction value: 82.41 - name: AnswerF1Score (Answer Extraction) type: answer_f1_score__answer_extraction value: 69.67 - name: AnswerExactMatch (Answer Extraction) type: answer_exact_match_answer_extraction value: 58.95 --- # Model Card of `lmqg/bart-large-squad-ae` This model is fine-tuned version of [facebook/bart-large](https://huggingface.co/facebook/bart-large) for answer extraction on the [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) (dataset_name: default) via [`lmqg`](https://github.com/asahi417/lm-question-generation). ### Overview - **Language model:** [facebook/bart-large](https://huggingface.co/facebook/bart-large) - **Language:** en - **Training data:** [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) (default) - **Online Demo:** [https://autoqg.net/](https://autoqg.net/) - **Repository:** [https://github.com/asahi417/lm-question-generation](https://github.com/asahi417/lm-question-generation) - **Paper:** [https://arxiv.org/abs/2210.03992](https://arxiv.org/abs/2210.03992) ### Usage - With [`lmqg`](https://github.com/asahi417/lm-question-generation#lmqg-language-model-for-question-generation-) ```python from lmqg import TransformersQG # initialize model model = TransformersQG(language="en", model="lmqg/bart-large-squad-ae") # model prediction answers = model.generate_a("William Turner was an English painter who specialised in watercolour landscapes") ``` - With `transformers` ```python from transformers import pipeline pipe = pipeline("text2text-generation", "lmqg/bart-large-squad-ae") output = pipe("<hl> Beyonce further expanded her acting career, starring as blues singer Etta James in the 2008 musical biopic, Cadillac Records. 
<hl> Her performance in the film received praise from critics, and she garnered several nominations for her portrayal of James, including a Satellite Award nomination for Best Supporting Actress, and a NAACP Image Award nomination for Outstanding Supporting Actress.") ``` ## Evaluation - ***Metric (Answer Extraction)***: [raw metric file](https://huggingface.co/lmqg/bart-large-squad-ae/raw/main/eval/metric.first.answer.paragraph_sentence.answer.lmqg_qg_squad.default.json) | | Score | Type | Dataset | |:-----------------|--------:|:--------|:---------------------------------------------------------------| | AnswerExactMatch | 58.95 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | AnswerF1Score | 69.67 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | BERTScore | 91.93 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | Bleu_1 | 65.82 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | Bleu_2 | 63.21 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | Bleu_3 | 60.73 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | Bleu_4 | 58.61 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | METEOR | 41.89 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | MoverScore | 82.41 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | | ROUGE_L | 68.96 | default | [lmqg/qg_squad](https://huggingface.co/datasets/lmqg/qg_squad) | ## Training hyperparameters The following hyperparameters were used during fine-tuning: - dataset_path: lmqg/qg_squad - dataset_name: default - input_types: ['paragraph_sentence'] - output_types: ['answer'] - prefix_types: None - model: facebook/bart-large - max_length: 512 - max_length_output: 32 - epoch: 5 - batch: 32 - lr: 5e-05 - fp16: False - random_seed: 1 - gradient_accumulation_steps: 2 - label_smoothing: 0.15 The full configuration can be found at [fine-tuning config file](https://huggingface.co/lmqg/bart-large-squad-ae/raw/main/trainer_config.json). ## Citation ``` @inproceedings{ushio-etal-2022-generative, title = "{G}enerative {L}anguage {M}odels for {P}aragraph-{L}evel {Q}uestion {G}eneration", author = "Ushio, Asahi and Alva-Manchego, Fernando and Camacho-Collados, Jose", booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing", month = dec, year = "2022", address = "Abu Dhabi, U.A.E.", publisher = "Association for Computational Linguistics", } ```
distilbert-base-german-cased
[ "pytorch", "safetensors", "distilbert", "fill-mask", "de", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "DistilBertForMaskedLM" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
43,667
2022-12-10T12:21:18Z
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### messy_sketch_art_style Dreambooth model trained by apurik-parv with Shivam Shrirao's DreamBooth implementation. Instance prompt: **meartsty** As the name implies, the model is trained on messy art-style sketch/doodle images for 50,000 steps. Simple prompts replicate the style faithfully; complicated and contradicting prompts will add elements of noise to the image. Feel free to experiment with it.
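No usage snippet is included in the card, so a hedged diffusers sketch follows. The repo id is a guess assembled from the author and model name in the card and may not match the real Hub path; only the `meartsty` trigger token comes from the card itself.

```python
# Hedged sketch: sample the DreamBooth style model with diffusers.
# The repo id is a guess (author + model name); "meartsty" is the card's trigger token.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "apurik-parv/messy_sketch_art_style", torch_dtype=torch.float16
).to("cuda")

image = pipe("a lighthouse on a cliff, meartsty").images[0]
image.save("meartsty_example.png")
```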
distilbert-base-uncased-finetuned-sst-2-english
[ "pytorch", "tf", "rust", "safetensors", "distilbert", "text-classification", "en", "dataset:sst2", "dataset:glue", "arxiv:1910.01108", "doi:10.57967/hf/0181", "transformers", "license:apache-2.0", "model-index", "has_space" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3,060,704
2022-12-10T12:43:19Z
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # {MODEL_NAME} This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 512 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}') model = AutoModel.from_pretrained('{MODEL_NAME}') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 187841 with parameters: ``` {'batch_size': 2, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `__main__.BregmanRankingLoss` Parameters of the fit()-Method: ``` { "epochs": 4, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 3e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": 5000, "warmup_steps": 75137, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 4096, 'do_lower_case': False}) with Transformer model: LongformerModel (1): Pooling({'word_embedding_dimension': 512, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
distilroberta-base
[ "pytorch", "tf", "jax", "rust", "safetensors", "roberta", "fill-mask", "en", "dataset:openwebtext", "arxiv:1910.01108", "arxiv:1910.09700", "transformers", "exbert", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3,342,240
2022-12-10T12:47:10Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 292.51 +/- 14.48 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
gpt2-medium
[ "pytorch", "tf", "jax", "rust", "safetensors", "gpt2", "text-generation", "en", "arxiv:1910.09700", "transformers", "license:mit", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
759,601
2022-12-10T13:09:48Z
--- title: SpeakToChatGPT emoji: 📊 colorFrom: blue colorTo: blue sdk: gradio sdk_version: 3.12.0 app_file: app.py pinned: false duplicated_from: yizhangliu/chatGPT --- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
Pinwheel/wav2vec2-large-xls-r-300m-hi-v3
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-12-10T22:10:52Z
--- language: - pt license: apache-2.0 tags: - whisper-event - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 model-index: - name: Whisper Tiny Portuguese - Prince Canuma results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Tiny Portuguese - Prince Canuma This model is a fine-tuned version of [openai/whisper-tiny](https://huggingface.co/openai/whisper-tiny) on the Common Voice 11.0 dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 64 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 5000 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0+cu116 - Datasets 2.7.1.dev0 - Tokenizers 0.13.2
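The card stops at the training recipe and does not show inference. A hedged sketch with the `transformers` automatic-speech-recognition pipeline follows; the repo id is a placeholder, since the card does not state one, and a local 16 kHz audio file is assumed.

```python
# Hedged sketch: Portuguese transcription with the fine-tuned Whisper Tiny model.
# The repo id is a placeholder; substitute the actual Hub path.
from transformers import pipeline

asr = pipeline("automatic-speech-recognition", model="your-username/whisper-tiny-pt")
result = asr("sample_pt.wav")  # path to a local 16 kHz mono audio file (assumed)
print(result["text"])
```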
Pinwheel/wav2vec2-large-xls-r-300m-tr-colab
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "transformers" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2022-12-10T22:16:53Z
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-Taxi-v3-nice results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.56 +/- 2.71 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python model = load_from_hub(repo_id="osanseviero/q-Taxi-v3-nice", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
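The card's snippet calls `load_from_hub` and `gym` without importing or defining them. In the Deep RL course material this helper is typically a thin wrapper around `hf_hub_download` plus `pickle`; the sketch below makes that assumption explicit and also assumes the pickled dict stores the Q-table under a `"qtable"` key.

```python
# Hedged, self-contained version of the card's snippet. Assumes the pickled
# dict contains "env_id" (used in the card) and "qtable" (an assumption).
import pickle

import gymnasium as gym
import numpy as np
from huggingface_hub import hf_hub_download


def load_from_hub(repo_id: str, filename: str) -> dict:
    path = hf_hub_download(repo_id=repo_id, filename=filename)
    with open(path, "rb") as f:
        return pickle.load(f)


model = load_from_hub(repo_id="osanseviero/q-Taxi-v3-nice", filename="q-learning.pkl")
env = gym.make(model["env_id"])

state, _ = env.reset()
done = False
while not done:
    action = int(np.argmax(model["qtable"][state]))  # act greedily on the Q-table
    state, reward, terminated, truncated, _ = env.step(action)
    done = terminated or truncated
```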
AbidHasan95/movieHunt2
[ "pytorch", "distilbert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "DistilBertForTokenClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
2022-12-10T23:03:57Z
--- tags: - generated_from_trainer datasets: - samsum model-index: - name: pegasus-samsum-5 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # pegasus-samsum-5 This model is a fine-tuned version of [google/pegasus-cnn_dailymail](https://huggingface.co/google/pegasus-cnn_dailymail) on the samsum dataset. It achieves the following results on the evaluation set: - Loss: 1.3386 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - gradient_accumulation_steps: 16 - total_train_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 5 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 1.6957 | 0.54 | 500 | 1.4857 | | 1.4033 | 1.09 | 1000 | 1.4117 | | 1.497 | 1.63 | 1500 | 1.3742 | | 1.4132 | 2.17 | 2000 | 1.3582 | | 1.3858 | 2.72 | 2500 | 1.3482 | | 1.2908 | 3.26 | 3000 | 1.3477 | | 1.2357 | 3.8 | 3500 | 1.3386 | | 1.2499 | 4.35 | 4000 | 1.3419 | | 1.2349 | 4.89 | 4500 | 1.3386 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
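As with most auto-generated Trainer cards, there is no inference example. A hedged sketch with the summarization pipeline follows; the repo id is a placeholder for wherever pegasus-samsum-5 is actually hosted, and the dialogue is made up for illustration.

```python
# Hedged sketch: dialogue summarization with the fine-tuned PEGASUS checkpoint.
# The repo id is a placeholder; point it at the real pegasus-samsum-5 repository.
from transformers import pipeline

summarizer = pipeline("summarization", model="your-username/pegasus-samsum-5")
dialogue = (
    "Anna: Are we still on for lunch tomorrow?\n"
    "Ben: Yes, 12:30 at the usual place.\n"
    "Anna: Perfect, see you then!"
)
print(summarizer(dialogue, max_length=60, min_length=10)[0]["summary_text"])
```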
Adinda/Adinda
[ "license:artistic-2.0" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-12-11T06:48:37Z
--- language: - en thumbnail: "https://s3.amazonaws.com/moonup/production/uploads/1670742434498-633a20a88f27255b6b56290b.png" license: creativeml-openrail-m tags: - stable-diffusion - text-to-image --- # Chinese Digital Art Diffusion **Trigger Words: CNDigitalArt Style** This is a fine-tuned Stable Diffusion model trained on some of the **Chinese Digital Arts** style that usually uses on Chinese Interactive Reading (Visual Novel) platforms such as **Orange Light** [66rpg.com](https://66rpg.com) or **NetEase Interactive Reading Platform** [avg.163.com](https://avg.163.com/). _if you don't know what that is, don't worry, it's just one of those really big thing in China that majority of Westerners had no clue about._ ![Trained.png](https://s3.amazonaws.com/moonup/production/uploads/1670748193502-633a20a88f27255b6b56290b.png) Use the tokens **_CNDigitalArt Style_** in your prompts to test and experiment it yourself. **EXAMPLES:** _These results were tested on the 2000 Steps model [ **CNDigitalArt_2000.ckpt**](https://huggingface.co/CultivatorX/Chinese-Digital-Art/blob/main/CNDigitalArt_2000.ckpt). I just did 20 batches of -1 seeds in random for each of the prompt (most of which isn't that good) but it does have some really good ones. Prompt: **a portrait of Megan Fox in CNDigitalArt Style** Negative prompt: _lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name, two faces, two heads_ Steps: 20, Sampler: Euler, CFG scale: 7, Seed: 593563256, Face restoration: GFPGAN, Size: 512x512, Model hash: 2258c119 ![Scarlett Fox.png](https://s3.amazonaws.com/moonup/production/uploads/1670742434498-633a20a88f27255b6b56290b.png) Prompt: **a portrait of Scarlett Johansson in CNDigitalArt Style** Negative prompt: lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name, two faces, two heads Steps: 20, Sampler: Euler, CFG scale: 7, Seed: 4272335413, Face restoration: GFPGAN, Size: 512x512, Model hash: 2258c119 ===================================================================== ===================================================================== Prompt: **a portrait of Emma Watson in CNDigitalArt Style** Negative prompt: lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name, two faces, two heads Steps: 20, Sampler: Euler, CFG scale: 7, Seed: 3813059825, Face restoration: GFPGAN, Size: 512x512, Model hash: 2258c119 ![Emma Zendeya.png](https://s3.amazonaws.com/moonup/production/uploads/1670742782225-633a20a88f27255b6b56290b.png) Prompt: **a portrait of Zendaya in CNDigitalArt Style** Negative prompt: lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name, two faces, two heads Steps: 20, Sampler: Euler, CFG scale: 7, Seed: 962052606, Face restoration: GFPGAN, Size: 512x512, Model hash: 2258c119
Aimendo/Triage
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: other --- Carpet Cleaning Plano TX https://carpetcleaningplanotx.com/ ‪(469) 444-1903‬ At Rug Cleaning Plano in TX we likewise have a truck mounted cover cleaning framework. These versatile vehicles have a force to be reckoned with of hardware. They generally have these on them and they can finish any occupation properly. Whether it is a little home, an enormous house or a gigantic modern intricate, the undertaking is rarely too large or intense.
Aimendo/autonlp-triage-35248482
[ "pytorch", "bert", "text-classification", "en", "dataset:Aimendo/autonlp-data-triage", "transformers", "autonlp", "co2_eq_emissions" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
33
null
--- license: other --- Carpet Stain Removal Plano TX https://carpetcleaningplanotx.com/carpet-stain-removal.html (469) 444-1903 Carpet Cleaning Plano in Texas is the company of choice for the majority of customers when it comes to stain removal. We have the best-trained staff and professional technology. We will get rid of even the worst stain. That is if it comes from your upholstery, fabrics, curtains, and carpets. Try us out today, and you'll see why the majority of people prefer us to everyone else.
Akashpb13/Central_kurdish_xlsr
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "ckb", "dataset:mozilla-foundation/common_voice_8_0", "transformers", "mozilla-foundation/common_voice_8_0", "generated_from_trainer", "robust-speech-event", "model_for_talk", "hf-asr-leaderboard", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- license: other --- Upholstery Cleaning Fort Worth TX https://txfortworthcarpetcleaning.com/upholstery-cleaning.html (817) 523-1237 When you sit on your upholstery, you inhale allergens, dirt, and dust that are trapped in its fibers. Therefore, if you want to ensure the safety of your upholstery—especially if you have children or pets—you need to hire experts in carpet cleaning for upholstery in Fort Worth, Texas. We have the best upholstery cleaners who will come to your house and do an excellent job of cleaning it. Understanding the various fibers of your furniture is important to our technicians because it helps them choose effective and safe cleaning methods. When you hire us, we promise to give you a lot of attention and care, and we won't start cleaning your upholstery until we make sure the products we use are safe for the kind of fabric it is made of.
Akashpb13/xlsr_maltese_wav2vec2
[ "pytorch", "jax", "wav2vec2", "automatic-speech-recognition", "mt", "dataset:common_voice", "transformers", "audio", "speech", "xlsr-fine-tuning-week", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- license: other --- Rug Cleaning Mesquite TX http://mesquitecarpetcleaningtx.com/rug-cleaning.html (469) 213-8132 Carpet and area rug manufacturers recommend using the free hot water extraction system from Our Rug Cleaning. Carpet Cleaning Mesquite TX can also clean some area rugs at a lower temperature, depending on how many fibers they have. These rugs need to be cleaned with cool water routines. Using a high-controlled cleaning process and a deposit-free cleaning result, we remove all dirt, sand, coarseness, and grime from the area rugs.
Akira-Yana/distilbert-base-uncased-finetuned-cola
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-12-11T07:58:17Z
--- language: - en license: creativeml-openrail-m tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers inference: true --- # Anything V3 Welcome to Anything V3 - a latent diffusion model for weebs. This model is intended to produce high-quality, highly detailed anime style with just a few prompts. Like other anime-style Stable Diffusion models, it also supports danbooru tags to generate images. e.g. **_1girl, white hair, golden eyes, beautiful eyes, detail, flower meadow, cumulonimbus clouds, lighting, detailed sky, garden_** ## Gradio We support a [Gradio](https://github.com/gradio-app/gradio) Web UI to run Anything-V3.0: [Open in Spaces](https://huggingface.co/spaces/akhaliq/anything-v3.0) ## 🧨 Diffusers This model can be used just like any other Stable Diffusion model. For more information, please have a look at the [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion). You can also export the model to [ONNX](https://huggingface.co/docs/diffusers/optimization/onnx), [MPS](https://huggingface.co/docs/diffusers/optimization/mps) and/or [FLAX/JAX](). ```python from diffusers import StableDiffusionPipeline import torch model_id = "Linaqruf/anything-v3.0" branch_name= "diffusers" pipe = StableDiffusionPipeline.from_pretrained(model_id, revision=branch_name, torch_dtype=torch.float16) pipe = pipe.to("cuda") prompt = "pikachu" image = pipe(prompt).images[0] image.save("./pikachu.png") ``` ## Examples Below are some examples of images generated using this model: **Anime Girl:** ![Anime Girl](https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/1girl.png) ``` 1girl, brown hair, green eyes, colorful, autumn, cumulonimbus clouds, lighting, blue sky, falling leaves, garden Steps: 50, Sampler: DDIM, CFG scale: 12 ``` **Anime Boy:** ![Anime Boy](https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/1boy.png) ``` 1boy, medium hair, blonde hair, blue eyes, bishounen, colorful, autumn, cumulonimbus clouds, lighting, blue sky, falling leaves, garden Steps: 50, Sampler: DDIM, CFG scale: 12 ``` **Scenery:** ![Scenery](https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/scenery.png) ``` scenery, shibuya tokyo, post-apocalypse, ruins, rust, sky, skyscraper, abandoned, blue sky, broken window, building, cloud, crane machine, outdoors, overgrown, pillar, sunset Steps: 50, Sampler: DDIM, CFG scale: 12 ``` ## License This model is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage. The CreativeML OpenRAIL License specifies: 1. You can't use the model to deliberately produce nor share illegal or harmful outputs or content 2. The authors claims no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license 3. You may re-distribute the weights and use the model commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully) [Please read the full license here](https://huggingface.co/spaces/CompVis/stable-diffusion-license)
Akiva/Joke
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: other --- Upholstery Cleaning Mesquite TX http://mesquitecarpetcleaningtx.com/upholstery-cleaning.html (469) 213-8132 We will either dry clean the upholstery or steam clean it, depending on the fabric. To bring your upholstery back to life, we use a highly specialized upholstery cleaning tool, a protective 2 inch cover process, and a buildup-free cleaning solution.
AkshatSurolia/DeiT-FaceMask-Finetuned
[ "pytorch", "deit", "image-classification", "dataset:Face-Mask18K", "transformers", "license:apache-2.0", "autotrain_compatible" ]
image-classification
{ "architectures": [ "DeiTForImageClassification" ], "model_type": "deit", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
46
2022-12-11T08:12:22Z
--- license: other --- Green Carpet Cleaning Garland http://garlandcarpetcleaner.com/ (972) 256-8544 One of methods we follow at cover cleaning is "Steam Cleaning Administration" that depends on utilizing minimal high temp water and more steam, centering steam - which infiltrating into profound on spots and stain to dissolve every one of them even the hardest ones and kill all poisons from your rug. Then, at that point, the job of our compelling green items starts to clear this large number of components, returning your floor covering shimmered and bright. At last, we utilize our excellent dry machines, so your rug will be full dry inside no time. We have specific floor covering steam cleaners, so they know how to follow the high amazing skill simultaneously, safeguarding your rug from any harms.
AkshayDev/BERT_Fine_Tuning
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: other --- Richardson TX Carpet Cleaning https://carpetcleaning-richardson.com/ (972) 454-9815 Pets are outlandish, and generally they are tomfoolery, and that is the explanation a large portion of us keep them. Notwithstanding, usually now and again they wreck in the house and right on the costly rug or carpet. A specialist from Richardson Texas Pet Stain Cleaning prescribes that it's fundamental to have the stain eliminated right away and inappropriate or lacking pet stain evacuation can set the color for all time and any further stain can harm your carpet completely or significantly more peeing can cause the scent that appears never to disappear.
AkshaySg/GrammarCorrection
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: other --- Carpet Stain Removal Richardson TX https://carpetcleaning-richardson.com/carpet-stain-removal.html (972) 454-9815 One of the reasons our carpet stain cleaning is so popular with customers is that it is eco-friendly. Our products are safe for the home, pets, and children. We are able to quickly clean tough stains that you believe are permanent and cannot be removed from your carpet. You will quickly observe the disappearance of what you thought was a stain that would not go away.
AkshaySg/LanguageIdentification
[ "multilingual", "dataset:VoxLingua107", "LID", "spoken language recognition", "license:apache-2.0" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imagefolder metrics: - accuracy model-index: - name: convnext-tiny-224-finetuned-eurosat-att-auto results: - task: name: Image Classification type: image-classification dataset: name: imagefolder type: imagefolder config: default split: train args: default metrics: - name: Accuracy type: accuracy value: 0.9506172839506173 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # convnext-tiny-224-finetuned-eurosat-att-auto This model is a fine-tuned version of [facebook/convnext-tiny-224](https://huggingface.co/facebook/convnext-tiny-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.5076 - Accuracy: 0.9506 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.5583 | 0.97 | 23 | 1.6008 | 0.7160 | | 1.2953 | 1.97 | 46 | 1.2957 | 0.7531 | | 0.9488 | 2.97 | 69 | 1.0720 | 0.8148 | | 0.7036 | 3.97 | 92 | 0.8965 | 0.8642 | | 0.5446 | 4.97 | 115 | 0.7574 | 0.9383 | | 0.4113 | 5.97 | 138 | 0.6522 | 0.9383 | | 0.2259 | 6.97 | 161 | 0.5720 | 0.9383 | | 0.1863 | 7.97 | 184 | 0.5076 | 0.9506 | | 0.1443 | 8.97 | 207 | 0.4795 | 0.9383 | | 0.1289 | 9.97 | 230 | 0.4685 | 0.9383 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
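The card reports accuracy and training settings but no inference snippet. A hedged sketch with the image-classification pipeline is below; the repo id is a placeholder and the label names depend on whatever `imagefolder` directories were used for fine-tuning.

```python
# Hedged sketch: classify one image with the fine-tuned ConvNeXt checkpoint.
# The repo id is a placeholder; labels come from the original imagefolder layout.
from transformers import pipeline

classifier = pipeline(
    "image-classification",
    model="your-username/convnext-tiny-224-finetuned-eurosat-att-auto",
)
for pred in classifier("example.jpg")[:3]:  # local image path (assumed)
    print(f"{pred['label']}: {pred['score']:.3f}")
```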
AlanDev/DallEMiniButBetter
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: other --- Dryer Vent Cleaning Richardson TX https://carpetcleaning-richardson.com/dryer-vent-cleaning.html (972) 454-9815 Additionally, if your vents are clogged, we can assist you in preventing dryer fires. If your clothes get too hot in your dryer or if it is too hot, this means that the hot air vents are blocked. When we remove the accumulated lint from the vents, we will be able to resolve this issue quickly. When customers need their dryers reconditioned or all of the lint that has built up in their vents removed, our skilled team is there to help.
AlanDev/test
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: other --- Coppell Carpet Cleaning https://coppellcarpetcleaning.com/ (972) 914-8246 Cover Green Cleaners uses the most advanced and effective methods to perform all of your home's cleaning. Our clients remark on how pleased they are that we only use materials and cleaning products that are safe for their children, pets, and other family members. They always appreciate that we take it upon ourselves to make their homes completely safe.
Aleksandar/distilbert-srb-ner
[ "pytorch", "distilbert", "token-classification", "sr", "dataset:wikiann", "transformers", "generated_from_trainer", "autotrain_compatible" ]
token-classification
{ "architectures": [ "DistilBertForTokenClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- tags: - generated_from_trainer datasets: - imagefolder model-index: - name: convnext-tiny-224-finetuned-eurosat-vitconfig-test results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # convnext-tiny-224-finetuned-eurosat-vitconfig-test This model is a fine-tuned version of [](https://huggingface.co/) on the imagefolder dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 4 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 2 ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
Aleksandar/electra-srb-oscar
[ "pytorch", "electra", "fill-mask", "transformers", "generated_from_trainer", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "ElectraForMaskedLM" ], "model_type": "electra", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- language: - zh inference: parameters: top_p: 0.9 max_new_tokens: 128 num_return_sequences: 3 do_sample: true repetition_penalty: 1.1 license: apache-2.0 tags: - generate - gpt2 widget: - 北京是中国的 - 西湖的景色 --- # Wenzhong2.0-GPT2-110M-BertTokenizer-chinese - Github: [Fengshenbang-LM](https://github.com/IDEA-CCNL/Fengshenbang-LM) - Docs: [Fengshenbang-Docs](https://fengshenbang-doc.readthedocs.io/) ## 简介 Brief Introduction 善于处理NLG任务,中文版的GPT2-Small。基于BertTokenizer,实现字级别token,更便于受控文本生成。 Focused on handling NLG tasks, Chinese GPT2-Small. ## 模型分类 Model Taxonomy | 需求 Demand | 任务 Task | 系列 Series | 模型 Model | 参数 Parameter | 额外 Extra | | :----: | :----: | :----: | :----: | :----: | :----: | | 通用 General | 自然语言生成 NLG | 闻仲 Wenzhong | GPT2 | 110M | 中文 Chinese | ## 模型信息 Model Information 类似于Wenzhong2.0-GPT2-3.5B-chinese,我们实现了一个small版本的12层的Wenzhong2.0-GPT2-110M-BertTokenizer-chinese,并在悟道(300G版本)上面进行预训练。本次开源别于之前开源的闻仲-GPT2系列,主要在于将BPE的分词换成了BertTokenzier的字级别分词。 Similar to Wenzhong2.0-GPT2-3.5B-chinese, we implement a small size Wenzhong2.0-GPT2-110M-BertTokenizer-chinese with 12 layers, which is pre-trained on Wudao Corpus (300G version).This open source version is different from the previous open source Wenzhong-GPT2 series, mainly because the word segmentation of BPE is replaced by the word level word segmentation of BertTokenzier. ## 使用 Usage ### 加载模型 Loading Models ```python from transformers import BertTokenizer,GPT2LMHeadModel hf_model_path = 'IDEA-CCNL/Wenzhong2.0-GPT2-110M-BertTokenizer-chinese' tokenizer = BertTokenizer.from_pretrained(hf_model_path) model = GPT2LMHeadModel.from_pretrained(hf_model_path) ``` ### 使用示例 Usage Examples 这里需要提一点,GPT在训练的时候是没有添加special_tokens的,BertTokenizer会默认补充special_tokens,所以在tokenzier的时候需要将add_special_tokens设置为false,这样生产效果会更好。 ```python def generate_word_level(input_text,n_return=5,max_length=128,top_p=0.9): inputs = tokenizer(input_text,return_tensors='pt',add_special_tokens=False).to(model.device) gen = model.generate( inputs=inputs['input_ids'], max_length=max_length, do_sample=True, top_p=top_p, eos_token_id=21133, pad_token_id=0, num_return_sequences=n_return) sentences = tokenizer.batch_decode(gen) for idx,sentence in enumerate(sentences): print(f'sentence {idx}: {sentence}') print('*'*20) return gen outputs = generate_word_level('西湖的景色',n_return=5,max_length=128) ``` ## 引用 Citation 如果您在您的工作中使用了我们的模型,可以引用我们的[论文](https://arxiv.org/abs/2209.02970): If you are using the resource for your work, please cite the our [paper](https://arxiv.org/abs/2209.02970): ```text @article{fengshenbang, author = {Jiaxing Zhang and Ruyi Gan and Junjie Wang and Yuxiang Zhang and Lin Zhang and Ping Yang and Xinyu Gao and Ziwei Wu and Xiaoqun Dong and Junqing He and Jianheng Zhuo and Qi Yang and Yongfeng Huang and Xiayu Li and Yanghan Wu and Junyu Lu and Xinyu Zhu and Weifeng Chen and Ting Han and Kunhao Pan and Rui Wang and Hao Wang and Xiaojun Wu and Zhongshen Zeng and Chongpei Chen}, title = {Fengshenbang 1.0: Being the Foundation of Chinese Cognitive Intelligence}, journal = {CoRR}, volume = {abs/2209.02970}, year = {2022} } ``` 也可以引用我们的[网站](https://github.com/IDEA-CCNL/Fengshenbang-LM/): You can also cite our [website](https://github.com/IDEA-CCNL/Fengshenbang-LM/): ```text @misc{Fengshenbang-LM, title={Fengshenbang-LM}, author={IDEA-CCNL}, year={2021}, howpublished={\url{https://github.com/IDEA-CCNL/Fengshenbang-LM}}, } ```
Aleksandra/herbert-base-cased-finetuned-squad
[ "pytorch", "tensorboard", "bert", "question-answering", "transformers", "generated_from_trainer", "license:cc-by-4.0", "autotrain_compatible" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- language: - sl license: apache-2.0 tags: - whisper-event - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 metrics: - wer model-index: - name: Whisper Small Slovenian results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: mozilla-foundation/common_voice_11_0 sl type: mozilla-foundation/common_voice_11_0 config: sl split: test args: sl metrics: - name: Wer type: wer value: 26.588921282798832 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Small Slovenian This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the mozilla-foundation/common_voice_11_0 sl dataset. It achieves the following results on the evaluation set: - Loss: 0.4625 - Wer: 26.5889 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 32 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: constant_with_warmup - lr_scheduler_warmup_steps: 50 - training_steps: 1000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:-------:| | 0.0027 | 13.01 | 1000 | 0.4625 | 26.5889 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 2.0.0.dev20221210+cu117 - Datasets 2.7.1.dev0 - Tokenizers 0.13.2
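A short transcription sketch with the Transformers `pipeline` API, assuming the fine-tuned checkpoint is available on the Hub; the repo id and audio file name are placeholders.

```python
from transformers import pipeline

# Placeholder repo id and audio path; replace with the real ones.
asr = pipeline(
    "automatic-speech-recognition",
    model="your-username/whisper-small-sl",
    chunk_length_s=30,  # chunk long audio so it fits Whisper's 30 s window
)

result = asr("slovenian_sample.wav")
print(result["text"])
```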
AlekseyKorshuk/bert
[ "pytorch", "distilbert", "text-classification", "transformers", "generated_from_trainer", "license:apache-2.0" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
31
null
This is a BERT model for building Meari (메아리), a psychological counseling chatbot. Chatbot: https://ai-meisterbin-project-chatbot-main-chatbot-qj3hxl.streamlit.app/ GitHub: https://github.com/AI-MeisterBin/project_chatbot
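A hedged usage sketch, assuming the checkpoint is a standard sequence-classification model on the Hub; the repo id and the example user message are placeholders.

```python
from transformers import pipeline

# Placeholder repo id; point this at the actual Hub id of the model.
classifier = pipeline("text-classification", model="your-username/meari-bert")

# Example user message for the counseling chatbot (placeholder text).
print(classifier("요즘 너무 불안하고 잠이 잘 안 와요."))
```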
AlekseyKorshuk/horror-scripts
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
19
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 247.93 +/- 32.87 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
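One possible way to fill in the TODO above, assuming the agent was saved to the Hub under a zip name like `ppo-LunarLander-v2.zip` and that the classic Gym API is installed; the repo id and filename are assumptions, not values taken from this card.

```python
import gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# Assumed repo id and filename; adjust to the actual upload.
checkpoint = load_from_hub(
    repo_id="your-username/ppo-LunarLander-v2",
    filename="ppo-LunarLander-v2.zip",
)
model = PPO.load(checkpoint)

# Roll the trained policy out in the environment.
env = gym.make("LunarLander-v2")
obs = env.reset()
for _ in range(1000):
    action, _states = model.predict(obs, deterministic=True)
    obs, reward, done, info = env.step(action)
    if done:
        obs = env.reset()
env.close()
```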
AlekseyKulnevich/Pegasus-QuestionGeneration
[ "pytorch", "pegasus", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "PegasusForConditionalGeneration" ], "model_type": "pegasus", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
17
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 264.38 +/- 14.94 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
Alerosae/SocratesGPT-2
[ "pytorch", "gpt2", "feature-extraction", "en", "transformers", "text-generation" ]
text-generation
{ "architectures": [ "GPT2Model" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Huggy library_name: ml-agents --- # **ppo** Agent playing **Huggy** This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy 2. Step 1: Write your model_id: Alan1999/ppo-Huggy 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
AlexDemon/Alex
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - AntBulletEnv-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: AntBulletEnv-v0 type: AntBulletEnv-v0 metrics: - type: mean_reward value: 1533.59 +/- 80.45 name: mean_reward verified: false --- # **A2C** Agent playing **AntBulletEnv-v0** This is a trained model of a **A2C** agent playing **AntBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
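A sketch of how the TODO above could be completed for this A2C agent. The repo id and filename are assumptions, and if the agent was trained with `VecNormalize`, the saved normalization statistics would also have to be loaded to reproduce the reported score.

```python
import gym
import pybullet_envs  # noqa: F401  # registers AntBulletEnv-v0 with gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import A2C

# Assumed repo id and filename; adjust to the actual upload.
checkpoint = load_from_hub(
    repo_id="your-username/a2c-AntBulletEnv-v0",
    filename="a2c-AntBulletEnv-v0.zip",
)
model = A2C.load(checkpoint)

env = gym.make("AntBulletEnv-v0")
obs = env.reset()
for _ in range(1000):
    action, _ = model.predict(obs, deterministic=True)
    obs, reward, done, info = env.step(action)
    if done:
        obs = env.reset()
env.close()
```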
AlexaRyck/KEITH
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - ka license: apache-2.0 tags: - whisper-event - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 metrics: - wer model-index: - name: Whisper Small Georgian results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: mozilla-foundation/common_voice_11_0 kab type: mozilla-foundation/common_voice_11_0 config: kab split: test args: kab metrics: - name: Wer type: wer value: 53.84203447245193 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Small Georgian This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the mozilla-foundation/common_voice_11_0 kab dataset. It achieves the following results on the evaluation set: - Loss: 0.6125 - Wer: 53.8420 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 32 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: constant_with_warmup - lr_scheduler_warmup_steps: 50 - training_steps: 1000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:-------:| | 0.5555 | 1.06 | 1000 | 0.6125 | 53.8420 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 2.0.0.dev20221210+cu117 - Datasets 2.7.1.dev0 - Tokenizers 0.13.2
Alexander-Learn/bert-finetuned-ner-accelerate
[ "pytorch", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
2022-12-11T11:29:45Z
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 859.00 +/- 348.69 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga hanq0212 -f logs/ python enjoy.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga hanq0212 -f logs/ rl_zoo3 enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python train.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga hanq0212 ``` ## Hyperparameters ```python OrderedDict([('batch_size', 32), ('buffer_size', 100000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.0001), ('learning_starts', 100000), ('n_timesteps', 10000000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ```
Alexandru/creative_copilot
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imagefolder metrics: - accuracy model-index: - name: vit-base-patch16-224-in21k-eurosat results: - task: name: Image Classification type: image-classification dataset: name: imagefolder type: imagefolder config: default split: train args: default metrics: - name: Accuracy type: accuracy value: 0.9801587301587301 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-patch16-224-in21k-eurosat This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.3394 - Accuracy: 0.9802 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.7076 | 0.98 | 33 | 0.6119 | 0.9696 | | 0.4469 | 1.98 | 66 | 0.4190 | 0.9788 | | 0.3497 | 2.98 | 99 | 0.3555 | 0.9788 | | 0.3048 | 3.98 | 132 | 0.3394 | 0.9802 | | 0.2983 | 4.98 | 165 | 0.3394 | 0.9802 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
AlexeyIgnatov/albert-xlarge-v2-squad-v2
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - ru - en tags: - abusive text classification license: "apache-2.0" datasets: - AbusiveLanguageDataset --- ```py from transformers import pipeline, AutoConfig, AutoTokenizer, AutoModelForSequenceClassification model_path = 'marianna13/xlm-roberta-fine-tuned-on-russian-abusive-language' id2label = { 0:'неопасный текст', 1:'опасный текст' } label2id = { 'неопасный текст':0, 'опасный текст':1 } config = AutoConfig.from_pretrained(model_path, id2label=id2label, label2id=label2id) tokenizer = AutoTokenizer.from_pretrained(model_path) model = AutoModelForSequenceClassification.from_pretrained(model_path, config=config) text = "Прекрасный день." pipe = pipeline('text-classification', model=model, tokenizer=tokenizer) pipe(text) ``` ```json [{'label': 'неопасный текст', 'score': 0.9249424934387207}] ```
AliPotter24/a
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 247.38 +/- 22.45 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
Aliraza47/BERT
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 language: - ar tags: - summarization - AraBERT - BERT - BERT2BERT - MSA - Arabic Text Summarization - Arabic News Title Generation - Arabic Paraphrasing - Summarization - generated_from_trainer - Transformers - PyTorch widget: - text: >- شهدت مدينة طرابلس، مساء أمس الأربعاء، احتجاجات شعبية وأعمال شغب لليوم الثالث على التوالي، وذلك بسبب تردي الوضع المعيشي والاقتصادي. واندلعت مواجهات عنيفة وعمليات كر وفر ما بين الجيش اللبناني والمحتجين استمرت لساعات، إثر محاولة فتح الطرقات المقطوعة، ما أدى إلى إصابة العشرات من الطرفين. datasets: - xlsum model-index: - name: arabartsummarization results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # AraBART-summ ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ## Validation Metrics - Loss: 2.3417 - Rouge1: 2.353 - Rouge2: 1.103 - RougeL: 1.176 - RougeLsum: 1.521 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 2.7555 | 1.0 | 9380 | 2.3417 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
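A minimal summarization sketch with the `pipeline` API; the repo id below is a placeholder, and the input is the Arabic news snippet from the widget above.

```python
from transformers import pipeline

# Placeholder repo id; replace with the Hub id of this checkpoint.
summarizer = pipeline("summarization", model="your-username/AraBART-summ")

text = (
    "شهدت مدينة طرابلس، مساء أمس الأربعاء، احتجاجات شعبية وأعمال شغب "
    "لليوم الثالث على التوالي، وذلك بسبب تردي الوضع المعيشي والاقتصادي."
)
print(summarizer(text, max_length=64, num_beams=4)[0]["summary_text"])
```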
Alireza1044/bert_classification_lm
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
35
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: finetuning-sentiment-model-3000-samples results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuning-sentiment-model-3000-samples This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.1285 - Accuracy: 0.9667 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
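A quick inference sketch for this sentiment classifier; the repo id and the example reviews are placeholders.

```python
from transformers import pipeline

# Placeholder repo id; replace with the Hub id of this checkpoint.
sentiment = pipeline(
    "text-classification",
    model="your-username/finetuning-sentiment-model-3000-samples",
)

reviews = [
    "This movie was a complete waste of time.",
    "Absolutely loved it, would watch again.",
]
for review, prediction in zip(reviews, sentiment(reviews)):
    print(review, "->", prediction["label"], round(prediction["score"], 3))
```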
Allybaby21/Allysai
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - generated_from_trainer metrics: - accuracy model-index: - name: CommitPredictor results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # CommitPredictor This model is a fine-tuned version of [microsoft/codebert-base-mlm](https://huggingface.co/microsoft/codebert-base-mlm) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.8427 - Accuracy: 0.6409 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | No log | 1.0 | 292 | 2.2754 | 0.5767 | | 2.5787 | 2.0 | 584 | 2.2006 | 0.5877 | | 2.5787 | 3.0 | 876 | 2.0851 | 0.5953 | | 2.2167 | 4.0 | 1168 | 2.0148 | 0.6142 | | 2.2167 | 5.0 | 1460 | 1.9583 | 0.6144 | | 2.064 | 6.0 | 1752 | 1.8846 | 0.6309 | | 1.9626 | 7.0 | 2044 | 1.9399 | 0.6247 | | 1.9626 | 8.0 | 2336 | 1.8423 | 0.6401 | | 1.8671 | 9.0 | 2628 | 1.8065 | 0.6407 | | 1.8671 | 10.0 | 2920 | 1.7582 | 0.6507 | | 1.7957 | 11.0 | 3212 | 1.7978 | 0.6479 | | 1.7226 | 12.0 | 3504 | 1.8058 | 0.6521 | | 1.7226 | 13.0 | 3796 | 1.8427 | 0.6409 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu117 - Datasets 2.7.1 - Tokenizers 0.13.2
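Since the base model is `microsoft/codebert-base-mlm`, the fine-tuned checkpoint can presumably be queried as a fill-mask model; the repo id and the masked commit message below are placeholders.

```python
from transformers import pipeline

# Placeholder repo id; RoBERTa-style models such as CodeBERT use "<mask>".
fill = pipeline("fill-mask", model="your-username/CommitPredictor")

for prediction in fill("Fix <mask> pointer dereference in the parser"):
    print(prediction["token_str"], round(prediction["score"], 3))
```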
Amirosein/roberta
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- library_name: sklearn tags: - sklearn - skops - tabular-regression model_file: linreg.pkl widget: structuredData: x0: - -0.3839236795902252 - -0.9788183569908142 - 1.0937178134918213 x1: - -0.5319488644599915 - -1.108436107635498 - 0.9354732036590576 x2: - -0.38279563188552856 - -1.3128694295883179 - 1.4773520231246948 x3: - 0.2815782427787781 - -0.11783809214830399 - -0.9529813528060913 x4: - 1.0 - 1.0 - 0.0 x5: - 0.0 - 0.0 - 0.0 x6: - 0.0 - 0.0 - 0.0 x7: - 0.0 - 0.0 - 1.0 x8: - 0.0 - 1.0 - 0.0 x9: - 0.0 - 0.0 - 0.0 --- # Model description This is a regression model on MPG dataset trained. ## Intended uses & limitations This model is not ready to be used in production. ## Training Procedure ### Hyperparameters The model is trained with below hyperparameters. <details> <summary> Click to expand </summary> | Hyperparameter | Value | |------------------|------------| | copy_X | True | | fit_intercept | True | | n_jobs | | | normalize | deprecated | | positive | False | </details> ### Model Plot The model plot is below. <style>#sk-container-id-3 {color: black;background-color: white;}#sk-container-id-3 pre{padding: 0;}#sk-container-id-3 div.sk-toggleable {background-color: white;}#sk-container-id-3 label.sk-toggleable__label {cursor: pointer;display: block;width: 100%;margin-bottom: 0;padding: 0.3em;box-sizing: border-box;text-align: center;}#sk-container-id-3 label.sk-toggleable__label-arrow:before {content: "▸";float: left;margin-right: 0.25em;color: #696969;}#sk-container-id-3 label.sk-toggleable__label-arrow:hover:before {color: black;}#sk-container-id-3 div.sk-estimator:hover label.sk-toggleable__label-arrow:before {color: black;}#sk-container-id-3 div.sk-toggleable__content {max-height: 0;max-width: 0;overflow: hidden;text-align: left;background-color: #f0f8ff;}#sk-container-id-3 div.sk-toggleable__content pre {margin: 0.2em;color: black;border-radius: 0.25em;background-color: #f0f8ff;}#sk-container-id-3 input.sk-toggleable__control:checked~div.sk-toggleable__content {max-height: 200px;max-width: 100%;overflow: auto;}#sk-container-id-3 input.sk-toggleable__control:checked~label.sk-toggleable__label-arrow:before {content: "▾";}#sk-container-id-3 div.sk-estimator input.sk-toggleable__control:checked~label.sk-toggleable__label {background-color: #d4ebff;}#sk-container-id-3 div.sk-label input.sk-toggleable__control:checked~label.sk-toggleable__label {background-color: #d4ebff;}#sk-container-id-3 input.sk-hidden--visually {border: 0;clip: rect(1px 1px 1px 1px);clip: rect(1px, 1px, 1px, 1px);height: 1px;margin: -1px;overflow: hidden;padding: 0;position: absolute;width: 1px;}#sk-container-id-3 div.sk-estimator {font-family: monospace;background-color: #f0f8ff;border: 1px dotted black;border-radius: 0.25em;box-sizing: border-box;margin-bottom: 0.5em;}#sk-container-id-3 div.sk-estimator:hover {background-color: #d4ebff;}#sk-container-id-3 div.sk-parallel-item::after {content: "";width: 100%;border-bottom: 1px solid gray;flex-grow: 1;}#sk-container-id-3 div.sk-label:hover label.sk-toggleable__label {background-color: #d4ebff;}#sk-container-id-3 div.sk-serial::before {content: "";position: absolute;border-left: 1px solid gray;box-sizing: border-box;top: 0;bottom: 0;left: 50%;z-index: 0;}#sk-container-id-3 div.sk-serial {display: flex;flex-direction: column;align-items: center;background-color: white;padding-right: 0.2em;padding-left: 0.2em;position: relative;}#sk-container-id-3 div.sk-item {position: relative;z-index: 1;}#sk-container-id-3 div.sk-parallel {display: flex;align-items: 
stretch;justify-content: center;background-color: white;position: relative;}#sk-container-id-3 div.sk-item::before, #sk-container-id-3 div.sk-parallel-item::before {content: "";position: absolute;border-left: 1px solid gray;box-sizing: border-box;top: 0;bottom: 0;left: 50%;z-index: -1;}#sk-container-id-3 div.sk-parallel-item {display: flex;flex-direction: column;z-index: 1;position: relative;background-color: white;}#sk-container-id-3 div.sk-parallel-item:first-child::after {align-self: flex-end;width: 50%;}#sk-container-id-3 div.sk-parallel-item:last-child::after {align-self: flex-start;width: 50%;}#sk-container-id-3 div.sk-parallel-item:only-child::after {width: 0;}#sk-container-id-3 div.sk-dashed-wrapped {border: 1px dashed gray;margin: 0 0.4em 0.5em 0.4em;box-sizing: border-box;padding-bottom: 0.4em;background-color: white;}#sk-container-id-3 div.sk-label label {font-family: monospace;font-weight: bold;display: inline-block;line-height: 1.2em;}#sk-container-id-3 div.sk-label-container {text-align: center;}#sk-container-id-3 div.sk-container {/* jupyter's `normalize.less` sets `[hidden] { display: none; }` but bootstrap.min.css set `[hidden] { display: none !important; }` so we also need the `!important` here to be able to override the default hidden behavior on the sphinx rendered scikit-learn.org. See: https://github.com/scikit-learn/scikit-learn/issues/21755 */display: inline-block !important;position: relative;}#sk-container-id-3 div.sk-text-repr-fallback {display: none;}</style><div id="sk-container-id-3" class="sk-top-container" style="overflow: auto;"><div class="sk-text-repr-fallback"><pre>LinearRegression()</pre><b>In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook. <br />On GitHub, the HTML representation is unable to render, please try loading this page with nbviewer.org.</b></div><div class="sk-container" hidden><div class="sk-item"><div class="sk-estimator sk-toggleable"><input class="sk-toggleable__control sk-hidden--visually" id="sk-estimator-id-3" type="checkbox" checked><label for="sk-estimator-id-3" class="sk-toggleable__label sk-toggleable__label-arrow">LinearRegression</label><div class="sk-toggleable__content"><pre>LinearRegression()</pre></div></div></div></div></div> ## Evaluation Results You can find the details about evaluation process and the evaluation results. | Metric | Value | |--------------------|----------| | Mean Squared Error | 5.01069 | | R-Squared | 0.883503 | # How to Get Started with the Model Use the code below to get started with the model. ```python import joblib import json import pandas as pd clf = joblib.load(linreg.pkl) with open("config.json") as f: config = json.load(f) clf.predict(pd.DataFrame.from_dict(config["sklearn"]["example_input"])) ``` # Model Card Authors This model card is written by following authors: [More Information Needed] # Model Card Contact You can contact the model card authors through following channels: [More Information Needed] # Citation Below you can find information related to citation. **BibTeX:** ``` [More Information Needed] ```
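Note that the snippet in the "How to Get Started with the Model" section above will not run as written, because the argument to `joblib.load` is missing quotes. A corrected sketch, using the file names from the card:

```python
import joblib
import json
import pandas as pd

# The filename must be a string; joblib.load(linreg.pkl) raises a NameError.
clf = joblib.load("linreg.pkl")

with open("config.json") as f:
    config = json.load(f)

print(clf.predict(pd.DataFrame.from_dict(config["sklearn"]["example_input"])))
```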
AmitT/test
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - ta license: apache-2.0 tags: - whisper-event - generated_from_trainer metrics: - wer model-index: - name: Whisper Small Ta - Bharat Ramanathan results: - task: type: automatic-speech-recognition name: Automatic Speech Recognition dataset: name: google/fleurs type: google/fleurs config: ta_in split: test metrics: - type: wer value: 15.8 name: WER - task: type: automatic-speech-recognition name: Automatic Speech Recognition dataset: name: mozilla-foundation/common_voice_11_0 type: mozilla-foundation/common_voice_11_0 config: ta split: test metrics: - type: wer value: 11.15 name: WER --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Small Ta - Bharat Ramanathan This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.1803 - Wer: 17.1456 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 32 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 5000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:-------:| | 0.3374 | 0.1 | 500 | 0.2579 | 23.3804 | | 0.29 | 0.2 | 1000 | 0.2260 | 20.9937 | | 0.2522 | 0.3 | 1500 | 0.2139 | 20.0682 | | 0.2338 | 0.4 | 2000 | 0.2025 | 19.6785 | | 0.223 | 0.5 | 2500 | 0.1979 | 18.3147 | | 0.211 | 0.6 | 3000 | 0.1927 | 17.8276 | | 0.2032 | 0.7 | 3500 | 0.1865 | 17.3892 | | 0.1978 | 0.8 | 4000 | 0.1839 | 17.5353 | | 0.1972 | 0.9 | 4500 | 0.1812 | 17.0969 | | 0.1894 | 1.0 | 5000 | 0.1803 | 17.1456 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0+cu117 - Datasets 2.7.1 - Tokenizers 0.13.2
Amro-Kamal/gpt
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Example Fine-Tuned Model for Unit 2 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) Describe your model here ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('lewtun/ddpm-celebahq-finetuned-butterflies-2epochs') image = pipeline().images[0] image ```
Andrey1989/mbert-finetuned-ner
[ "pytorch", "tensorboard", "bert", "token-classification", "dataset:wikiann", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- license: creativeml-openrail-m tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - rust inference: true extra_gated_prompt: |- This model is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage. The CreativeML OpenRAIL License specifies: 1. You can't use the model to deliberately produce nor share illegal or harmful outputs or content 2. CompVis claims no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license 3. You may re-distribute the weights and use the model commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully) Please read the full license carefully here: https://huggingface.co/spaces/CompVis/stable-diffusion-license extra_gated_heading: Please read the LICENSE to access this model --- This repository hosts weights for a Rust based version of Stable Diffusion. These weights have been directly adapted from the [stabilityai/stable-diffusion-2-1](https://huggingface.co/stabilityai/stable-diffusion-2-1) weights, they can be used with the [diffusers-rs](https://github.com/LaurentMazare/diffusers-rs) crate. To do so, checkout the diffusers-rs repo, copy the weights in the `data/` directory and run the following command: ```bash cargo run --example stable-diffusion --features clap -- --prompt "A rusty robot holding a fire torch." ``` This is for the image-to-text pipeline, example using the image-to-image and inpainting pipelines can be found in the [crate readme](https://github.com/LaurentMazare/diffusers-rs/blob/main/README.md). ## License The license is unchanged, see the [original version](https://huggingface.co/stabilityai/stable-diffusion-2/blob/main/LICENSE-MODEL). In line with paragraph 4, the original copyright is preserved: Copyright (c) 2022 Robin Rombach and Patrick Esser and contributors The model details section below is copied from the runwayml version, refer to the [original repo](https://huggingface.co/stabilityai/stable-diffusion-2-1) for use restrictions, limitations, bias discussion etc. ## Model Details - **Developed by:** Robin Rombach, Patrick Esser - **Model type:** Diffusion-based text-to-image generation model - **Language(s):** English - **License:** [CreativeML Open RAIL++-M License](https://huggingface.co/stabilityai/stable-diffusion-2/blob/main/LICENSE-MODEL) - **Model Description:** This is a model that can be used to generate and modify images based on text prompts. It is a Latent Diffusion Model that uses a fixed, pretrained text encoder (OpenCLIP-ViT/H). - **Resources for more information:** [GitHub Repository](https://github.com/Stability-AI/), [Paper](https://arxiv.org/abs/2112.10752). 
- **Cite as:** @InProceedings{Rombach_2022_CVPR, author = {Rombach, Robin and Blattmann, Andreas and Lorenz, Dominik and Esser, Patrick and Ommer, Bj\"orn}, title = {High-Resolution Image Synthesis With Latent Diffusion Models}, booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, month = {June}, year = {2022}, pages = {10684-10695} } ## Weight Extraction The weights have been converted by downloading them from the stabilityai/stable-diffusion-2-1 repo, and then running the following commands in the [diffusers-rs repo](https://github.com/LaurentMazare/diffusers-rs). After downloading the files, use Python to convert them to `npz` files. ```python import numpy as np import torch model = torch.load("./vae.bin") np.savez("./vae_v2.1.npz", **{k: v.numpy() for k, v in model.items()}) model = torch.load("./unet.bin") np.savez("./unet_v2.1.npz", **{k: v.numpy() for k, v in model.items()}) ``` Convert these `.npz` files to `.ot` files via `tensor-tools`. ```bash cargo run --release --example tensor-tools cp ./data/vae_v2.1.npz ./data/vae_v2.1.ot cargo run --release --example tensor-tools cp ./data/unet_v2.1.npz ./data/unet_v2.1.ot ```
Andrey78/my_model_nlp
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: openrail # library_name: diffusers tags: - stable-diffusion - text-to-image language: - en --- This is a copy of the Momocha model. \ I uploaded it so that it can be downloaded more easily when using Colab to run the webui. Examples: ![00201-2222268832-1girl, armpits, pointy_ears, solo, long_hair, breasts, necklace, silver_hair, flower, medium_breasts, upper_body, looking from b.png](https://s3.amazonaws.com/moonup/production/uploads/1671271004654-630eec51d38de32fd045b876.png) ![00045-2495047715-1girl, bodysuit, solo, breasts.png](https://s3.amazonaws.com/moonup/production/uploads/1670775733752-630eec51d38de32fd045b876.png) ![00190-610441010-hair_cones, keqing__(genshin_impact_), 1girl, purple_hair, blue_butterfly, twintails, gloves, hair_ornament, detached_sleeves, e.png](https://s3.amazonaws.com/moonup/production/uploads/1671271138632-630eec51d38de32fd045b876.png) ![00046-2162261515-PORTRAIT maid girl, long hair, 1girl, pretty face, sitting in a garden reading a book.png](https://s3.amazonaws.com/moonup/production/uploads/1670813654288-630eec51d38de32fd045b876.png) ![00047-2553088864-PORTRAIT maid girl, long hair, 1girl, pretty face, sitting in a garden reading a book, art by [ykcp-13000].png](https://s3.amazonaws.com/moonup/production/uploads/1670813687318-630eec51d38de32fd045b876.png) Textual inversion inputs trained on other models seem to have unexpectedly positive effects with the Momocha model. \ I don't know who the original uploader is. If this is infringing, please contact me and I'll remove it or transfer ownership.
Andrey78/my_nlp_test_model
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: -166.90 +/- 40.93 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
AnonymousSub/AR_rule_based_roberta_twostage_quadruplet_epochs_1_shard_10
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- language: - en license: apache-2.0 tags: - automatic-speech-recognition - en datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2r_en_vp-100k_age_teens-10_sixties-0_s232 Fine-tuned [facebook/wav2vec2-large-100k-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-100k-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (en)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
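A usage sketch based on the HuggingSound tool referenced above; the repo id and audio paths are placeholders.

```python
from huggingsound import SpeechRecognitionModel

# Placeholder repo id and audio files; replace with real values.
model = SpeechRecognitionModel("your-username/exp_w2v2r_en_vp-100k_age_teens-10_sixties-0_s232")

transcriptions = model.transcribe(["sample1.wav", "sample2.wav"])
print(transcriptions[0]["transcription"])
```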
AnonymousSub/SR_rule_based_bert_triplet_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 269.35 +/- 14.72 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
AnonymousSub/SR_rule_based_roberta_bert_quadruplet_epochs_1_shard_1
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- language: - es license: apache-2.0 tags: - automatic-speech-recognition - es datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2r_es_vp-100k_age_teens-10_sixties-0_s261 Fine-tuned [facebook/wav2vec2-large-100k-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-100k-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (es)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
AnonymousSub/SR_rule_based_roberta_bert_quadruplet_epochs_1_shard_10
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
2022-12-11T18:14:27Z
--- language: - en tags: - text-classification metrics: - accuracy (balanced) - F1 (weighted) widget: - text: "اسعدغيرك انت مو بس اسعدت العماله ترا اسعدتنا" example_title: "خليجي" - text: " سبحان الله في الغيوم شكل قلب" example_title: "فصحي" - text: "بلاش تحطي صور متبرجة ع صفحتك..." example_title: "خليجي" - text: "و حضرتك طيبة و شكرا علي الكلام الحلو ده يا مبهجة..." example_title: "مصري" --- # Dialectical-MSA-detection ## Model description This model was trained on 108,173 manually annotated pieces of user-generated content (e.g. tweets and online comments) to classify Arabic text into one of two categories: 'Dialectical' or 'MSA' (i.e. Modern Standard Arabic). ## Training data Dialectical-MSA-detection was trained on a subset of the [Arabic Online Commentary dataset (Zaidan et al., 2011)](https://github.com/sjeblee/AOC). The AOC dataset was created by crawling the websites of three Arabic newspapers and extracting online articles and readers' comments. ## Training procedure `xlm-roberta-base` was trained using the Hugging Face trainer with the following hyperparameters. ``` training_args = TrainingArguments( num_train_epochs=4, # total number of training epochs learning_rate=2e-5, # learning rate per_device_train_batch_size=32, # batch size per device during training per_device_eval_batch_size=4, # batch size for evaluation warmup_steps=0, # number of warmup steps for learning rate scheduler weight_decay=0.02, # strength of weight decay ) ``` ## Eval results The model was evaluated on 10% of the sentences (a 90-10 train-dev split), achieving an accuracy of 0.88 on the dev set. ## Limitations and bias The model was trained on sentences from the online commentary domain. Other forms of UGT, such as tweets, can differ in their degree of dialectness. ### BibTeX entry and citation info ```bibtex @article{saadany2022semi, title={A Semi-supervised Approach for a Better Translation of Sentiment in Dialectical Arabic UGT}, author={Saadany, Hadeel and Orasan, Constantin and Mohamed, Emad and Tantawy, Ashraf}, journal={arXiv preprint arXiv:2210.11899}, year={2022} } ```
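A hedged inference sketch with the Transformers `pipeline` API is shown below; the model path is a placeholder (the card does not spell out the full Hub id), and the returned label names depend on the checkpoint's id2label mapping. The two example sentences are taken from the widget section of the card.

```python
# Hedged sketch: classify Arabic text as Dialectical vs. MSA with a text-classification pipeline.
# The model path is a placeholder; label names depend on the model's id2label mapping.
from transformers import pipeline

classifier = pipeline("text-classification", model="<namespace>/Dialectical-MSA-detection")

examples = [
    "اسعدغيرك انت مو بس اسعدت العماله ترا اسعدتنا",  # widget example labelled as a Gulf dialect
    "سبحان الله في الغيوم شكل قلب",                   # widget example labelled as MSA
]
for text in examples:
    print(classifier(text))
```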
AnonymousSub/SR_rule_based_roberta_twostage_quadruplet_epochs_1_shard_1
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- language: - es license: apache-2.0 tags: - automatic-speech-recognition - es datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2r_es_vp-100k_age_teens-2_sixties-8_s481 Fine-tuned [facebook/wav2vec2-large-100k-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-100k-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (es)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
AnonymousSub/SR_rule_based_roberta_twostage_quadruplet_epochs_1_shard_10
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
2022-12-11T18:32:47Z
--- language: - es license: apache-2.0 tags: - automatic-speech-recognition - es datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2r_es_vp-100k_age_teens-2_sixties-8_s869 Fine-tuned [facebook/wav2vec2-large-100k-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-100k-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (es)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
AnonymousSub/SR_rule_based_roberta_twostagetriplet_hier_epochs_1_shard_1
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- language: - es license: apache-2.0 tags: - automatic-speech-recognition - es datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2r_es_vp-100k_age_teens-8_sixties-2_s808 Fine-tuned [facebook/wav2vec2-large-100k-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-100k-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (es)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
AnonymousSub/SR_rule_based_roberta_twostagetriplet_hier_epochs_1_shard_10
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- language: - fr license: apache-2.0 tags: - automatic-speech-recognition - fr datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2r_fr_vp-100k_age_teens-5_sixties-5_s408 Fine-tuned [facebook/wav2vec2-large-100k-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-100k-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (fr)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
AnonymousSub/SciFive_pubmedqa_question_generation
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
7
null
--- language: - fr license: apache-2.0 tags: - automatic-speech-recognition - fr datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2r_fr_vp-100k_age_teens-0_sixties-10_s131 Fine-tuned [facebook/wav2vec2-large-100k-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-100k-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (fr)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
AnonymousSub/cline-techqa
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
2022-12-11T19:13:26Z
--- language: - fr license: apache-2.0 tags: - automatic-speech-recognition - fr datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2r_fr_vp-100k_age_teens-2_sixties-8_s869 Fine-tuned [facebook/wav2vec2-large-100k-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-100k-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (fr)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
AnonymousSub/rule_based_bert_hier_diff_equal_wts_epochs_1_shard_10
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- tags: - generated_from_trainer datasets: - squad model-index: - name: my_awesome_qa_model results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # my_awesome_qa_model This model is a fine-tuned version of [dmis-lab/biobert-v1.1](https://huggingface.co/dmis-lab/biobert-v1.1) on the squad dataset. It achieves the following results on the evaluation set: - Loss: 1.4542 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 250 | 1.6813 | | 2.1248 | 2.0 | 500 | 1.4503 | | 2.1248 | 3.0 | 750 | 1.4542 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
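Since the card describes an extractive QA model fine-tuned on SQuAD but includes no usage snippet, a minimal sketch could look like this; the model path is a placeholder for wherever this checkpoint is hosted, and the question/context pair is illustrative only.

```python
# Minimal sketch: extractive question answering with the fine-tuned BioBERT checkpoint.
# The model path is a placeholder -- point it at the actual repository or a local folder.
from transformers import pipeline

qa = pipeline("question-answering", model="<namespace>/my_awesome_qa_model")

result = qa(
    question="Which base model was fine-tuned?",
    context="This model is a fine-tuned version of dmis-lab/biobert-v1.1 on the SQuAD dataset.",
)
print(result["answer"], round(result["score"], 3))
```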
AnonymousSub/rule_based_bert_mean_diff_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2r_de_xls-r_age_teens-5_sixties-5_s200 Fine-tuned [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
AnonymousSub/rule_based_bert_triplet_epochs_1_shard_10
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- tags: - LunarLander-v2 - ppo - deep-reinforcement-learning - reinforcement-learning - custom-implementation - deep-rl-course model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: -132.58 +/- 106.22 name: mean_reward verified: false --- # PPO Agent Playing LunarLander-v2 This is a trained model of a PPO agent playing LunarLander-v2. # Hyperparameters ```python {'exp_name': 'Honza-cartpolev1.py' 'seed': 1 'torch_deterministic': True 'cuda': True 'track': False 'wandb_project_name': 'cleanRL' 'wandb_entity': None 'capture_video': False 'env_id': 'LunarLander-v2' 'total_timesteps': 50000 'learning_rate': 0.00025 'num_envs': 4 'num_steps': 128 'anneal_lr': True 'gae': True 'gamma': 0.99 'gae_lambda': 0.95 'num_minibatches': 4 'update_epochs': 4 'norm_adv': True 'clip_coef': 0.2 'clip_vloss': True 'ent_coef': 0.01 'vf_coef': 0.5 'max_grad_norm': 0.5 'target_kl': None 'repo_id': 'Honza/ppo-LunarLander-v2' 'batch_size': 512 'minibatch_size': 128} ```
AnonymousSub/rule_based_bert_triplet_epochs_1_shard_1_wikiqa
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
31
null
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2r_de_xls-r_age_teens-0_sixties-10_s113 Fine-tuned [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
AnonymousSub/rule_based_hier_quadruplet_0.1_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2r_de_xls-r_age_teens-0_sixties-10_s288 Fine-tuned [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
AnonymousSub/rule_based_hier_triplet_0.1_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
2022-12-11T20:32:23Z
--- license: cc-by-sa-4.0 tags: - generated_from_trainer datasets: - fin metrics: - precision - recall - f1 - accuracy model-index: - name: fin4 results: - task: name: Token Classification type: token-classification dataset: name: fin type: fin config: default split: train args: default metrics: - name: Precision type: precision value: 0.9209486166007905 - name: Recall type: recall value: 0.9282868525896414 - name: F1 type: f1 value: 0.9246031746031745 - name: Accuracy type: accuracy value: 0.9913080347678609 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # fin4 This model is a fine-tuned version of [nlpaueb/sec-bert-num](https://huggingface.co/nlpaueb/sec-bert-num) on the fin dataset. It achieves the following results on the evaluation set: - Loss: 0.0549 - Precision: 0.9209 - Recall: 0.9283 - F1: 0.9246 - Accuracy: 0.9913 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 1.0 | 129 | 0.1041 | 0.8242 | 0.8406 | 0.8323 | 0.9788 | | No log | 2.0 | 258 | 0.0511 | 0.9173 | 0.9283 | 0.9228 | 0.9902 | | No log | 3.0 | 387 | 0.0430 | 0.9102 | 0.9283 | 0.9191 | 0.9907 | | 0.0598 | 4.0 | 516 | 0.0501 | 0.9368 | 0.9442 | 0.9405 | 0.9922 | | 0.0598 | 5.0 | 645 | 0.0436 | 0.9325 | 0.9363 | 0.9344 | 0.9924 | | 0.0598 | 6.0 | 774 | 0.0489 | 0.9433 | 0.9283 | 0.9357 | 0.9917 | | 0.0598 | 7.0 | 903 | 0.0499 | 0.932 | 0.9283 | 0.9301 | 0.9919 | | 0.0028 | 8.0 | 1032 | 0.0537 | 0.9209 | 0.9283 | 0.9246 | 0.9913 | | 0.0028 | 9.0 | 1161 | 0.0540 | 0.9170 | 0.9243 | 0.9206 | 0.9911 | | 0.0028 | 10.0 | 1290 | 0.0549 | 0.9209 | 0.9283 | 0.9246 | 0.9913 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
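The card reports token-classification metrics on the fin dataset but no usage example; a hedged sketch with the `pipeline` API follows. The model path and the example sentence are placeholders, and the entity labels depend on the fin dataset's tag scheme.

```python
# Hedged sketch: run the fine-tuned sec-bert-num token classifier on a financial sentence.
# The model path and example text are placeholders; labels follow the fin dataset's scheme.
from transformers import pipeline

ner = pipeline("token-classification", model="<namespace>/fin4", aggregation_strategy="simple")

text = "Alice Smith joined Example Capital in London in 2020."  # made-up example sentence
for entity in ner(text):
    print(entity["entity_group"], entity["word"], round(entity["score"], 3))
```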
AnonymousSub/rule_based_hier_triplet_0.1_epochs_1_shard_1_squad2.0
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
2022-12-11T20:32:47Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2r_de_xls-r_age_teens-10_sixties-0_s512 Fine-tuned [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
AnonymousSub/rule_based_hier_triplet_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- license: other --- A simple maze game on a map in plan view.
AnonymousSub/rule_based_hier_triplet_epochs_1_shard_10
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- license: creativeml-openrail-m tags: - text-to-image widget: - text: trsldamrl --- ### glitched Dreambooth model trained by abesmon with [Hugging Face Dreambooth Training Space](https://colab.research.google.com/drive/15cxJE2SBYJ0bZwoGzkdOSvqGtgz_Rvhk?usp=sharing) with the v2-1-512 base model You can run your new concept via the `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/drive/1FQkg1LBk99Ujpwn4fBZzGgEcuXz6-52-?usp=sharing). Don't forget to use the concept prompt! The concept is named **trsldamrl** (use that in your prompt). ### Examples | | | | | |-|-|-|-| |![1](examples/1.png)|![2](examples/2.jpg)|![3](examples/3.png)|![4](examples/4.png)| |![5](examples/5.png)| | | | ### train images | | | | |-|-|-| |![trsldamrl 0](https://huggingface.co/sd-dreambooth-library/glitched/resolve/main/concept_images/trsldamrl_%289%29.jpg)|![trsldamrl 1](https://huggingface.co/sd-dreambooth-library/glitched/resolve/main/concept_images/trsldamrl_%2811%29.jpg)|![trsldamrl 2](https://huggingface.co/sd-dreambooth-library/glitched/resolve/main/concept_images/trsldamrl_%282%29.jpg)| |![trsldamrl 3](https://huggingface.co/sd-dreambooth-library/glitched/resolve/main/concept_images/trsldamrl_%2821%29.jpg)|![trsldamrl 4](https://huggingface.co/sd-dreambooth-library/glitched/resolve/main/concept_images/trsldamrl_%281%29.jpg)|![trsldamrl 5](https://huggingface.co/sd-dreambooth-library/glitched/resolve/main/concept_images/trsldamrl_%2814%29.jpg)| |![trsldamrl 6](https://huggingface.co/sd-dreambooth-library/glitched/resolve/main/concept_images/trsldamrl_%2823%29.jpg)|![trsldamrl 7](https://huggingface.co/sd-dreambooth-library/glitched/resolve/main/concept_images/trsldamrl_%2818%29.jpg)|![trsldamrl 8](https://huggingface.co/sd-dreambooth-library/glitched/resolve/main/concept_images/trsldamrl_%2813%29.jpg)| |![trsldamrl 9](https://huggingface.co/sd-dreambooth-library/glitched/resolve/main/concept_images/trsldamrl_%285%29.jpg)|![trsldamrl 10](https://huggingface.co/sd-dreambooth-library/glitched/resolve/main/concept_images/trsldamrl_%286%29.jpg)|![trsldamrl 11](https://huggingface.co/sd-dreambooth-library/glitched/resolve/main/concept_images/trsldamrl_%2816%29.jpg)| |![trsldamrl 12](https://huggingface.co/sd-dreambooth-library/glitched/resolve/main/concept_images/trsldamrl_%2822%29.jpg)|![trsldamrl 13](https://huggingface.co/sd-dreambooth-library/glitched/resolve/main/concept_images/trsldamrl_%2820%29.jpg)|![trsldamrl 14](https://huggingface.co/sd-dreambooth-library/glitched/resolve/main/concept_images/trsldamrl_%2810%29.jpg)| |![trsldamrl 15](https://huggingface.co/sd-dreambooth-library/glitched/resolve/main/concept_images/trsldamrl_%2819%29.jpg)|![trsldamrl 16](https://huggingface.co/sd-dreambooth-library/glitched/resolve/main/concept_images/trsldamrl_%2824%29.jpg)|![trsldamrl 17](https://huggingface.co/sd-dreambooth-library/glitched/resolve/main/concept_images/trsldamrl_%288%29.jpg)| |![trsldamrl 18](https://huggingface.co/sd-dreambooth-library/glitched/resolve/main/concept_images/trsldamrl_%283%29.jpg)|![trsldamrl 19](https://huggingface.co/sd-dreambooth-library/glitched/resolve/main/concept_images/trsldamrl_%2817%29.jpg)|![trsldamrl 20](https://huggingface.co/sd-dreambooth-library/glitched/resolve/main/concept_images/trsldamrl_%287%29.jpg)| |![trsldamrl 21](https://huggingface.co/sd-dreambooth-library/glitched/resolve/main/concept_images/trsldamrl_%2815%29.jpg)|![trsldamrl 22](https://huggingface.co/sd-dreambooth-library/glitched/resolve/main/concept_images/trsldamrl_%2812%29.jpg)|![trsldamrl 23](https://huggingface.co/sd-dreambooth-library/glitched/resolve/main/concept_images/trsldamrl_%284%29.jpg)|
AnonymousSub/rule_based_hier_triplet_epochs_1_shard_1_wikiqa
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
28
null
--- license: creativeml-openrail-m --- **Prompts:** The model is dreamboothed on tagged suisei no majo images; some prompts that work are 1. suletta mercury 2. miorine rembran 3. gundam aerial --- **Training details:** Trained with the [kanewallmann Dreambooth repository](https://github.com/kanewallmann/Dreambooth-Stable-Diffusion) using tags as captions 1. Trained for 10000 steps, probably at the default learning rate lr=1e-6 2. Dataset: around 500 tagged images of suisei no majo + thousands of customized reg images --- **Problems:** As the model is trained only on tagged images, it is more flexible, but it is also harder to prompt. Some detailed description may be needed to get the character right, especially when trying to prompt suletta and miorine in the same image. --- **Example Generations:** ![00038-2321521523-long](https://huggingface.co/alea31415/suremio-suisei-no-majo/resolve/main/00038-2321521523-long.png) ![00005-894260846-miorine](https://huggingface.co/alea31415/suremio-suisei-no-majo/resolve/main/00005-894260846-miorine.png) ![00046-2321521523-suletta](https://huggingface.co/alea31415/suremio-suisei-no-majo/resolve/main/00046-2321521523-suletta.png) ![00060-2516217770-suletta](https://huggingface.co/alea31415/suremio-suisei-no-majo/resolve/main/00060-2516217770-suletta.png) ![00176-1352431307-miorine](https://huggingface.co/alea31415/suremio-suisei-no-majo/resolve/main/00176-1352431307-miorine.png) ![00184-1661291290-long](https://huggingface.co/alea31415/suremio-suisei-no-majo/resolve/main/00184-1661291290-long.png) ![00316-2911672629-miorine](https://huggingface.co/alea31415/suremio-suisei-no-majo/resolve/main/00316-2911672629-miorine.png) ![00147-1397396354-miorine](https://huggingface.co/alea31415/suremio-suisei-no-majo/resolve/main/00147-1397396354-miorine.png) ![00400-3442904358-gundam](https://huggingface.co/alea31415/suremio-suisei-no-majo/resolve/main/00400-3442904358-gundam.png) ![00407-2385989155-gundam](https://huggingface.co/alea31415/suremio-suisei-no-majo/resolve/main/00407-2385989155-gundam.png)
AnonymousSub/rule_based_hier_triplet_epochs_1_shard_1_wikiqa_copy
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- license: creativeml-openrail-m --- ![00005-2932446845.png](https://s3.amazonaws.com/moonup/production/uploads/1670800545537-62954ae4f937190946e238fa.png) Anime-based model merged from f222/Anythingv3/WDv1.3/NAI_AnimeFull/HassanBlend14 at 50/30, 50/50, 30/70, 40/60 and 23/77 ratios. Based on SDv1.5, pruned. It works best with ancestral samplers, but gives great results with the others too at CFG between 5-12 and 15-50 steps. None of the samples used a hypernet or embedding. <3 ![grid-0000.png](https://s3.amazonaws.com/moonup/production/uploads/1670791728969-62954ae4f937190946e238fa.png) ![grid-0000.png](https://s3.amazonaws.com/moonup/production/uploads/1670793412056-62954ae4f937190946e238fa.png) ![grid-0000.png](https://s3.amazonaws.com/moonup/production/uploads/1670793451620-62954ae4f937190946e238fa.png)
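A hedged `diffusers` sketch reflecting the recommended settings (ancestral sampler, CFG 5-12, 15-50 steps) is below. It assumes diffusers-format weights are available for this merge, which may not be the case if only a pruned .ckpt is published; the repo id and prompt are placeholders.

```python
# Hedged sketch: sample with an Euler-ancestral scheduler, CFG ~7 and 30 steps,
# matching the ranges recommended in the card. Assumes diffusers-format weights
# exist for this merge; the repo id and prompt are placeholders.
import torch
from diffusers import StableDiffusionPipeline, EulerAncestralDiscreteScheduler

pipe = StableDiffusionPipeline.from_pretrained(
    "<namespace>/<merged-anime-model>", torch_dtype=torch.float16
)
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")

image = pipe(
    "1girl, looking at viewer, detailed background",  # placeholder prompt
    guidance_scale=7.0,       # card suggests CFG between 5 and 12
    num_inference_steps=30,   # card suggests 15-50 steps
).images[0]
image.save("sample.png")
```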
AnonymousSub/rule_based_only_classfn_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 275.85 +/- 20.59 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
AnonymousSub/rule_based_only_classfn_twostage_epochs_1_shard_1_wikiqa
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
null
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2r_de_xls-r_age_teens-8_sixties-2_s945 Fine-tuned [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
AnonymousSub/rule_based_roberta_bert_triplet_epochs_1_shard_1
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- tags: - generated_from_trainer metrics: - bleu model-index: - name: fa-t5-paraphraser results: [] datasets: - alighasemi/fa-paraphrase language: - fa --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # fa-t5-paraphraser This model is a fine-tuned version of [alighasemi/fa-t5-base](https://huggingface.co/alighasemi/fa-t5-base) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: nan - Bleu: 0.7025 - Gen Len: 5.8813 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:------:|:-------:| | 0.0 | 1.0 | 55088 | nan | 0.7025 | 5.8813 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
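The card reports BLEU for Persian paraphrasing but shows no inference code; a minimal text2text sketch follows. The Hub path, the absence of a task prefix and the example sentence are all assumptions, and given the NaN validation loss reported above, outputs should be checked carefully.

```python
# Minimal sketch: paraphrase a Persian sentence with the fine-tuned T5 checkpoint.
# The Hub path is a placeholder and the absence of a task prefix is an assumption.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_id = "<namespace>/fa-t5-paraphraser"  # placeholder path
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

text = "هوا امروز خیلی خوب است."  # illustrative Persian input
inputs = tokenizer(text, return_tensors="pt")
outputs = model.generate(**inputs, max_length=64, num_beams=4)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```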
AnonymousSub/rule_based_roberta_hier_quadruplet_0.1_epochs_1_shard_1_squad2.0
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Huggy library_name: ml-agents --- # **ppo** Agent playing **Huggy** This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy 2. Step 1: Write your model_id: Convolution/ppo-Huggy 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
AnonymousSub/rule_based_roberta_hier_quadruplet_epochs_1_shard_1
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Huggy library_name: ml-agents --- # **ppo** Agent playing **Huggy** This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy 2. Step 1: Write your model_id: RamonAnkersmit/ppo-Huggy 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
AnonymousSub/rule_based_roberta_hier_quadruplet_epochs_1_shard_10
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- license: cc0-1.0 --- ![VntgCrm_example_grid.jpg](https://s3.amazonaws.com/moonup/production/uploads/1670833927638-6334a32686c3fdcdc7adf4c0.jpeg) [![Become A Patreon](https://badgen.net/badge/become/a%20patron/F96854)](https://www.patreon.com/sebastiankamph) # Vintage cream photo film Based on SD 2.1 768x768 **Token word: vntgcrm style** **Example prompt to start out with** RAW candid cinema, woman portrait, vntgcrm style, 16mm, ((remarkable color)), (ultra realistic) Negative: ugly, disfigured, deformed, too many hands, makeup, cartoon, render **Support my work on Patreon for Early access model releases** https://www.patreon.com/sebastiankamph **AI Art, Stable diffusion guides and tutorials on Youtube** https://www.youtube.com/@sebastiankamph **Chat in our community discord** https://discord.com/invite/dFB7zuXyFY **Installation** Download the .ckpt and the .yaml file. Put them inside \stable-diffusion-webui\Models\Stable-diffusion\ https://huggingface.co/SebastianKamphYT/VintageCream/blob/main/VintageCream.ckpt https://huggingface.co/SebastianKamphYT/VintageCream/blob/main/VintageCream.yaml
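As an alternative to downloading the two files by hand, a small `huggingface_hub` sketch can fetch both into the webui folder named in the installation instructions; the repo id and filenames come from the download links in the card.

```python
# Sketch: download the checkpoint and its .yaml config into the webui models folder.
# Repo id and filenames are taken from the download links above; the target path
# mirrors the folder named in the installation instructions.
from huggingface_hub import hf_hub_download

target_dir = r"stable-diffusion-webui\Models\Stable-diffusion"
for filename in ["VintageCream.ckpt", "VintageCream.yaml"]:
    path = hf_hub_download(
        repo_id="SebastianKamphYT/VintageCream",
        filename=filename,
        local_dir=target_dir,
    )
    print("Downloaded:", path)
```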
AnonymousSub/rule_based_roberta_hier_quadruplet_epochs_1_shard_1_squad2.0
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Huggy library_name: ml-agents --- # **ppo** Agent playing **Huggy** This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy 2. Step 1: Write your model_id: Jairnetojp/ppo-Huggy 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
AnonymousSub/rule_based_roberta_hier_triplet_epochs_1_shard_1
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- tags: - generated_from_trainer model-index: - name: test-erfan results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # test-erfan This model is a fine-tuned version of [erfan226/persian-t5-paraphraser](https://huggingface.co/erfan226/persian-t5-paraphraser) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:------:|:-------:| | No log | 1.0 | 63 | nan | 13.083 | 15.31 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
AnonymousSub/rule_based_roberta_twostagequadruplet_hier_epochs_1_shard_1
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- language: - en license: apache-2.0 tags: - automatic-speech-recognition - en datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2r_en_xls-r_age_teens-5_sixties-5_s769 Fine-tuned [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) for speech recognition using the train split of [Common Voice 7.0 (en)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
AnonymousSub/rule_based_roberta_twostagequadruplet_hier_epochs_1_shard_10
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- language: - en license: apache-2.0 tags: - automatic-speech-recognition - en datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2r_en_xls-r_age_teens-5_sixties-5_s870 Fine-tuned [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) for speech recognition using the train split of [Common Voice 7.0 (en)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
AnonymousSub/rule_based_roberta_twostagequadruplet_hier_epochs_1_shard_1_squad2.0
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - food101 metrics: - accuracy model-index: - name: swin-finetuned-food101 results: - task: name: Image Classification type: image-classification dataset: name: food101 type: food101 config: default split: train args: default metrics: - name: Accuracy type: accuracy value: 0.9220198019801981 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-finetuned-food101 This model is a fine-tuned version of [microsoft/swin-base-patch4-window7-224](https://huggingface.co/microsoft/swin-base-patch4-window7-224) on the food101 dataset. It achieves the following results on the evaluation set: - Loss: 0.4401 - Accuracy: 0.9220 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:| | 0.0579 | 1.0 | 1183 | 0.4190 | 0.9102 | | 0.0129 | 2.0 | 2366 | 0.4179 | 0.9155 | | 0.0076 | 3.0 | 3549 | 0.4219 | 0.9198 | | 0.0197 | 4.0 | 4732 | 0.4487 | 0.9160 | | 0.0104 | 5.0 | 5915 | 0.4414 | 0.9210 | | 0.0007 | 6.0 | 7098 | 0.4401 | 0.9220 | | 0.0021 | 7.0 | 8281 | 0.4401 | 0.9220 | | 0.0015 | 8.0 | 9464 | 0.4401 | 0.9220 | | 0.0056 | 9.0 | 10647 | 0.4401 | 0.9220 | | 0.0019 | 10.0 | 11830 | 0.4401 | 0.9220 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
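Since the card reports 92.2% accuracy on Food-101 but has no inference snippet, a minimal image-classification sketch could look like this; the model path and image URL are placeholders.

```python
# Minimal sketch: classify a food photo with the fine-tuned Swin checkpoint.
# The model path and image URL are placeholders.
import requests
from PIL import Image
from transformers import pipeline

classifier = pipeline("image-classification", model="<namespace>/swin-finetuned-food101")

url = "https://example.com/some-food-photo.jpg"  # placeholder image URL
image = Image.open(requests.get(url, stream=True).raw)
for pred in classifier(image, top_k=3):
    print(pred["label"], round(pred["score"], 3))
```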
AnonymousSub/rule_based_roberta_twostagetriplet_epochs_1_shard_1
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- language: - en license: apache-2.0 tags: - automatic-speech-recognition - en datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2r_en_xls-r_age_teens-0_sixties-10_s732 Fine-tuned [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) for speech recognition using the train split of [Common Voice 7.0 (en)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
AnonymousSub/rule_based_roberta_twostagetriplet_epochs_1_shard_10
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- language: - en license: apache-2.0 tags: - automatic-speech-recognition - en datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2r_en_xls-r_age_teens-0_sixties-10_s847 Fine-tuned [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) for speech recognition using the train split of [Common Voice 7.0 (en)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
AnonymousSub/rule_based_twostage_quadruplet_epochs_1_shard_1_wikiqa
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 256.41 +/- 26.76 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
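The usage section above is still the template's TODO placeholder. A hedged loading sketch is given below; the `repo_id` and `filename` are hypothetical, since the card does not state them, and only `PPO`, `LunarLander-v2`, `stable_baselines3`, and `huggingface_sb3` come from the card itself.

```python
# Hedged sketch for loading and evaluating a PPO LunarLander-v2 checkpoint from the Hub.
# repo_id and filename below are hypothetical placeholders.
import gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

checkpoint = load_from_hub(
    repo_id="<owner>/ppo-LunarLander-v2",  # placeholder repository id
    filename="ppo-LunarLander-v2.zip",     # placeholder checkpoint filename
)
model = PPO.load(checkpoint)

env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```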
AnonymousSub/rule_based_twostagetriplet_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- language: - en license: apache-2.0 tags: - automatic-speech-recognition - en datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2r_en_xls-r_age_teens-2_sixties-8_s717 Fine-tuned [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) for speech recognition using the train split of [Common Voice 7.0 (en)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
AnonymousSub/rule_based_twostagetriplet_epochs_1_shard_1_wikiqa
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
2022-12-11T23:25:09Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 256.76 +/- 18.46 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
ArBert/roberta-base-finetuned-ner-agglo
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-12-12T01:57:14Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 286.12 +/- 17.95 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
ArcQ/gpt-experiments
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-Taxi-v3TTT results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.56 +/- 2.71 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python model = load_from_hub(repo_id="ThomasSimonini/q-Taxi-v3TTT", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
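The snippet above calls a `load_from_hub` helper that the card never defines. A minimal stand-in is sketched below, assuming the repository stores a pickled dict with at least `qtable` and `env_id` keys (as the Deep RL course template typically does); adjust the keys if the actual file differs.

```python
# Hedged stand-in for the undefined load_from_hub helper used above.
# Assumes q-learning.pkl is a pickled dict containing "qtable" and "env_id".
import pickle

import gym
import numpy as np
from huggingface_hub import hf_hub_download

def load_from_hub(repo_id: str, filename: str) -> dict:
    local_path = hf_hub_download(repo_id=repo_id, filename=filename)
    with open(local_path, "rb") as f:
        return pickle.load(f)

model = load_from_hub(repo_id="ThomasSimonini/q-Taxi-v3TTT", filename="q-learning.pkl")
env = gym.make(model["env_id"])

state = env.reset()  # note: gym>=0.26 returns (obs, info) instead of a bare observation
action = int(np.argmax(model["qtable"][state]))  # greedy action from the loaded Q-table
```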
Archie/myProject
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit --- # ERNIE-Layout_Pytorch [This repo](https://github.com/NormXU/ERNIE-Layout-Pytorch) is an unofficial PyTorch implementation of [ERNIE-Layout](http://arxiv.org/abs/2210.06155), which was originally released through PaddleNLP. The model is translated from [PaddlePaddle/ernie-layoutx-base-uncased](https://huggingface.co/PaddlePaddle/ernie-layoutx-base-uncased) with [tools/convert2torch.py](https://github.com/NormXU/ERNIE-Layout-Pytorch/blob/main/tools/convert2torch.py). That script converts the state dicts of the PaddlePaddle ERNIE pretrained models into PyTorch format. Feel free to edit it if necessary. **A Quick Example** ```python import torch from PIL import Image import numpy as np import torch.nn.functional as F from networks.model_util import ernie_qa_processing from networks import ErnieLayoutConfig, ErnieLayoutForQuestionAnswering, ErnieLayoutImageProcessor, \ ERNIELayoutProcessor, ErnieLayoutTokenizerFast pretrain_torch_model_or_path = "Norm/ERNIE-Layout-Pytorch" doc_imag_path = "/path/to/dummy_input.jpeg" device = torch.device("cuda:0") # Dummy Input context = ['This is an example document', 'All ocr boxes are inserted into this list'] layout = [[381, 91, 505, 115], [738, 96, 804, 122]] # all boxes are resized between 0 - 1000 pil_image = Image.open(doc_imag_path).convert("RGB") # initialize tokenizer tokenizer = ErnieLayoutTokenizerFast.from_pretrained(pretrained_model_name_or_path=pretrain_torch_model_or_path) # initialize feature extractor feature_extractor = ErnieLayoutImageProcessor(apply_ocr=False) processor = ERNIELayoutProcessor(image_processor=feature_extractor, tokenizer=tokenizer) # Tokenize context & questions context_encodings = processor(pil_image, context) question = "what is it?" tokenized_res = ernie_qa_processing(tokenizer, question, layout, context_encodings) tokenized_res['input_ids'] = torch.tensor([tokenized_res['input_ids']]).to(device) tokenized_res['bbox'] = torch.tensor([tokenized_res['bbox']]).to(device) tokenized_res['pixel_values'] = torch.tensor(np.array(context_encodings.data['pixel_values'])).to(device) # dummy answer start && end index tokenized_res['start_positions'] = torch.tensor([6]).to(device) tokenized_res['end_positions'] = torch.tensor([12]).to(device) # initialize config config = ErnieLayoutConfig.from_pretrained(pretrained_model_name_or_path=pretrain_torch_model_or_path) config.num_classes = 2 # start and end # initialize ERNIE for VQA model = ErnieLayoutForQuestionAnswering.from_pretrained( pretrained_model_name_or_path=pretrain_torch_model_or_path, config=config, ) model.to(device) output = model(**tokenized_res) # decode output start_max = torch.argmax(F.softmax(output.start_logits, dim=-1)) end_max = torch.argmax(F.softmax(output.end_logits, dim=-1)) + 1 # add one because Python slicing is end-exclusive answer = tokenizer.decode(tokenized_res["input_ids"][0][start_max: end_max]) print(answer) ```
ArenaGrenade/char-cnn
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - whisper-event - hf-asr-leaderboard - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 - google/fleurs - bayartsogt/ulaanbal-v0 - bayartsogt/youtube-mongolian-v1 metrics: - wer - cer model-index: - name: whisper-small-mn-8-bayartsogt results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Common Voice 11.0 type: mozilla-foundation/common_voice_11_0 config: mn split: test args: language: mn metrics: - name: Wer type: wer value: 26.518461874590344 - name: Cer type: cer value: 9.46811616603981 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # whisper-small-mn-8 This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.2421 - Wer: 26.5185 - Cer: 9.4681 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 15000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | Cer | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:| | 0.3717 | 0.35 | 1000 | 0.4004 | 46.9576 | 16.9664 | | 0.286 | 0.69 | 2000 | 0.3129 | 37.3935 | 13.5504 | | 0.2287 | 1.04 | 3000 | 0.2768 | 33.1931 | 11.7806 | | 0.2257 | 1.39 | 4000 | 0.2590 | 30.7243 | 11.0232 | | 0.2029 | 1.73 | 5000 | 0.2428 | 29.2003 | 10.4144 | | 0.1691 | 2.08 | 6000 | 0.2408 | 28.4357 | 10.0306 | | 0.1626 | 2.43 | 7000 | 0.2369 | 28.0588 | 10.0486 | | 0.1588 | 2.77 | 8000 | 0.2321 | 27.2340 | 9.6819 | | 0.1271 | 3.12 | 9000 | 0.2349 | 26.8407 | 9.5574 | | 0.1263 | 3.47 | 10000 | 0.2356 | 27.1630 | 9.6519 | | 0.1314 | 3.81 | 11000 | 0.2340 | 26.5567 | 9.4278 | | 0.1062 | 4.16 | 12000 | 0.2390 | 26.6332 | 9.5162 | | 0.1081 | 4.5 | 13000 | 0.2398 | 26.5840 | 9.5085 | | 0.1033 | 4.85 | 14000 | 0.2402 | 26.7096 | 9.4801 | | 0.097 | 5.2 | 15000 | 0.2421 | 26.5185 | 9.4681 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0+cu117 - Datasets 2.7.1.dev0 - Tokenizers 0.13.2
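Although the auto-generated summary above says "the None dataset", the card's own metadata lists Common Voice 11.0, Google FLEURS, ulaanbal-v0, and youtube-mongolian-v1 as training data. For inference, a hedged sketch with the `transformers` ASR pipeline might look like this; the hub path and audio file are placeholders, not values taken from the card.

```python
# Hedged sketch: transcribing Mongolian speech with a fine-tuned Whisper-small checkpoint.
# The hub path and audio file are placeholders, not confirmed by the card.
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="<owner>/whisper-small-mn-8",  # placeholder repository path
    chunk_length_s=30,                   # long-form audio is processed in 30 s chunks
)

# Depending on the transformers version, forcing Mongolian output may additionally
# require setting the model's forced_decoder_ids / language before generation.
result = asr("sample_mn.wav")
print(result["text"])
```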
Arghyad/Loki_small
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 267.90 +/- 19.51 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
Arpita/opus-mt-en-ro-finetuned-syn-to-react
[ "pytorch", "tensorboard", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- library_name: paddlenlp --- # PaddleCI/tiny-random-bert
AryanLala/autonlp-Scientific_Title_Generator-34558227
[ "pytorch", "pegasus", "text2text-generation", "en", "dataset:AryanLala/autonlp-data-Scientific_Title_Generator", "transformers", "autonlp", "co2_eq_emissions", "autotrain_compatible", "has_space" ]
text2text-generation
{ "architectures": [ "PegasusForConditionalGeneration" ], "model_type": "pegasus", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
103
null
--- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional image generation of cute 🦋. ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('azimuth3d/sd-class-butterflies-64') image = pipeline().images[0] image ```
AshLukass/AshLukass
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-12-12T04:26:12Z
--- language: en thumbnail: http://www.huggingtweets.com/cantliveinpeace/1670819358402/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1591132987923496969/1_qRWCXK_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">roof</div> <div style="text-align: center; font-size: 14px;">@cantliveinpeace</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from roof. | Data | roof | | --- | --- | | Tweets downloaded | 3189 | | Retweets | 177 | | Short tweets | 893 | | Tweets kept | 2119 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/28dcnrwo/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @cantliveinpeace's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/127zdj5s) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/127zdj5s/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/cantliveinpeace') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
Ashagi/Ashvx
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional image generation of cute 🦋. ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('reachrkr/sd-class-butterflies-32') image = pipeline().images[0] image ```
AshiNLP/Bert_model
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- inference: true language: - en tags: - stable-diffusion - text-to-image license: creativeml-openrail-m --- # Seek.art MEGA is a general use "anything" model that significantly improves on 1.5 across dozens of styles. Created by Coreco at [seek.art](https://seek.art/) This model was trained on nearly 10k high-quality public domain digital artworks with the goal of improving output quality across the board. We find the model to be highly flexible in its ability to mix various styles, subjects, and details. We recommend resolutions above 640px in one or both dimensions for best results. You can try this model and several others for free at [seek.art](https://seek.art/). We also recommend an inference tool supporting prompt weighting and high resolution optimization / fixing for best results. We suggest [InvokeAI](https://github.com/invoke-ai/InvokeAI) as a sensibly licensed and fully featured open-source inference tool. ### Examples <img src="https://huggingface.co/coreco/seek.art_MEGA/resolve/main/examples.png" style="max-width: 800px;" width="100%"/> The above example images including the prompts and all relevant settings are available [here](https://seek.art/explore/search?collection=6112a64d-bd8b-4043-8d96-88c7cfa65c43). Additionally, search thousands of high quality prompts on [seek.art](https://seek.art/) for free. ### License - This model carries a commercial restricted sub-license, please read carefully: [License](https://huggingface.co/coreco/seek.art_MEGA/blob/main/LICENSE.txt) ### Use Restrictions You agree not to use the Model or Derivatives of the Model: - for the commercial purpose of hosted content generation (inference) without the express written permission of seek.art. Model output for personal use carries no such commercial restriction. - In any way that violates any applicable national, federal, state, local or international law or regulation; - For the purpose of exploiting, harming or attempting to exploit or harm minors in any way; - To generate or disseminate verifiably false information and/or content with the purpose of harming others; - To generate or disseminate personal identifiable information that can be used to harm an individual; - To defame, disparage or otherwise harass others; - For fully automated decision making that adversely impacts an individual’s legal rights or otherwise creates or modifies a binding, enforceable obligation; - For any use intended to or which has the effect of discriminating against or harming individuals or groups based on online or offline social behavior or known or predicted personal or personality characteristics; - To exploit any of the vulnerabilities of a specific group of persons based on their age, social, physical or mental characteristics, in order to materially distort the behavior of a person pertaining to that group in a manner that causes or is likely to cause that person or another person physical or psychological harm; - For any use intended to or which has the effect of discriminating against individuals or groups based on legally protected characteristics or categories; - To provide medical advice and medical results interpretation; - To generate or disseminate information for the purpose to be used for administration of justice, law enforcement, immigration or asylum processes, such as predicting an individual will commit fraud/crime commitment (e.g. by text profiling, drawing causal relationships between assertions made in documents, indiscriminate and arbitrarily-targeted use).
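For completeness, a hedged `diffusers` sketch follows. It assumes the checkpoint is available in diffusers format under `coreco/seek.art_MEGA` (the namespace used by the card's own file links); verify the repository layout before relying on it.

```python
# Hedged sketch, assuming a diffusers-format checkpoint at coreco/seek.art_MEGA
# (the namespace in the card's file URLs); the prompt is purely illustrative.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "coreco/seek.art_MEGA",
    torch_dtype=torch.float16,
).to("cuda")

# The card recommends resolutions above 640 px in one or both dimensions.
image = pipe(
    "a detailed digital painting of a lighthouse at dawn",
    height=640,
    width=640,
    num_inference_steps=30,
).images[0]
image.save("seekart_sample.png")
```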
AshtonBenson/DialoGPT-small-quentin-coldwater
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-12-12T04:43:19Z
--- license: mit --- ### Pokemon classic artwork on Stable Diffusion Pokémon classic artwork from RGBY and GSC concept (re-scaled to max width and height 512 px) This is the `<pkmn-classic>` concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the [Stable Conceptualizer](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) notebook. You can also train your own concepts and load them into the concept libraries using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb). Here is the new concept you will be able to use as a `style`: ![<pkmn-classic> 0](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/0.jpeg) ![<pkmn-classic> 1](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/1.jpeg) ![<pkmn-classic> 2](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/2.jpeg) ![<pkmn-classic> 3](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/3.jpeg) ![<pkmn-classic> 4](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/4.jpeg) ![<pkmn-classic> 5](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/5.jpeg) ![<pkmn-classic> 6](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/6.jpeg) ![<pkmn-classic> 7](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/7.jpeg) ![<pkmn-classic> 8](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/8.jpeg) ![<pkmn-classic> 9](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/9.jpeg) ![<pkmn-classic> 10](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/10.jpeg) ![<pkmn-classic> 11](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/11.jpeg) ![<pkmn-classic> 12](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/12.jpeg) ![<pkmn-classic> 13](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/13.jpeg) ![<pkmn-classic> 14](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/14.jpeg) ![<pkmn-classic> 15](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/15.jpeg) ![<pkmn-classic> 16](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/16.jpeg) ![<pkmn-classic> 17](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/17.jpeg) ![<pkmn-classic> 18](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/18.jpeg) ![<pkmn-classic> 19](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/19.jpeg) ![<pkmn-classic> 20](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/20.jpeg) ![<pkmn-classic> 21](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/21.jpeg) ![<pkmn-classic> 
22](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/22.jpeg) ![<pkmn-classic> 23](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/23.jpeg) ![<pkmn-classic> 24](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/24.jpeg) ![<pkmn-classic> 25](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/25.jpeg) ![<pkmn-classic> 26](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/26.jpeg) ![<pkmn-classic> 27](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/27.jpeg) ![<pkmn-classic> 28](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/28.jpeg) ![<pkmn-classic> 29](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/29.jpeg) ![<pkmn-classic> 30](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/30.jpeg) ![<pkmn-classic> 31](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/31.jpeg) ![<pkmn-classic> 32](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/32.jpeg) ![<pkmn-classic> 33](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/33.jpeg) ![<pkmn-classic> 34](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/34.jpeg) ![<pkmn-classic> 35](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/35.jpeg) ![<pkmn-classic> 36](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/36.jpeg) ![<pkmn-classic> 37](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/37.jpeg) ![<pkmn-classic> 38](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/38.jpeg) ![<pkmn-classic> 39](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/39.jpeg) ![<pkmn-classic> 40](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/40.jpeg) ![<pkmn-classic> 41](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/41.jpeg) ![<pkmn-classic> 42](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/42.jpeg) ![<pkmn-classic> 43](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/43.jpeg) ![<pkmn-classic> 44](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/44.jpeg) ![<pkmn-classic> 45](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/45.jpeg) ![<pkmn-classic> 46](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/46.jpeg) ![<pkmn-classic> 47](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/47.jpeg) ![<pkmn-classic> 48](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/48.jpeg) ![<pkmn-classic> 49](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/49.jpeg) ![<pkmn-classic> 
50](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/50.jpeg) ![<pkmn-classic> 51](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/51.jpeg) ![<pkmn-classic> 52](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/52.jpeg) ![<pkmn-classic> 53](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/53.jpeg) ![<pkmn-classic> 54](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/54.jpeg) ![<pkmn-classic> 55](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/55.jpeg) ![<pkmn-classic> 56](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/56.jpeg) ![<pkmn-classic> 57](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/57.jpeg) ![<pkmn-classic> 58](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/58.jpeg) ![<pkmn-classic> 59](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/59.jpeg) ![<pkmn-classic> 60](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/60.jpeg) ![<pkmn-classic> 61](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/61.jpeg) ![<pkmn-classic> 62](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/62.jpeg) ![<pkmn-classic> 63](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/63.jpeg) ![<pkmn-classic> 64](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/64.jpeg) ![<pkmn-classic> 65](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/65.jpeg) ![<pkmn-classic> 66](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/66.jpeg) ![<pkmn-classic> 67](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/67.jpeg) ![<pkmn-classic> 68](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/68.jpeg) ![<pkmn-classic> 69](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/69.jpeg) ![<pkmn-classic> 70](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/70.jpeg) ![<pkmn-classic> 71](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/71.jpeg) ![<pkmn-classic> 72](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/72.jpeg) ![<pkmn-classic> 73](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/73.jpeg) ![<pkmn-classic> 74](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/74.jpeg) ![<pkmn-classic> 75](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/75.jpeg) ![<pkmn-classic> 76](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/76.jpeg) ![<pkmn-classic> 77](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/77.jpeg) ![<pkmn-classic> 
78](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/78.jpeg) ![<pkmn-classic> 79](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/79.jpeg) ![<pkmn-classic> 80](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/80.jpeg) ![<pkmn-classic> 81](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/81.jpeg) ![<pkmn-classic> 82](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/82.jpeg) ![<pkmn-classic> 83](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/83.jpeg) ![<pkmn-classic> 84](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/84.jpeg) ![<pkmn-classic> 85](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/85.jpeg) ![<pkmn-classic> 86](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/86.jpeg) ![<pkmn-classic> 87](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/87.jpeg) ![<pkmn-classic> 88](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/88.jpeg) ![<pkmn-classic> 89](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/89.jpeg) ![<pkmn-classic> 90](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/90.jpeg) ![<pkmn-classic> 91](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/91.jpeg) ![<pkmn-classic> 92](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/92.jpeg) ![<pkmn-classic> 93](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/93.jpeg) ![<pkmn-classic> 94](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/94.jpeg) ![<pkmn-classic> 95](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/95.jpeg) ![<pkmn-classic> 96](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/96.jpeg) ![<pkmn-classic> 97](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/97.jpeg) ![<pkmn-classic> 98](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/98.jpeg) ![<pkmn-classic> 99](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/99.jpeg) ![<pkmn-classic> 100](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/100.jpeg) ![<pkmn-classic> 101](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/101.jpeg) ![<pkmn-classic> 102](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/102.jpeg) ![<pkmn-classic> 103](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/103.jpeg) ![<pkmn-classic> 104](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/104.jpeg) ![<pkmn-classic> 105](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/105.jpeg) ![<pkmn-classic> 
106](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/106.jpeg) ![<pkmn-classic> 107](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/107.jpeg) ![<pkmn-classic> 108](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/108.jpeg) ![<pkmn-classic> 109](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/109.jpeg) ![<pkmn-classic> 110](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/110.jpeg) ![<pkmn-classic> 111](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/111.jpeg) ![<pkmn-classic> 112](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/112.jpeg) ![<pkmn-classic> 113](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/113.jpeg) ![<pkmn-classic> 114](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/114.jpeg) ![<pkmn-classic> 115](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/115.jpeg) ![<pkmn-classic> 116](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/116.jpeg) ![<pkmn-classic> 117](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/117.jpeg) ![<pkmn-classic> 118](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/118.jpeg) ![<pkmn-classic> 119](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/119.jpeg) ![<pkmn-classic> 120](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/120.jpeg) ![<pkmn-classic> 121](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/121.jpeg) ![<pkmn-classic> 122](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/122.jpeg) ![<pkmn-classic> 123](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/123.jpeg) ![<pkmn-classic> 124](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/124.jpeg) ![<pkmn-classic> 125](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/125.jpeg) ![<pkmn-classic> 126](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/126.jpeg) ![<pkmn-classic> 127](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/127.jpeg) ![<pkmn-classic> 128](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/128.jpeg) ![<pkmn-classic> 129](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/129.jpeg) ![<pkmn-classic> 130](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/130.jpeg) ![<pkmn-classic> 131](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/131.jpeg) ![<pkmn-classic> 132](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/132.jpeg) ![<pkmn-classic> 133](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/133.jpeg) 
![<pkmn-classic> 134](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/134.jpeg) ![<pkmn-classic> 135](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/135.jpeg) ![<pkmn-classic> 136](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/136.jpeg) ![<pkmn-classic> 137](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/137.jpeg) ![<pkmn-classic> 138](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/138.jpeg) ![<pkmn-classic> 139](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/139.jpeg) ![<pkmn-classic> 140](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/140.jpeg) ![<pkmn-classic> 141](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/141.jpeg) ![<pkmn-classic> 142](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/142.jpeg) ![<pkmn-classic> 143](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/143.jpeg) ![<pkmn-classic> 144](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/144.jpeg) ![<pkmn-classic> 145](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/145.jpeg) ![<pkmn-classic> 146](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/146.jpeg) ![<pkmn-classic> 147](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/147.jpeg) ![<pkmn-classic> 148](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/148.jpeg) ![<pkmn-classic> 149](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/149.jpeg) ![<pkmn-classic> 150](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/150.jpeg) ![<pkmn-classic> 151](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/151.jpeg) ![<pkmn-classic> 152](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/152.jpeg) ![<pkmn-classic> 153](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/153.jpeg) ![<pkmn-classic> 154](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/154.jpeg) ![<pkmn-classic> 155](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/155.jpeg) ![<pkmn-classic> 156](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/156.jpeg) ![<pkmn-classic> 157](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/157.jpeg) ![<pkmn-classic> 158](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/158.jpeg) ![<pkmn-classic> 159](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/159.jpeg) ![<pkmn-classic> 160](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/160.jpeg) ![<pkmn-classic> 
161](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/161.jpeg) ![<pkmn-classic> 162](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/162.jpeg) ![<pkmn-classic> 163](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/163.jpeg) ![<pkmn-classic> 164](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/164.jpeg) ![<pkmn-classic> 165](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/165.jpeg) ![<pkmn-classic> 166](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/166.jpeg) ![<pkmn-classic> 167](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/167.jpeg) ![<pkmn-classic> 168](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/168.jpeg) ![<pkmn-classic> 169](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/169.jpeg) ![<pkmn-classic> 170](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/170.jpeg) ![<pkmn-classic> 171](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/171.jpeg) ![<pkmn-classic> 172](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/172.jpeg) ![<pkmn-classic> 173](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/173.jpeg) ![<pkmn-classic> 174](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/174.jpeg) ![<pkmn-classic> 175](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/175.jpeg) ![<pkmn-classic> 176](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/176.jpeg) ![<pkmn-classic> 177](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/177.jpeg) ![<pkmn-classic> 178](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/178.jpeg) ![<pkmn-classic> 179](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/179.jpeg) ![<pkmn-classic> 180](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/180.jpeg) ![<pkmn-classic> 181](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/181.jpeg) ![<pkmn-classic> 182](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/182.jpeg) ![<pkmn-classic> 183](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/183.jpeg) ![<pkmn-classic> 184](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/184.jpeg) ![<pkmn-classic> 185](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/185.jpeg) ![<pkmn-classic> 186](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/186.jpeg) ![<pkmn-classic> 187](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/187.jpeg) ![<pkmn-classic> 188](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/188.jpeg) 
![<pkmn-classic> 189](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/189.jpeg) ![<pkmn-classic> 190](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/190.jpeg) ![<pkmn-classic> 191](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/191.jpeg) ![<pkmn-classic> 192](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/192.jpeg) ![<pkmn-classic> 193](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/193.jpeg) ![<pkmn-classic> 194](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/194.jpeg) ![<pkmn-classic> 195](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/195.jpeg) ![<pkmn-classic> 196](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/196.jpeg) ![<pkmn-classic> 197](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/197.jpeg) ![<pkmn-classic> 198](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/198.jpeg) ![<pkmn-classic> 199](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/199.jpeg) ![<pkmn-classic> 200](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/200.jpeg) ![<pkmn-classic> 201](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/201.jpeg) ![<pkmn-classic> 202](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/202.jpeg) ![<pkmn-classic> 203](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/203.jpeg) ![<pkmn-classic> 204](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/204.jpeg) ![<pkmn-classic> 205](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/205.jpeg) ![<pkmn-classic> 206](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/206.jpeg) ![<pkmn-classic> 207](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/207.jpeg) ![<pkmn-classic> 208](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/208.jpeg) ![<pkmn-classic> 209](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/209.jpeg) ![<pkmn-classic> 210](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/210.jpeg) ![<pkmn-classic> 211](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/211.jpeg) ![<pkmn-classic> 212](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/212.jpeg) ![<pkmn-classic> 213](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/213.jpeg) ![<pkmn-classic> 214](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/214.jpeg) ![<pkmn-classic> 215](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/215.jpeg) ![<pkmn-classic> 
216](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/216.jpeg) ![<pkmn-classic> 217](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/217.jpeg) ![<pkmn-classic> 218](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/218.jpeg) ![<pkmn-classic> 219](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/219.jpeg) ![<pkmn-classic> 220](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/220.jpeg) ![<pkmn-classic> 221](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/221.jpeg) ![<pkmn-classic> 222](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/222.jpeg) ![<pkmn-classic> 223](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/223.jpeg) ![<pkmn-classic> 224](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/224.jpeg) ![<pkmn-classic> 225](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/225.jpeg) ![<pkmn-classic> 226](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/226.jpeg) ![<pkmn-classic> 227](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/227.jpeg) ![<pkmn-classic> 228](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/228.jpeg) ![<pkmn-classic> 229](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/229.jpeg) ![<pkmn-classic> 230](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/230.jpeg) ![<pkmn-classic> 231](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/231.jpeg) ![<pkmn-classic> 232](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/232.jpeg) ![<pkmn-classic> 233](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/233.jpeg) ![<pkmn-classic> 234](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/234.jpeg) ![<pkmn-classic> 235](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/235.jpeg) ![<pkmn-classic> 236](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/236.jpeg) ![<pkmn-classic> 237](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/237.jpeg) ![<pkmn-classic> 238](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/238.jpeg) ![<pkmn-classic> 239](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/239.jpeg) ![<pkmn-classic> 240](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/240.jpeg) ![<pkmn-classic> 241](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/241.jpeg) ![<pkmn-classic> 242](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/242.jpeg) ![<pkmn-classic> 243](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/243.jpeg) 
![<pkmn-classic> 244](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/244.jpeg) ![<pkmn-classic> 245](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/245.jpeg) ![<pkmn-classic> 246](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/246.jpeg) ![<pkmn-classic> 247](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/247.jpeg) ![<pkmn-classic> 248](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/248.jpeg) ![<pkmn-classic> 249](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/249.jpeg) ![<pkmn-classic> 250](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/250.jpeg) ![<pkmn-classic> 251](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/251.jpeg) ![<pkmn-classic> 252](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/252.jpeg) ![<pkmn-classic> 253](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/253.jpeg) ![<pkmn-classic> 254](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/254.jpeg) ![<pkmn-classic> 255](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/255.jpeg) ![<pkmn-classic> 256](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/256.jpeg) ![<pkmn-classic> 257](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/257.jpeg) ![<pkmn-classic> 258](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/258.jpeg) ![<pkmn-classic> 259](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/259.jpeg) ![<pkmn-classic> 260](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/260.jpeg) ![<pkmn-classic> 261](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/261.jpeg) ![<pkmn-classic> 262](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/262.jpeg) ![<pkmn-classic> 263](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/263.jpeg) ![<pkmn-classic> 264](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/264.jpeg) ![<pkmn-classic> 265](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/265.jpeg) ![<pkmn-classic> 266](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/266.jpeg) ![<pkmn-classic> 267](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/267.jpeg) ![<pkmn-classic> 268](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/268.jpeg) ![<pkmn-classic> 269](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/269.jpeg) ![<pkmn-classic> 270](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/270.jpeg) ![<pkmn-classic> 
271](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/271.jpeg) ![<pkmn-classic> 272](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/272.jpeg) ![<pkmn-classic> 273](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/273.jpeg) ![<pkmn-classic> 274](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/274.jpeg) ![<pkmn-classic> 275](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/275.jpeg) ![<pkmn-classic> 276](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/276.jpeg) ![<pkmn-classic> 277](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/277.jpeg) ![<pkmn-classic> 278](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/278.jpeg) ![<pkmn-classic> 279](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/279.jpeg) ![<pkmn-classic> 280](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/280.jpeg) ![<pkmn-classic> 281](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/281.jpeg) ![<pkmn-classic> 282](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/282.jpeg) ![<pkmn-classic> 283](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/283.jpeg) ![<pkmn-classic> 284](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/284.jpeg) ![<pkmn-classic> 285](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/285.jpeg) ![<pkmn-classic> 286](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/286.jpeg) ![<pkmn-classic> 287](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/287.jpeg) ![<pkmn-classic> 288](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/288.jpeg) ![<pkmn-classic> 289](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/289.jpeg) ![<pkmn-classic> 290](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/290.jpeg) ![<pkmn-classic> 291](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/291.jpeg) ![<pkmn-classic> 292](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/292.jpeg) ![<pkmn-classic> 293](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/293.jpeg) ![<pkmn-classic> 294](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/294.jpeg) ![<pkmn-classic> 295](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/295.jpeg) ![<pkmn-classic> 296](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/296.jpeg) ![<pkmn-classic> 297](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/297.jpeg) ![<pkmn-classic> 298](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/298.jpeg) 
![<pkmn-classic> 299](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/299.jpeg) ![<pkmn-classic> 300](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/300.jpeg) ![<pkmn-classic> 301](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/301.jpeg) ![<pkmn-classic> 302](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/302.jpeg) ![<pkmn-classic> 303](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/303.jpeg) ![<pkmn-classic> 304](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/304.jpeg) ![<pkmn-classic> 305](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/305.jpeg) ![<pkmn-classic> 306](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/306.jpeg) ![<pkmn-classic> 307](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/307.jpeg) ![<pkmn-classic> 308](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/308.jpeg) ![<pkmn-classic> 309](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/309.jpeg) ![<pkmn-classic> 310](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/310.jpeg) ![<pkmn-classic> 311](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/311.jpeg) ![<pkmn-classic> 312](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/312.jpeg) ![<pkmn-classic> 313](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/313.jpeg) ![<pkmn-classic> 314](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/314.jpeg) ![<pkmn-classic> 315](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/315.jpeg) ![<pkmn-classic> 316](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/316.jpeg) ![<pkmn-classic> 317](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/317.jpeg) ![<pkmn-classic> 318](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/318.jpeg) ![<pkmn-classic> 319](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/319.jpeg) ![<pkmn-classic> 320](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/320.jpeg) ![<pkmn-classic> 321](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/321.jpeg) ![<pkmn-classic> 322](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/322.jpeg) ![<pkmn-classic> 323](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/323.jpeg) ![<pkmn-classic> 324](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/324.jpeg) ![<pkmn-classic> 325](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/325.jpeg) ![<pkmn-classic> 
326](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/326.jpeg) ![<pkmn-classic> 327](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/327.jpeg) ![<pkmn-classic> 328](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/328.jpeg) ![<pkmn-classic> 329](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/329.jpeg) ![<pkmn-classic> 330](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/330.jpeg) ![<pkmn-classic> 331](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/331.jpeg) ![<pkmn-classic> 332](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/332.jpeg) ![<pkmn-classic> 333](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/333.jpeg) ![<pkmn-classic> 334](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/334.jpeg) ![<pkmn-classic> 335](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/335.jpeg) ![<pkmn-classic> 336](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/336.jpeg) ![<pkmn-classic> 337](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/337.jpeg) ![<pkmn-classic> 338](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/338.jpeg) ![<pkmn-classic> 339](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/339.jpeg) ![<pkmn-classic> 340](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/340.jpeg) ![<pkmn-classic> 341](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/341.jpeg) ![<pkmn-classic> 342](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/342.jpeg) ![<pkmn-classic> 343](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/343.jpeg) ![<pkmn-classic> 344](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/344.jpeg) ![<pkmn-classic> 345](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/345.jpeg) ![<pkmn-classic> 346](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/346.jpeg) ![<pkmn-classic> 347](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/347.jpeg) ![<pkmn-classic> 348](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/348.jpeg) ![<pkmn-classic> 349](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/349.jpeg) ![<pkmn-classic> 350](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/350.jpeg) ![<pkmn-classic> 351](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/351.jpeg) ![<pkmn-classic> 352](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/352.jpeg) ![<pkmn-classic> 353](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/353.jpeg) 
![<pkmn-classic> 354](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/354.jpeg) ![<pkmn-classic> 355](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/355.jpeg) ![<pkmn-classic> 356](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/356.jpeg) ![<pkmn-classic> 357](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/357.jpeg) ![<pkmn-classic> 358](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/358.jpeg) ![<pkmn-classic> 359](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/359.jpeg) ![<pkmn-classic> 360](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/360.jpeg) ![<pkmn-classic> 361](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/361.jpeg) ![<pkmn-classic> 362](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/362.jpeg) ![<pkmn-classic> 363](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/363.jpeg) ![<pkmn-classic> 364](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/364.jpeg) ![<pkmn-classic> 365](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/365.jpeg) ![<pkmn-classic> 366](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/366.jpeg) ![<pkmn-classic> 367](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/367.jpeg) ![<pkmn-classic> 368](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/368.jpeg) ![<pkmn-classic> 369](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/369.jpeg) ![<pkmn-classic> 370](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/370.jpeg) ![<pkmn-classic> 371](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/371.jpeg) ![<pkmn-classic> 372](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/372.jpeg) ![<pkmn-classic> 373](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/373.jpeg) ![<pkmn-classic> 374](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/374.jpeg) ![<pkmn-classic> 375](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/375.jpeg) ![<pkmn-classic> 376](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/376.jpeg) ![<pkmn-classic> 377](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/377.jpeg) ![<pkmn-classic> 378](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/378.jpeg) ![<pkmn-classic> 379](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/379.jpeg) ![<pkmn-classic> 380](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/380.jpeg) ![<pkmn-classic> 
381](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/381.jpeg) ![<pkmn-classic> 382](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/382.jpeg) ![<pkmn-classic> 383](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/383.jpeg) ![<pkmn-classic> 384](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/384.jpeg) ![<pkmn-classic> 385](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/385.jpeg) ![<pkmn-classic> 386](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/386.jpeg) ![<pkmn-classic> 387](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/387.jpeg) ![<pkmn-classic> 388](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/388.jpeg) ![<pkmn-classic> 389](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/389.jpeg) ![<pkmn-classic> 390](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/390.jpeg) ![<pkmn-classic> 391](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/391.jpeg) ![<pkmn-classic> 392](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/392.jpeg) ![<pkmn-classic> 393](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/393.jpeg) ![<pkmn-classic> 394](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/394.jpeg) ![<pkmn-classic> 395](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/395.jpeg) ![<pkmn-classic> 396](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/396.jpeg) ![<pkmn-classic> 397](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/397.jpeg) ![<pkmn-classic> 398](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/398.jpeg) ![<pkmn-classic> 399](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/399.jpeg) ![<pkmn-classic> 400](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/400.jpeg) ![<pkmn-classic> 401](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/401.jpeg) ![<pkmn-classic> 402](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/402.jpeg) ![<pkmn-classic> 403](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/403.jpeg) ![<pkmn-classic> 404](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/404.jpeg) ![<pkmn-classic> 405](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/405.jpeg) ![<pkmn-classic> 406](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/406.jpeg) ![<pkmn-classic> 407](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/407.jpeg) ![<pkmn-classic> 408](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/408.jpeg) 
![<pkmn-classic> 409](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/409.jpeg) ![<pkmn-classic> 410](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/410.jpeg) ![<pkmn-classic> 411](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/411.jpeg) ![<pkmn-classic> 412](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/412.jpeg) ![<pkmn-classic> 413](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/413.jpeg) ![<pkmn-classic> 414](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/414.jpeg) ![<pkmn-classic> 415](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/415.jpeg) ![<pkmn-classic> 416](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/416.jpeg) ![<pkmn-classic> 417](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/417.jpeg) ![<pkmn-classic> 418](https://huggingface.co/sd-concepts-library/pokemon-classic-artwork/resolve/main/concept_images/418.jpeg)
Ateeb/QA
[ "pytorch", "distilbert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "DistilBertForQuestionAnswering" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Huggy library_name: ml-agents --- # **ppo** Agent playing **Huggy** This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy 2. Step 1: Write your model_id: dfm794/ppo-Huggy 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
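The Ateeb/QA row above is tagged for extractive question answering with a DistilBertForQuestionAnswering architecture. As a rough illustration only, a checkpoint like this is typically queried through the transformers pipeline API as sketched below; the model id comes from the row's metadata, while the example question/context strings and the assumption that the repository bundles a compatible tokenizer are ours.

```python
# Minimal extractive-QA sketch for a DistilBERT checkpoint such as Ateeb/QA.
# Assumes the repository ships both model weights and a compatible tokenizer.
from transformers import pipeline

qa = pipeline("question-answering", model="Ateeb/QA")

result = qa(
    question="What architecture does the checkpoint use?",
    context="The Ateeb/QA repository hosts a DistilBERT model fine-tuned for "
            "extractive question answering.",
)

# The pipeline returns the answer span plus a confidence score and character offsets.
print(result["answer"], result["score"], result["start"], result["end"])
```

Extractive QA pipelines always return a span copied from the supplied context, so the quality of the answer depends as much on the context passage as on the checkpoint itself.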
Ateeb/asd
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - hi license: apache-2.0 tags: - whisper-event - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 metrics: - wer model-index: - name: Whisper Large V2 finetuned Hindi results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: common_voice_11_0 type: mozilla-foundation/common_voice_11_0 config: hi split: test args: hi metrics: - name: Wer type: wer value: 10.72246131306657 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Large V2 finetuned Hindi This model is a fine-tuned version of [openai/whisper-large-v2](https://huggingface.co/openai/whisper-large-v2) on the common_voice_11_0 dataset. It achieves the following results on the evaluation set: - Loss: 0.2043 - Wer: 10.7225 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 100 - training_steps: 1000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:-------:| | 0.0153 | 3.18 | 1000 | 0.2043 | 10.7225 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0+cu117 - Datasets 2.7.1.dev0 - Tokenizers 0.13.2
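The Whisper Large V2 Hindi card in the row above lists its training hyperparameters but not the training script. For orientation, those values map onto transformers `Seq2SeqTrainingArguments` roughly as in the sketch below; this is a reconstruction under assumptions, not the author's configuration, and the `output_dir` name is invented.

```python
# Sketch: the hyperparameters listed in the Whisper Large V2 Hindi card expressed as
# transformers Seq2SeqTrainingArguments. Illustrative reconstruction only, not the
# original training setup.
from transformers import Seq2SeqTrainingArguments

training_args = Seq2SeqTrainingArguments(
    output_dir="whisper-large-v2-hi",  # invented name; not taken from the card
    learning_rate=1e-5,
    per_device_train_batch_size=8,     # "train_batch_size: 8"
    per_device_eval_batch_size=8,      # "eval_batch_size: 8"
    gradient_accumulation_steps=2,     # yields the listed total batch size of 16
    seed=42,
    lr_scheduler_type="linear",
    warmup_steps=100,
    max_steps=1000,                    # "training_steps: 1000"
    fp16=True,                         # "mixed_precision_training: Native AMP"
)

# The card's Adam betas=(0.9, 0.999) and epsilon=1e-08 are the library defaults,
# so they need no explicit arguments here.
```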
Augustvember/test
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
2022-12-12T06:08:43Z
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-PolicyGradient results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 49.30 +/- 8.37 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1** . To learn to use this model and train yours check Unit 5 of the Deep Reinforcement Learning Class: https://github.com/huggingface/deep-rl-class/tree/main/unit5
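The last row's card describes a REINFORCE agent for CartPole-v1 and defers to Unit 5 of the Deep Reinforcement Learning Class for details. A minimal sketch of that Monte-Carlo policy-gradient loop is given below for orientation only: it assumes a gymnasium environment and a small torch policy, the hyperparameters are invented, and none of it is the uploaded agent's training code.

```python
# Minimal REINFORCE (Monte-Carlo policy gradient) sketch for CartPole-v1.
# Illustrates the algorithm family named in the card, not the uploaded model.
import gymnasium as gym
import torch
import torch.nn as nn
from torch.distributions import Categorical

env = gym.make("CartPole-v1")
policy = nn.Sequential(
    nn.Linear(env.observation_space.shape[0], 64),
    nn.ReLU(),
    nn.Linear(64, env.action_space.n),
)
optimizer = torch.optim.Adam(policy.parameters(), lr=1e-2)
gamma = 0.99

for episode in range(500):
    obs, _ = env.reset()
    log_probs, rewards = [], []
    done = False
    while not done:
        # Sample an action from the current stochastic policy.
        dist = Categorical(logits=policy(torch.as_tensor(obs, dtype=torch.float32)))
        action = dist.sample()
        log_probs.append(dist.log_prob(action))
        obs, reward, terminated, truncated, _ = env.step(action.item())
        rewards.append(reward)
        done = terminated or truncated

    # Discounted returns, computed backwards over the episode.
    returns, g = [], 0.0
    for r in reversed(rewards):
        g = r + gamma * g
        returns.insert(0, g)
    returns = torch.tensor(returns)
    returns = (returns - returns.mean()) / (returns.std() + 1e-8)

    # Policy-gradient update: increase log-probability of actions with high return.
    loss = -(torch.stack(log_probs) * returns).sum()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
```

Normalizing the returns before the update is a standard variance-reduction choice for REINFORCE; without it, training on raw episode returns tends to be noticeably less stable.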