| Column | Type | Range / values |
|:-------------|:-----------------------|:----------------|
| modelId | string | lengths 4-81 |
| tags | list | |
| pipeline_tag | string | 17 classes |
| config | dict | |
| downloads | int64 | 0-59.7M |
| first_commit | timestamp[ns, tz=UTC] | |
| card | string | lengths 51-438k |
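The rows that follow conform to this schema: one record per model, with the full model card flattened into the `card` field. As a minimal sketch of how such a preview could be explored programmatically, the snippet below loads it with the `datasets` library and filters on the columns above; the repo id `your-namespace/model-metadata` is a placeholder, not the actual source of this dump.

```python
from datasets import load_dataset

# Placeholder repo id: substitute the dataset this preview was taken from.
ds = load_dataset("your-namespace/model-metadata", split="train")

# The columns match the schema above.
print(ds.features)

# Example: the five most-downloaded text-classification models in the dump.
text_clf = ds.filter(lambda row: row["pipeline_tag"] == "text-classification")
top5 = sorted(text_clf, key=lambda row: row["downloads"], reverse=True)[:5]
for row in top5:
    print(row["modelId"], row["downloads"])
```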
AdapterHub/bert-base-uncased-pf-scicite
[ "bert", "en", "dataset:scicite", "arxiv:2104.08247", "adapter-transformers", "text-classification" ]
text-classification
{ "architectures": null, "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - generated_from_keras_callback model-index: - name: YSKartal/scibert_scivocab_uncased-finetuned-2-ref_disam results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # YSKartal/scibert_scivocab_uncased-finetuned-2-ref_disam This model is a fine-tuned version of [allenai/scibert_scivocab_uncased](https://huggingface.co/allenai/scibert_scivocab_uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 4.3345 - Validation Loss: 5.5243 - Train Accuracy: 0.1562 - Epoch: 3 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': True, 'is_legacy_optimizer': False, 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 16308, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Train Accuracy | Epoch | |:----------:|:---------------:|:--------------:|:-----:| | 6.8503 | 6.9170 | 0.0323 | 0 | | 5.7494 | 6.3086 | 0.0738 | 1 | | 4.9365 | 5.8427 | 0.1206 | 2 | | 4.3345 | 5.5243 | 0.1562 | 3 | ### Framework versions - Transformers 4.27.4 - TensorFlow 2.12.0 - Datasets 2.11.0 - Tokenizers 0.13.2
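The card above documents training only. If the Keras checkpoint it describes needed to be reloaded for inference, a hedged sketch might look like the following; the choice of `TFAutoModelForSequenceClassification` is an assumption, since the card does not state which task head was trained.

```python
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification

model_id = "YSKartal/scibert_scivocab_uncased-finetuned-2-ref_disam"
tokenizer = AutoTokenizer.from_pretrained(model_id)
# Assumption: a sequence-classification head; the card does not state the task.
model = TFAutoModelForSequenceClassification.from_pretrained(model_id)

inputs = tokenizer("The cited work introduces a new benchmark.", return_tensors="tf")
print(model(**inputs).logits)
```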
Adinda/Adinda
[ "license:artistic-2.0" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - PandaReachDense-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: PandaReachDense-v2 type: PandaReachDense-v2 metrics: - type: mean_reward value: -1.48 +/- 0.45 name: mean_reward verified: false --- # **A2C** Agent playing **PandaReachDense-v2** This is a trained model of an **A2C** agent playing **PandaReachDense-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
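The usage block in the card above is left as a TODO. A minimal sketch of what it typically looks like for a stable-baselines3 A2C checkpoint is given below; the repo id and zip filename are placeholders, since the card does not state them.

```python
import gym
import panda_gym  # registers the PandaReachDense-v2 environment
from stable_baselines3 import A2C
from huggingface_sb3 import load_from_hub

# Placeholder repo id and filename; take the real ones from the model repository.
checkpoint = load_from_hub(
    repo_id="user/a2c-PandaReachDense-v2",
    filename="a2c-PandaReachDense-v2.zip",
)
model = A2C.load(checkpoint)

env = gym.make("PandaReachDense-v2")
obs = env.reset()
for _ in range(1000):
    action, _ = model.predict(obs, deterministic=True)
    obs, reward, done, info = env.step(action)
    if done:
        obs = env.reset()
```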
AethiQs-Max/aethiqs-base_bertje-data_rotterdam-epochs_30-epoch_30
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- tags: - LunarLander-v2 - ppo - deep-reinforcement-learning - reinforcement-learning - custom-implementation - deep-rl-course model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 277.42 +/- 22.19 name: mean_reward verified: false --- # PPO Agent Playing LunarLander-v2 This is a trained model of a PPO agent playing LunarLander-v2. # Hyperparameters
Akash7897/fill_mask_model
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-27T11:28:50Z
--- license: openrail datasets: - NbAiLab/norwegian-alpaca library_name: peft language: - 'no' - nb pipeline_tag: text-generation --- # NB-Alpaca-LoRA 7B This is an Norwegian adapter generated by fine-tuning LLaMA-7B on a [Norwegian Alpaca](https://huggingface.co/datasets/NbAiLab/norwegian-alpaca) dataset. ## Usage ```python from peft import PeftModel from transformers import LLaMATokenizer, LLaMAForCausalLM base_model = "decapoda-research/llama-7b-hf" tokenizer = LLaMATokenizer.from_pretrained(base_model) model = LLaMAForCausalLM.from_pretrained( base_model, load_in_8bit=True, device_map="auto", ) model = PeftModel.from_pretrained(model, "NbAiLab/nb-alpaca-lora-7b") ``` For generation, the promtp still needs the English template: ```python from transformers import pipeline pipe = pipeline("text-generation", model=model, tokenizer=tokenizer) instruction = "Skriv en e-post der du ønsker velkommen til en ny medarbeider ved navn Svein" pipe.generate(f"""Below is an instruction that describes a task. Write a response that appropriately completes the request. ### Instruction: {instruction} ### Response: """) # Kjære Svein, # # Velkommen til vårt team! Vi er så glade for å ha deg med oss. Vi ser frem til å hjelpe deg med å nå dine mål og oppnå dine drømmer. # # Vi er alltid tilgjengelige hvis du har noen spørsmål eller ønsker å diskutere noen av våre prosjekter. # # Vi ser frem til å jobbe sammen med deg! # # Med vennlig ``` ## Data The dataset is a translation to Norwegian Bokmål of [alpaca_data_cleaned.json](https://github.com/tloen/alpaca-lora/blob/main/alpaca_data_cleaned.json) (a clean version of the [Alpaca dataset made at Stanford](https://huggingface.co/datasets/tatsu-lab/alpaca)) using OpenAI's `gpt-3.5-turbo` model. We translated using a full-sample prompt instead of per strings, which resulted in more coherent tuples of `(instruction, input, output)` and costed around $60.0. **This dataset cannot be used to create models that compete in any way with OpenAI.** ## Finetuning To fine-tune the LLaMA model we used the code available on [Alpaca Lora](https://github.com/tloen/alpaca-lora), which provides code to finetune the LLaMA model using PEFT from Hugging Face. We run finetuning for 3 epochs using sequence length of 512 with no gradient accumulation on a single A6000 with 48GB VRAM for 12 hours on top of LLaMA-7B. ## Example outputs Stanford Alpaca: ``` Instruction: Kom med en kreativ unnskyldning for å si at jeg ikke trenger å gå på festen. Response: I'm sorry, but I don't need to go to the party. ``` NB-Alpaca-LoRA-7B: ``` Instruction: Kom med en kreativ unnskyldning for å si at jeg ikke trenger å gå på festen. Response: Jeg beklager at jeg ikke kan gå på festen, men jeg tror at det er bedre at jeg ikke gjør det. ``` [NB-GTP-J-6B-Alpaca](https://huggingface.co/NbAiLab/nb-gpt-j-6B-alpaca): ``` Instruction: Kom med en kreativ unnskyldning for å si at jeg ikke trenger å gå på festen. Response: Jeg beklager, men jeg har en avtale jeg ikke kan avlyse. ``` Stanford Alpaca: ``` Instruction: Hvordan kan jeg starte en karriere som dataforsker? Skriv i listeform. Response: Starting a career as a data scientist requires a combination of education, experience, and skills. 1. Obtain a Bachelor's or Master's degree in computer science, statistics, mathematics, or a related field. 2. Gain practical experience through internships or part-time jobs. 3. Learn programming languages such as Python, R, and SQL. 4. Develop strong analytical and problem-solving skills. 5. 
Build a portfolio of projects to showcase your work. 6. Network with other data scientists to learn ``` NB-Alpaca-LoRA-7B: ``` Instruction: Hvordan kan jeg starte en karriere som dataforsker? Skriv i listeform. Response: 1. Søk etter en universitetsgrad i datavitenskap, matematikk, informatikk eller en relatert fag. 2. Søk etter en jobb som dataforsker eller dataanalytiker. 3. Utvikle dine ferdigheter i datavitenskap, matematikk, informatikk eller en relatert fag. 4. Utvikle dine ferdigheter innen dataanalyse. ``` [NB-GTP-J-6B-Alpaca](https://huggingface.co/NbAiLab/nb-gpt-j-6B-alpaca): ``` Instruction: Hvordan kan jeg starte en karriere som dataforsker? Skriv i listeform. Response: 1. Lær programmeringsspråk som Python og Java. 2. Skaff deg en grad i informatikk, datavitenskap eller relaterte felt. 3. Bli sertifisert innen datamaskinsyn, maskinlæring eller andre relaterte felt. 4. Bygg nettverk med andre dataforskere. 5. Delta på konferanser og kurs for å holde deg oppdatert på de siste utviklingene innen feltet. ``` You can test it using the eval notebook [here](https://colab.research.google.com/github/22-hours/cabrita/blob/main/notebooks/cabrita-lora.ipynb). ## References - [LLaMA](https://ai.facebook.com/blog/large-language-model-llama-meta-ai/) - [Stanford Alpaca](https://github.com/tatsu-lab/stanford_alpaca) - [Norwegian Alpaca](https://huggingface.co/datasets/NbAiLab/norwegian-alpaca) - [Alpaca LoRA](https://github.com/tloen/alpaca-lora) - [ChatGPT](https://openai.com/blog/chatgpt) - [Hugging Face](https://huggingface.co/) ## Hardware Requirements For training we have used an A6000 48GB VRAM Nvidia GPU. For eval, you can use a T4.
Akash7897/gpt2-wikitext2
[ "pytorch", "tensorboard", "gpt2", "text-generation", "transformers", "generated_from_trainer", "license:mit" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: BERT-SA results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # BERT-SA This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: None - training_precision: float32 ### Training results ### Framework versions - Transformers 4.24.0 - TensorFlow 2.10.0 - Tokenizers 0.11.0
Akash7897/my-newtokenizer
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-27T11:32:32Z
--- language: - en - cs - multilingual license: cc-by-sa-4.0 tags: - bicleaner-ai tasks: - text-classification --- # Bicleaner AI full model for en-cs Bicleaner AI is a tool that aims to detect noisy sentence pairs in a parallel corpus. It indicates the likelihood of a pair of sentences being mutual translations (with a value near 1) or not (with a value near 0). Sentence pairs considered very noisy are scored with 0. See our repository for further instructions on how to use it: https://github.com/bitextor/bicleaner-ai
Akashamba/distilbert-base-uncased-finetuned-ner
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-27T11:38:17Z
--- language: - en - de - multilingual license: cc-by-sa-4.0 tags: - bicleaner-ai tasks: - text-classification --- # Bicleaner AI full model for en-de Bicleaner AI is a tool that aims to detect noisy sentence pairs in a parallel corpus. It indicates the likelihood of a pair of sentences being mutual translations (with a value near 1) or not (with a value near 0). Sentence pairs considered very noisy are scored with 0. See our repository for further instructions on how to use it: https://github.com/bitextor/bicleaner-ai
Akashpb13/Central_kurdish_xlsr
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "ckb", "dataset:mozilla-foundation/common_voice_8_0", "transformers", "mozilla-foundation/common_voice_8_0", "generated_from_trainer", "robust-speech-event", "model_for_talk", "hf-asr-leaderboard", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- language: - en - da - multilingual license: cc-by-sa-4.0 tags: - bicleaner-ai tasks: - text-classification --- # Bicleaner AI full model for en-da Bicleaner AI is a tool that aims to detect noisy sentence pairs in a parallel corpus. It indicates the likelihood of a pair of sentences being mutual translations (with a value near 1) or not (with a value near 0). Sentence pairs considered very noisy are scored with 0. See our repository for further instructions on how to use it: https://github.com/bitextor/bicleaner-ai
AkshaySg/langid
[ "multilingual", "dataset:VoxLingua107", "speechbrain", "audio-classification", "embeddings", "Language", "Identification", "pytorch", "ECAPA-TDNN", "TDNN", "VoxLingua107", "license:apache-2.0" ]
audio-classification
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- language: - en - sv - multilingual license: cc-by-sa-4.0 tags: - bicleaner-ai tasks: - text-classification --- # Bicleaner AI full model for en-sv Bicleaner AI is a tool that aims to detect noisy sentence pairs in a parallel corpus. It indicates the likelihood of a pair of sentences being mutual translations (with a value near 1) or not (with a value near 0). Sentence pairs considered very noisy are scored with 0. See our repository for further instructions on how to use it: https://github.com/bitextor/bicleaner-ai
AlbertHSU/ChineseFoodBert
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
15
null
# Vocabulary Trimmed [lmqg/mbart-large-cc25-esquad-qa](https://huggingface.co/lmqg/mbart-large-cc25-esquad-qa): `vocabtrimmer/mbart-large-cc25-esquad-qa-trimmed-es` This model is a trimmed version of [lmqg/mbart-large-cc25-esquad-qa](https://huggingface.co/lmqg/mbart-large-cc25-esquad-qa) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | lmqg/mbart-large-cc25-esquad-qa | vocabtrimmer/mbart-large-cc25-esquad-qa-trimmed-es | |:---------------------------|:----------------------------------|:-----------------------------------------------------| | parameter_size_full | 610,852,864 | 443,997,184 | | parameter_size_embedding | 512,057,344 | 178,345,984 | | vocab_size | 250,028 | 87,083 | | compression_rate_full | 100.0 | 72.68 | | compression_rate_embedding | 100.0 | 34.83 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|:--------------------|----------------:| | es | vocabtrimmer/mc4_validation | text | es | validation | | 2 |
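Because trimming only shrinks the embedding matrix and vocabulary, the checkpoint can presumably be loaded with the standard `transformers` seq2seq classes, as sketched below. The `question: ..., context: ...` input format is an assumption based on how lmqg question-answering models are usually queried; it is not stated in the card.

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_id = "vocabtrimmer/mbart-large-cc25-esquad-qa-trimmed-es"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

# Assumed lmqg-style QA input format: "question: ..., context: ..."
text = ("question: ¿Dónde nació Cervantes?, "
        "context: Miguel de Cervantes nació en Alcalá de Henares en 1547.")
inputs = tokenizer(text, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```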
Aleksandar/distilbert-srb-base-cased-oscar
[ "pytorch", "distilbert", "fill-mask", "transformers", "generated_from_trainer", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "DistilBertForMaskedLM" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- license: bigscience-bloom-rail-1.0 --- We finetune bloom-7b1 using LoRA (Low-Rank Adaptation) with the CodeAlpaca dataset from https://huggingface.co/datasets/sahil2801/CodeAlpaca-20k. So we name the trained weights bloom-7b1-lora-codeaplaca20k.
Aleksandar1932/distilgpt2-rock
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
null
--- language: - es - ca - multilingual license: cc-by-sa-4.0 tags: - bicleaner-ai tasks: - text-classification --- # Bicleaner AI full model for es-ca Bicleaner AI is a tool that aims to detect noisy sentence pairs in a parallel corpus. It indicates the likelihood of a pair of sentences being mutual translations (with a value near 1) or not (with a value near 0). Sentence pairs considered very noisy are scored with 0. See our repository for further instructions on how to use it: https://github.com/bitextor/bicleaner-ai
Aleksandra/distilbert-base-uncased-finetuned-squad
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - setfit - sentence-transformers - text-classification pipeline_tag: text-classification --- # /content/drive/MyDrive/setfit_tatwa_email_classification/model This is a [SetFit model](https://github.com/huggingface/setfit) that can be used for text classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Usage To use this model for inference, first install the SetFit library: ```bash python -m pip install setfit ``` You can then run inference as follows: ```python from setfit import SetFitModel # Download from Hub and run inference model = SetFitModel.from_pretrained("/content/drive/MyDrive/setfit_tatwa_email_classification/model") # Run inference preds = model(["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"]) ``` ## BibTeX entry and citation info ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ```
AlexMaclean/sentence-compression
[ "pytorch", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "DistilBertForTokenClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
16
2023-03-27T13:25:47Z
--- license: gpl-3.0 datasets: - BelleGroup/generated_train_0.5M_CN - JosephusCheung/GuanacoDataset language: - zh tags: - alpaca - Chinese-Vicuna - llama --- Checkpoint of the Chinese-Vicuna model (https://github.com/Facico/Chinese-Vicuna) finetuned on belle0.5M+guanaco (3 epochs). The model is based on LLaMA-7B, so it can be used with applications built for LLaMA-7B.
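The card above states that the checkpoint is a LoRA adapter for LLaMA-7B but includes no loading code. A hedged sketch, following the same `peft` pattern used by the other LoRA cards in this dump, might look like this; the adapter repo id is a placeholder because the card does not name it.

```python
from peft import PeftModel
from transformers import LlamaForCausalLM, LlamaTokenizer

base_model = "decapoda-research/llama-7b-hf"
tokenizer = LlamaTokenizer.from_pretrained(base_model)
model = LlamaForCausalLM.from_pretrained(
    base_model,
    load_in_8bit=True,   # requires bitsandbytes
    device_map="auto",
)
# Placeholder adapter id; replace with the actual Chinese-Vicuna checkpoint repo.
model = PeftModel.from_pretrained(model, "your-namespace/chinese-vicuna-lora-7b")
```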
Alexander-Learn/bert-finetuned-squad-accelerate
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### dreambooth_NgAndrew_BilluA Dreambooth model trained by RafiulCV with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb) Sample pictures of this concept:
Alexandru/creative_copilot
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 datasets: - squad tags: - generated_from_trainers ---
AlexeyYazev/my-awesome-model
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-v1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 500.00 +/- 0.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1**. To learn how to use this model and train your own, check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
Alfia/anekdotes
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: en thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1126635834747305989/j_LwXFNt_400x400.jpg&#39;)"> </div> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/830300998203633664/ae-ffzuX_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI CYBORG 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Vinay & Vinay Gupta/Hexayurt</div> <div style="text-align: center; font-size: 14px;">@hexayurt-leashless</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Vinay & Vinay Gupta/Hexayurt. | Data | Vinay | Vinay Gupta/Hexayurt | | --- | --- | --- | | Tweets downloaded | 3221 | 3186 | | Retweets | 1260 | 348 | | Short tweets | 151 | 43 | | Tweets kept | 1810 | 2795 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/4yf52sif/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @hexayurt-leashless's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/urnb3qco) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/urnb3qco/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/hexayurt-leashless') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
AliPotter24/a
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: balanced-augmented-mlroberta-gest-pred-seqeval-partialmatch results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # balanced-augmented-mlroberta-gest-pred-seqeval-partialmatch This model is a fine-tuned version of [xlm-roberta-large-finetuned-conll03-english](https://huggingface.co/xlm-roberta-large-finetuned-conll03-english) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.9051 - Precision: 0.8395 - Recall: 0.8114 - F1: 0.8189 - Accuracy: 0.7987 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | 3.1888 | 1.0 | 32 | 2.4935 | 0.2783 | 0.1689 | 0.1470 | 0.3131 | | 2.25 | 2.0 | 64 | 1.6646 | 0.6169 | 0.5430 | 0.5416 | 0.5888 | | 1.4955 | 3.0 | 96 | 1.2759 | 0.7516 | 0.6600 | 0.6688 | 0.6586 | | 0.9512 | 4.0 | 128 | 1.0307 | 0.8052 | 0.7394 | 0.7513 | 0.7147 | | 0.6053 | 5.0 | 160 | 0.9993 | 0.7975 | 0.7757 | 0.7724 | 0.7398 | | 0.4064 | 6.0 | 192 | 0.9347 | 0.8335 | 0.7939 | 0.7988 | 0.7732 | | 0.2802 | 7.0 | 224 | 0.9249 | 0.8285 | 0.7970 | 0.8013 | 0.7818 | | 0.2062 | 8.0 | 256 | 0.9051 | 0.8395 | 0.8114 | 0.8189 | 0.7987 | | 0.1372 | 9.0 | 288 | 0.9771 | 0.8447 | 0.7922 | 0.8079 | 0.7910 | | 0.1 | 10.0 | 320 | 1.0232 | 0.8246 | 0.8086 | 0.8042 | 0.7974 | | 0.0815 | 11.0 | 352 | 1.0103 | 0.8391 | 0.8173 | 0.8209 | 0.8024 | | 0.0586 | 12.0 | 384 | 1.0424 | 0.8366 | 0.7980 | 0.8085 | 0.7932 | | 0.0534 | 13.0 | 416 | 1.1246 | 0.8318 | 0.8070 | 0.8126 | 0.7969 | | 0.0412 | 14.0 | 448 | 1.0816 | 0.8338 | 0.8186 | 0.8167 | 0.8028 | | 0.0346 | 15.0 | 480 | 1.1178 | 0.8277 | 0.8222 | 0.8182 | 0.8037 | | 0.0312 | 16.0 | 512 | 1.1570 | 0.8387 | 0.8237 | 0.8219 | 0.8037 | | 0.0268 | 17.0 | 544 | 1.1548 | 0.8375 | 0.8279 | 0.8240 | 0.8028 | | 0.0221 | 18.0 | 576 | 1.1514 | 0.8316 | 0.8149 | 0.8169 | 0.8005 | | 0.0215 | 19.0 | 608 | 1.1698 | 0.8351 | 0.8221 | 0.8204 | 0.8037 | | 0.0213 | 20.0 | 640 | 1.1691 | 0.8311 | 0.8196 | 0.8166 | 0.8015 | ### Framework versions - Transformers 4.27.3 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
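The card above reports seqeval metrics but no inference example. A minimal sketch using the `transformers` token-classification pipeline is shown below; the `user/` namespace is a placeholder, since the card gives only the model name.

```python
from transformers import pipeline

# Placeholder namespace; the card states only the model name.
tagger = pipeline(
    "token-classification",
    model="user/balanced-augmented-mlroberta-gest-pred-seqeval-partialmatch",
    aggregation_strategy="simple",
)
print(tagger("Example sentence to tag."))
```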
AliReza/distilbert-emotion
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: taxi-v3-initial results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.56 +/- 2.71 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python model = load_from_hub(repo_id="arbts/taxi-v3-initial", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
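The `load_from_hub` helper in the usage block above comes from the Deep RL course notebooks and is not defined in the card. A self-contained, hedged equivalent using `huggingface_hub` and `pickle` is sketched below; it assumes the pickled dict follows the course convention of storing the environment id under `env_id` and a NumPy Q-table under `qtable`.

```python
import pickle

import gym
from huggingface_hub import hf_hub_download

# Download the pickled Q-table published alongside the card.
path = hf_hub_download(repo_id="arbts/taxi-v3-initial", filename="q-learning.pkl")
with open(path, "rb") as f:
    model = pickle.load(f)

env = gym.make(model["env_id"])   # assumed key, per the course convention
qtable = model["qtable"]          # assumed key, per the course convention

obs = env.reset()
done = False
while not done:
    action = int(qtable[obs].argmax())  # greedy action from the Q-table
    obs, reward, done, info = env.step(action)
env.close()
```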
Alicanke/Wyau
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: ml-agents tags: - SnowballTarget - deep-reinforcement-learning - reinforcement-learning - ML-Agents-SnowballTarget --- # **ppo** Agent playing **SnowballTarget** This is a trained model of a **ppo** agent playing **SnowballTarget** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://huggingface.co/spaces/unity/ML-Agents-SnowballTarget 2. Step 1: Find your model_id: artbreguez/ML-Agents-SnowballTarget 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
Alireza-rw/testbot
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: creativeml-openrail-m tags: - text-to-image widget: - text: bloomify1 --- ### bloomify1 Dreambooth model trained by ckao1030 with [Hugging Face Dreambooth Training Space](https://huggingface.co/spaces/multimodalart/dreambooth-training) with the v1-5 base model You run your new concept via `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb). Don't forget to use the concept prompts! Sample pictures of: bloomify1 (use that on your prompt) ![bloomify1 0](https://huggingface.co/ckao1030/bloomify1/resolve/main/concept_images/bloomify1_%281%29.jpg)![bloomify1 1](https://huggingface.co/ckao1030/bloomify1/resolve/main/concept_images/bloomify1_%282%29.jpg)![bloomify1 2](https://huggingface.co/ckao1030/bloomify1/resolve/main/concept_images/bloomify1_%283%29.jpg)![bloomify1 3](https://huggingface.co/ckao1030/bloomify1/resolve/main/concept_images/bloomify1_%284%29.jpg)![bloomify1 4](https://huggingface.co/ckao1030/bloomify1/resolve/main/concept_images/bloomify1_%285%29.jpg)![bloomify1 5](https://huggingface.co/ckao1030/bloomify1/resolve/main/concept_images/bloomify1_%286%29.jpg)
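For a quick local test outside the linked Colab notebook, the concept above can presumably be loaded with `diffusers` as sketched below; this is standard Stable Diffusion v1-5 pipeline usage with default settings, not code taken from the card.

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "ckao1030/bloomify1",
    torch_dtype=torch.float16,
).to("cuda")

# Use the concept token from the card in the prompt.
image = pipe("a photo of bloomify1 in a sunny garden").images[0]
image.save("bloomify1.png")
```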
Alireza1044/albert-base-v2-cola
[ "pytorch", "tensorboard", "albert", "text-classification", "en", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
32
2023-03-27T13:52:23Z
--- language: - en - nn - multilingual license: cc-by-sa-4.0 tags: - bicleaner-ai tasks: - text-classification --- # Bicleaner AI full model for en-nn Bicleaner AI is a tool that aims to detect noisy sentence pairs in a parallel corpus. It indicates the likelihood of a pair of sentences being mutual translations (with a value near 1) or not (with a value near 0). Sentence pairs considered very noisy are scored with 0. See our repository for further instructions on how to use it: https://github.com/bitextor/bicleaner-ai
Alireza1044/albert-base-v2-mrpc
[ "pytorch", "tensorboard", "albert", "text-classification", "en", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
204
null
--- license: openrail datasets: - bertin-project/alpaca-spanish library_name: peft language: - es pipeline_tag: text-generation --- # BERTIN-Alpaca-LoRA 7B This is a Spanish adapter generated by fine-tuning LLaMA-7B on a [Spanish Alpaca](https://huggingface.co/datasets/bertin-project/alpaca-spanish) dataset. ## Usage ```python from peft import PeftModel from transformers import LLaMATokenizer, LLaMAForCausalLM, GenerationConfig base_model = "decapoda-research/llama-7b-hf" tokenizer = LLaMATokenizer.from_pretrained(base_model) model = LLaMAForCausalLM.from_pretrained( base_model, load_in_8bit=True, device_map="auto", ) model = PeftModel.from_pretrained(model, "bertin-project/bertin-alpaca-lora-7b") ``` Until `PEFT` is fully supported in Hugginface's pipelines, for generation we can either consolidate the LoRA weights into the LLaMA model weights, or use the adapter's `generate()` method. Remember that the prompt still needs the English template: ```python # Generate responses def generate(instruction, input=None): if input: prompt = f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request. # noqa: E501 ### Instruction: {instruction} ### Input: {input} ### Response: """ else: prompt = f"""Below is an instruction that describes a task. Write a response that appropriately completes the request. # noqa: E501 ### Instruction: {instruction} ### Response: """ inputs = tokenizer(prompt, return_tensors="pt") input_ids = inputs["input_ids"].cuda() generation_output = model.generate( input_ids=input_ids, generation_config=GenerationConfig(temperature=0.2, top_p=0.75, num_beams=4), return_dict_in_generate=True, output_scores=True, max_new_tokens=256 ) for seq in generation_output.sequences: output = tokenizer.decode(seq) print(output.split("### Response:")[1].strip()) generate("Escribe un correo electrónico dando la bienvenida a un nuevo empleado llamado Manolo.") # Estimado Manolo, # # ¡Bienvenido a nuestro equipo! Estamos muy contentos de que hayas decidido unirse a nosotros y estamos ansiosos por comenzar a trabajar juntos. # # Nos gustaría darte las gracias por tu interés en nuestro equipo y esperamos que tengas un gran tiempo aquí. # # Si tienes alguna pregunta o duda, no dudes en contactarnos. # # Atentamente, # Equipo de [Nombre del Departamento] ``` ## Data The dataset is a translation to Spanish of [alpaca_data_cleaned.json](https://github.com/tloen/alpaca-lora/blob/main/alpaca_data_cleaned.json) (a clean version of the [Alpaca dataset made at Stanford](https://huggingface.co/datasets/tatsu-lab/alpaca)) using OpenAI's `gpt-3.5-turbo` model. We translated using a full-sample prompt instead of per strings, which resulted in more coherent tuples of `(instruction, input, output)` and costed around $60.0. **This dataset cannot be used to create models that compete in any way with OpenAI.** ## Finetuning To fine-tune the LLaMA model we used the code available on [Alpaca Lora](https://github.com/tloen/alpaca-lora), which provides code to finetune the LLaMA model using PEFT from Hugging Face. We run finetuning for 3 epochs using sequence length of 512 with no gradient accumulation on a single A6000 with 48GB VRAM for 12 hours on top of LLaMA-7B. ## Example outputs Stanford Alpaca: ``` Instruction: Inventa una excusa creativa para decir que no tengo que ir a la fiesta. Response: I'm sorry, I can't make it to the party. I have to stay home and take care of my sick pet. 
``` BERTIN Alpaca LoRA: ``` Instruction: Inventa una excusa creativa para decir que no tengo que ir a la fiesta. Response: No puedo ir a la fiesta porque tengo que terminar un proyecto urgente. ``` Stanford Alpaca: ``` Instruction: ¿Cómo puedo comenzar una carrera como científico de datos? Responde con una lista de pasos a seguir. Response: To begin a career as a data scientist, there are a few steps to take. First, you should have a bachelor's degree in a STEM-related field, such as computer science, mathematics, statistics, or engineering. You should also have experience with programming languages such as Python, R, or Java. Additionally, it is helpful to have knowledge of machine learning, data visualization, and data analytics. Finally, it is important to stay up to date with the latest trends and technologies in the field. ``` BERTIN Alpaca LoRA: ``` Instruction: ¿Cómo puedo comenzar una carrera como científico de datos? Responde con una lista de pasos a seguir. Response: Para comenzar una carrera como científico de datos, hay varios pasos a seguir. Primero, obtener una licenciatura en matemáticas, informática o ciencias de la computación. Luego, obtener una maestría o doctorado en ciencias de la computación, informática o alguna otra área relacionada. Finalmente, obtener experiencia en el campo trabajando en proyectos de investigación o desarrollando aplicaciones. ``` You can test it using the eval notebook [here](https://colab.research.google.com/github/22-hours/cabrita/blob/main/notebooks/cabrita-lora.ipynb). ## References - [LLaMA](https://ai.facebook.com/blog/large-language-model-llama-meta-ai/) - [Stanford Alpaca](https://github.com/tatsu-lab/stanford_alpaca) - [BERTIN Alpaca](https://huggingface.co/datasets/bertin-project/alpaca-spanish) - [Alpaca LoRA](https://github.com/tloen/alpaca-lora) - [ChatGPT](https://openai.com/blog/chatgpt) - [Hugging Face](https://huggingface.co/) ## Hardware Requirements For training we have used an A6000 48GB VRAM Nvidia GPU. For eval, you can use a T4.
Alireza1044/albert-base-v2-qnli
[ "pytorch", "tensorboard", "albert", "text-classification", "en", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
41
2023-03-27T14:00:21Z
--- license: gpl-3.0 datasets: - BelleGroup/generated_train_0.5M_CN - JosephusCheung/GuanacoDataset language: - zh tags: - alpaca - Chinese-Vicuna - llama --- Checkpoint of the Chinese-Vicuna model (https://github.com/Facico/Chinese-Vicuna) finetuned on belle0.5M+guanaco (about 1.5 epochs). The model is based on LLaMA-7B, so it can be used with applications built for LLaMA-7B.
Alireza1044/albert-base-v2-qqp
[ "pytorch", "albert", "text-classification", "en", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
37
2023-03-27T14:00:27Z
--- # For reference on model card metadata, see the spec: https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1 # Doc / guide: https://huggingface.co/docs/hub/model-cards {} --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> This modelcard aims to be a base template for new models. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/modelcard_template.md?plain=1). ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Data Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Data Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. 
--> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. --> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
Alireza1044/albert-base-v2-rte
[ "pytorch", "tensorboard", "albert", "text-classification", "en", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
2023-03-27T14:01:19Z
--- library_name: stable-baselines3 tags: - PandaReachDense-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: PandaReachDense-v2 type: PandaReachDense-v2 metrics: - type: mean_reward value: -1.69 +/- 0.51 name: mean_reward verified: false --- # **A2C** Agent playing **PandaReachDense-v2** This is a trained model of an **A2C** agent playing **PandaReachDense-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
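A minimal sketch of what the TODO block above could look like with the standard huggingface_sb3 workflow; the repo id and filename are placeholders, not taken from this card:

```python
# Hypothetical completion of the TODO above -- repo id and filename are placeholders.
import gym
import panda_gym  # registers the PandaReachDense-v2 environment
from huggingface_sb3 import load_from_hub
from stable_baselines3 import A2C

checkpoint = load_from_hub(repo_id="<user>/a2c-PandaReachDense-v2", filename="a2c-PandaReachDense-v2.zip")
model = A2C.load(checkpoint)

env = gym.make("PandaReachDense-v2")
obs = env.reset()  # with newer gym/gymnasium versions, reset() returns (obs, info)
action, _states = model.predict(obs, deterministic=True)
```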
Alireza1044/albert-base-v2-sst2
[ "pytorch", "tensorboard", "albert", "text-classification", "en", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
52
null
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-CartPole-v1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 500.00 +/- 0.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1**. To learn to use this model and train yours, check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
Alireza1044/bert_classification_lm
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
35
2023-03-27T14:02:48Z
--- license: gpl-3.0 datasets: - BelleGroup/generated_train_0.5M_CN - JosephusCheung/GuanacoDataset language: - zh tags: - alpaca - Chinese-Vicuna - llama --- Checkpoint of the Chinese-Vicuna model (https://github.com/Facico/Chinese-Vicuna), finetuned on belle0.5M + guanaco (about 0.75 epoch). The model is based on LLaMA-7B, so it can be loaded in applications that work with LLaMA-7B.
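A minimal loading sketch, assuming this checkpoint is a LoRA adapter to be applied on top of LLaMA-7B weights (the Chinese-Vicuna repository fine-tunes with LoRA); the base-model and adapter paths below are placeholders, not taken from this card:

```python
# Hypothetical sketch -- paths are placeholders, adjust to your local setup.
from transformers import LlamaForCausalLM, LlamaTokenizer
from peft import PeftModel

base_model_path = "path/to/llama-7b-hf"    # assumed LLaMA-7B base weights (not distributed here)
adapter_path = "path/to/this-checkpoint"   # the fine-tuned checkpoint from this repository

tokenizer = LlamaTokenizer.from_pretrained(base_model_path)
model = LlamaForCausalLM.from_pretrained(base_model_path)
model = PeftModel.from_pretrained(model, adapter_path)  # attach the fine-tuned LoRA weights

inputs = tokenizer("你好,请介绍一下你自己。", return_tensors="pt")  # "Hello, please introduce yourself."
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=64)[0], skip_special_tokens=True))
```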
Alireza1044/michael_bert_lm
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
2023-03-27T14:05:33Z
--- language: en thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1824002576/pg-railsconf_400x400.jpg&#39;)"> </div> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1524713190587871232/xcONaVmh_400x400.jpg&#39;)"> </div> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1586859332104343552/V1HRpbP1_400x400.jpg&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI CYBORG 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Paul Graham & Harry Stebbings & Sahil Bloom</div> <div style="text-align: center; font-size: 14px;">@harrystebbings-paulg-sahilbloom</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Paul Graham & Harry Stebbings & Sahil Bloom. | Data | Paul Graham | Harry Stebbings | Sahil Bloom | | --- | --- | --- | --- | | Tweets downloaded | 3249 | 3237 | 3249 | | Retweets | 515 | 44 | 161 | | Short tweets | 184 | 297 | 604 | | Tweets kept | 2550 | 2896 | 2484 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/rq738a0q/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @harrystebbings-paulg-sahilbloom's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/z9stajgu) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/z9stajgu/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/harrystebbings-paulg-sahilbloom') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. 
## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
AllwynJ/HarryBoy
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
2023-03-27T14:14:44Z
--- license: apache-2.0 tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: BERT_ep7_lr2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # BERT_ep7_lr2 This model is a fine-tuned version of [ajtamayoh/NER_EHR_Spanish_model_Mulitlingual_BERT](https://huggingface.co/ajtamayoh/NER_EHR_Spanish_model_Mulitlingual_BERT) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.0921 - Precision: 0.8435 - Recall: 0.8668 - F1: 0.8550 - Accuracy: 0.9757 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-06 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 7 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 1.0 | 467 | 0.0875 | 0.8131 | 0.8272 | 0.8201 | 0.9712 | | 0.1124 | 2.0 | 934 | 0.0855 | 0.8073 | 0.8649 | 0.8351 | 0.9728 | | 0.075 | 3.0 | 1401 | 0.0824 | 0.8359 | 0.8579 | 0.8467 | 0.9754 | | 0.0603 | 4.0 | 1868 | 0.0835 | 0.8409 | 0.8587 | 0.8497 | 0.9754 | | 0.0474 | 5.0 | 2335 | 0.0886 | 0.8428 | 0.8695 | 0.8560 | 0.9755 | | 0.0434 | 6.0 | 2802 | 0.0899 | 0.8450 | 0.8682 | 0.8565 | 0.9758 | | 0.0391 | 7.0 | 3269 | 0.0921 | 0.8435 | 0.8668 | 0.8550 | 0.9757 | ### Framework versions - Transformers 4.27.3 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
Amir99/toxic
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-27T14:44:44Z
--- license: mit tags: - generated_from_trainer datasets: - xtreme metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-de results: - task: name: Token Classification type: token-classification dataset: name: xtreme type: xtreme config: PAN-X.de split: validation args: PAN-X.de metrics: - name: F1 type: f1 value: 0.8622564567285909 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-de This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset. It achieves the following results on the evaluation set: - Loss: 0.1380 - F1: 0.8623 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.2805 | 1.0 | 525 | 0.1536 | 0.8337 | | 0.13 | 2.0 | 1050 | 0.1404 | 0.8455 | | 0.0816 | 3.0 | 1575 | 0.1380 | 0.8623 | ### Framework versions - Transformers 4.27.3 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
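A minimal usage sketch for this kind of token-classification checkpoint (the model id below is a placeholder -- substitute the actual `<user>/xlm-roberta-base-finetuned-panx-de` repository name):

```python
from transformers import pipeline

# Placeholder repo id -- replace with the actual namespace of this fine-tuned model.
ner = pipeline(
    "token-classification",
    model="<user>/xlm-roberta-base-finetuned-panx-de",
    aggregation_strategy="simple",  # merge word pieces into whole entities
)
print(ner("Jeff Dean arbeitet bei Google in Kalifornien."))
```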
Anamika/autonlp-Feedback1-479512837
[ "pytorch", "xlm-roberta", "text-classification", "unk", "dataset:Anamika/autonlp-data-Feedback1", "transformers", "autonlp", "co2_eq_emissions" ]
text-classification
{ "architectures": [ "XLMRobertaForSequenceClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
34
null
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-CartPole-v1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 500.00 +/- 0.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1**. To learn to use this model and train yours, check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
Andranik/TestPytorchClassification
[ "pytorch", "distilbert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
36
null
--- language: - as metrics: - accuracy license: bsd ---
Andres2015/HiggingFaceTest
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-27T15:22:39Z
Project repository: [LLMPruner: Large Language Model Pruning Tool](https://github.com/yangjianxin1/LLMPruner)

LLMPruner is a pruning tool for large language models. By pruning the redundant vocabulary of a large language model, it reduces the number of model parameters, lowers GPU memory usage, and speeds up training, while preserving the knowledge learned during pretraining.

This project prunes the vocabulary of Bloom, keeping the Chinese tokens and the common English tokens. The vocabulary shrinks from 250,880 to 46,145 entries, i.e. to 18.39% of its original size. The pruned Bloom models are listed in the table below:

| Pruned model | Original model | Parameter ratio |
|-----------------------------------------------------------------------------|-----------------------------------------------------------------------------|--------|
| [YeungNLP/bloom-396m-zh](https://huggingface.co/YeungNLP/bloom-396m-zh) | [bigscience/bloom-560m](https://huggingface.co/bigscience/bloom-560m) | 70.96% |
| [YeungNLP/bloom-820m-zh](https://huggingface.co/YeungNLP/bloom-820m-zh) | [bigscience/bloom-1b1](https://huggingface.co/bigscience/bloom-1b1) | 77.13% |
| [YeungNLP/bloom-1b4-zh](https://huggingface.co/YeungNLP/bloom-1b4-zh) | [bigscience/bloom-1b7](https://huggingface.co/bigscience/bloom-1b7) | 81.14% |
| [YeungNLP/bloom-2b6-zh](https://huggingface.co/YeungNLP/bloom-2b6-zh) | [bigscience/bloom-3b](https://huggingface.co/bigscience/bloom-3b) | 86.48% |
| [YeungNLP/bloom-6b4-zh](https://huggingface.co/YeungNLP/bloom-6b4-zh) | [bigscience/bloom-7b1](https://huggingface.co/bigscience/bloom-7b1) | 90.81% |
| [YeungNLP/bloomz-396m-zh](https://huggingface.co/YeungNLP/bloomz-396m-zh) | [bigscience/bloomz-560m](https://huggingface.co/bigscience/bloomz-560m) | 70.96% |
| [YeungNLP/bloomz-820m-zh](https://huggingface.co/YeungNLP/bloomz-820m-zh) | [bigscience/bloomz-1b1](https://huggingface.co/bigscience/bloomz-1b1) | 77.13% |
| [YeungNLP/bloomz-1b4-zh](https://huggingface.co/YeungNLP/bloomz-1b4-zh) | [bigscience/bloomz-1b7](https://huggingface.co/bigscience/bloomz-1b7) | 81.14% |
| [YeungNLP/bloomz-2b6-zh](https://huggingface.co/YeungNLP/bloomz-2b6-zh) | [bigscience/bloomz-3b](https://huggingface.co/bigscience/bloomz-3b) | 86.48% |
| [YeungNLP/bloomz-6b4-zh](https://huggingface.co/YeungNLP/bloomz-6b4-zh) | [bigscience/bloomz-7b1](https://huggingface.co/bigscience/bloomz-7b1) | 90.81% |
| [YeungNLP/bloomz-6b4-mt-zh](https://huggingface.co/YeungNLP/bloomz-6b4-mt-zh) | [bigscience/bloomz-7b1-mt](https://huggingface.co/bigscience/bloomz-7b1-mt) | 90.81% |

Usage:

```python
from transformers import BloomTokenizerFast, BloomForCausalLM
tokenizer = BloomTokenizerFast.from_pretrained('YeungNLP/bloom-1b4-zh')
model = BloomForCausalLM.from_pretrained('YeungNLP/bloom-1b4-zh')
print(tokenizer.batch_decode(model.generate(tokenizer.encode('长风破浪会有时', return_tensors='pt'))))
```
Anji/roberta-base-squad2-finetuned-squad
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: Taxi-v3 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.50 +/- 2.76 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python
# `load_from_hub` here is assumed to be the pickle-loading helper defined in the course notebook.
model = load_from_hub(repo_id="pmgautam/Taxi-v3", filename="q-learning.pkl")
# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
AnonymousSub/AR_rule_based_roberta_only_classfn_epochs_1_shard_10
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- tags: - autotrain - vision - image-classification datasets: - SebasV/autotrain-data-tableros_factibilidad widget: - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg example_title: Tiger - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg example_title: Teapot - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg example_title: Palace co2_eq_emissions: emissions: 0.6678858266803156 --- # Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 44246111621 - CO2 Emissions (in grams): 0.6679 ## Validation Metrics - Loss: 1.097 - Accuracy: 0.200 - Macro F1: 0.167 - Micro F1: 0.200 - Weighted F1: 0.133 - Macro Precision: 0.125 - Micro Precision: 0.200 - Weighted Precision: 0.100 - Macro Recall: 0.250 - Micro Recall: 0.200 - Weighted Recall: 0.200
AnonymousSub/AR_rule_based_roberta_only_classfn_twostage_epochs_1_shard_10
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- tags: - autotrain - vision - image-classification datasets: - SebasV/autotrain-data-tableros_factibilidad widget: - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg example_title: Tiger - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg example_title: Teapot - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg example_title: Palace co2_eq_emissions: emissions: 0.7280371574302341 --- # Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 44246111623 - CO2 Emissions (in grams): 0.7280 ## Validation Metrics - Loss: 0.962 - Accuracy: 0.400 - Macro F1: 0.375 - Micro F1: 0.400 - Weighted F1: 0.300 - Macro Precision: 0.333 - Micro Precision: 0.400 - Weighted Precision: 0.267 - Macro Recall: 0.500 - Micro Recall: 0.400 - Weighted Recall: 0.400
AnonymousSub/AR_rule_based_roberta_twostage_quadruplet_epochs_1_shard_1
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- tags: - autotrain - vision - image-classification datasets: - SebasV/autotrain-data-tableros_factibilidad widget: - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg example_title: Tiger - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg example_title: Teapot - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg example_title: Palace co2_eq_emissions: emissions: 0.36296304345687347 --- # Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 44246111624 - CO2 Emissions (in grams): 0.3630 ## Validation Metrics - Loss: 1.413 - Accuracy: 0.400 - Macro F1: 0.375 - Micro F1: 0.400 - Weighted F1: 0.400 - Macro Precision: 0.375 - Micro Precision: 0.400 - Weighted Precision: 0.400 - Macro Recall: 0.375 - Micro Recall: 0.400 - Weighted Recall: 0.400
AnonymousSub/AR_rule_based_roberta_twostagequadruplet_hier_epochs_1_shard_10
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: my_qa_model results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # my_qa_model This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 5.0884 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 7 | 5.5284 | | No log | 2.0 | 14 | 5.2180 | | No log | 3.0 | 21 | 5.0884 | ### Framework versions - Transformers 4.27.3 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
AnonymousSub/SR_consert
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: ViditRaj/Distil_BERT_Hindi_Ads_Classifier results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # ViditRaj/Distil_BERT_Hindi_Ads_Classifier This model is a fine-tuned version of [distilbert-base-multilingual-cased](https://huggingface.co/distilbert-base-multilingual-cased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.1092 - Validation Loss: 0.1996 - Train Accuracy: 0.9347 - Epoch: 4 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': True, 'is_legacy_optimizer': False, 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 480, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Train Accuracy | Epoch | |:----------:|:---------------:|:--------------:|:-----:| | 0.3621 | 0.2848 | 0.9012 | 0 | | 0.2283 | 0.2223 | 0.9210 | 1 | | 0.1774 | 0.2084 | 0.9255 | 2 | | 0.1389 | 0.2367 | 0.9073 | 3 | | 0.1092 | 0.1996 | 0.9347 | 4 | ### Framework versions - Transformers 4.27.3 - TensorFlow 2.11.0 - Datasets 2.10.1 - Tokenizers 0.13.2
AnonymousSub/SR_declutr
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- tags: - autotrain - text-classification language: - en widget: - text: "I love AutoTrain 🤗" datasets: - Cleighton071/autotrain-data-detection-for-product-location co2_eq_emissions: emissions: 2.30199726014708 --- # Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 44269111681 - CO2 Emissions (in grams): 2.3020 ## Validation Metrics - Loss: 0.005 - Accuracy: 0.999 - Macro F1: 0.999 - Micro F1: 0.999 - Weighted F1: 0.999 - Macro Precision: 0.999 - Micro Precision: 0.999 - Weighted Precision: 0.999 - Macro Recall: 0.999 - Micro Recall: 0.999 - Weighted Recall: 0.999 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/Cleighton071/autotrain-detection-for-product-location-44269111681 ``` Or Python API: ``` from transformers import AutoModelForSequenceClassification, AutoTokenizer model = AutoModelForSequenceClassification.from_pretrained("Cleighton071/autotrain-detection-for-product-location-44269111681", use_auth_token=True) tokenizer = AutoTokenizer.from_pretrained("Cleighton071/autotrain-detection-for-product-location-44269111681", use_auth_token=True) inputs = tokenizer("I love AutoTrain", return_tensors="pt") outputs = model(**inputs) ```
AnonymousSub/SR_rule_based_roberta_bert_quadruplet_epochs_1_shard_10
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- tags: - image-classification - pytorch - huggingpics metrics: - accuracy model-index: - name: rare-puppers results: - task: name: Image Classification type: image-classification metrics: - name: Accuracy type: accuracy value: 0.939393937587738 --- # rare-puppers Autogenerated by HuggingPics🤗🖼️ Create your own image classifier for **anything** by running [the demo on Google Colab](https://colab.research.google.com/github/nateraw/huggingpics/blob/main/HuggingPics.ipynb). Report any issues with the demo at the [github repo](https://github.com/nateraw/huggingpics). ## Example Images #### corgi ![corgi](images/corgi.jpg) #### samoyed ![samoyed](images/samoyed.jpg) #### shiba inu ![shiba inu](images/shiba_inu.jpg)
AnonymousSub/SR_rule_based_roberta_twostagetriplet_hier_epochs_1_shard_1
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
# ⚠️ Type of model/library unknown. # Feel free to open a Pull request # for integration of the huggingface model hub # into the corresponding library =)
AnonymousSub/SR_rule_based_twostagequadruplet_hier_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- inference: false language: pt datasets: - assin2 --- # BERTimbau base for Recognizing Textual Entailment This is the [neuralmind/bert-base-portuguese-cased](https://huggingface.co/neuralmind/bert-base-portuguese-cased) model finetuned for Recognizing Textual Entailment with the [ASSIN 2](https://huggingface.co/datasets/assin2) dataset. This model is suitable for Portuguese. - Git Repo: [Evaluation of Portuguese Language Models](https://github.com/ruanchaves/eplm). - Demo: [Portuguese Textual Entailment](https://ruanchaves-portuguese-textual-entailment.hf.space) ### **Labels**: * 0 : There is no entailment between premise and hypothesis. * 1 : There is entailment between premise and hypothesis. ## Full classification example ```python from transformers import AutoModelForSequenceClassification, AutoTokenizer, AutoConfig import numpy as np import torch from scipy.special import softmax model_name = "ruanchaves/bert-base-portuguese-cased-assin2-entailment" s1 = "Os homens estão cuidadosamente colocando as malas no porta-malas de um carro." s2 = "Os homens estão colocando bagagens dentro do porta-malas de um carro." model = AutoModelForSequenceClassification.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) config = AutoConfig.from_pretrained(model_name) model_input = tokenizer(*([s1], [s2]), padding=True, return_tensors="pt") with torch.no_grad(): output = model(**model_input) scores = output[0][0].detach().numpy() scores = softmax(scores) ranking = np.argsort(scores) ranking = ranking[::-1] for i in range(scores.shape[0]): l = config.id2label[ranking[i]] s = scores[ranking[i]] print(f"{i+1}) Label: {l} Score: {np.round(float(s), 4)}") ``` ## Citation Our research is ongoing, and we are currently working on describing our experiments in a paper, which will be published soon. In the meanwhile, if you would like to cite our work or models before the publication of the paper, please cite our [GitHub repository](https://github.com/ruanchaves/eplm): ``` @software{Chaves_Rodrigues_eplm_2023, author = {Chaves Rodrigues, Ruan and Tanti, Marc and Agerri, Rodrigo}, doi = {10.5281/zenodo.7781848}, month = {3}, title = {{Evaluation of Portuguese Language Models}}, url = {https://github.com/ruanchaves/eplm}, version = {1.0.0}, year = {2023} } ```
AnonymousSub/SR_rule_based_twostagetriplet_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- inference: false language: pt datasets: - assin2 --- # BERTimbau base for Semantic Textual Similarity This is the [neuralmind/bert-base-portuguese-cased](https://huggingface.co/neuralmind/bert-base-portuguese-cased) model finetuned for Semantic Textual Similarity with the [ASSIN 2](https://huggingface.co/datasets/assin2) dataset. This model is suitable for Portuguese. - Git Repo: [Evaluation of Portuguese Language Models](https://github.com/ruanchaves/eplm). - Demo: [Portuguese Semantic Similarity](https://ruanchaves-portuguese-semantic-similarity.hf.space) ## Full regression example ```python from transformers import AutoModelForSequenceClassification, AutoTokenizer, AutoConfig import numpy as np import torch model_name = "ruanchaves/bert-base-portuguese-cased-assin2-similarity" s1 = "A gente faz o aporte financeiro, é como se a empresa fosse parceira do Monte Cristo." s2 = "Fernando Moraes afirma que não tem vínculo com o Monte Cristo além da parceira." model = AutoModelForSequenceClassification.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) config = AutoConfig.from_pretrained(model_name) model_input = tokenizer(*([s1], [s2]), padding=True, return_tensors="pt") with torch.no_grad(): output = model(**model_input) score = output[0][0].detach().numpy().item() print(f"Similarity Score: {np.round(float(score), 4)}") ``` ## Citation Our research is ongoing, and we are currently working on describing our experiments in a paper, which will be published soon. In the meanwhile, if you would like to cite our work or models before the publication of the paper, please cite our [GitHub repository](https://github.com/ruanchaves/eplm): ``` @software{Chaves_Rodrigues_eplm_2023, author = {Chaves Rodrigues, Ruan and Tanti, Marc and Agerri, Rodrigo}, doi = {10.5281/zenodo.7781848}, month = {3}, title = {{Evaluation of Portuguese Language Models}}, url = {https://github.com/ruanchaves/eplm}, version = {1.0.0}, year = {2023} } ```
AnonymousSub/SR_rule_based_twostagetriplet_hier_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- inference: false language: pt datasets: - ruanchaves/faquad-nli --- # BERTimbau base for Question Answering This is the [neuralmind/bert-base-portuguese-cased](https://huggingface.co/neuralmind/bert-base-portuguese-cased) model finetuned for Text Simplification with the [FaQUaD-NLI](https://huggingface.co/ruanchaves/faquad-nli) dataset. This model is suitable for Portuguese. - Git Repo: [Evaluation of Portuguese Language Models](https://github.com/ruanchaves/eplm). - Demo: [Hugging Face Space: Question Answering](https://ruanchaves-portuguese-text-simplification.hf.space) ### **Labels**: * 0 : The answer is not suitable for the provided question. * 1 : The answer is suitable for the provided question. ## Full classification example ```python from transformers import AutoModelForSequenceClassification, AutoTokenizer, AutoConfig import numpy as np import torch from scipy.special import softmax model_name = "ruanchaves/bert-base-portuguese-cased-faquad-nli" s1 = "Qual a montanha mais alta do mundo?" s2 = "Monte Everest é a montanha mais alta do mundo." model = AutoModelForSequenceClassification.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) config = AutoConfig.from_pretrained(model_name) model_input = tokenizer(*([s1], [s2]), padding=True, return_tensors="pt") with torch.no_grad(): output = model(**model_input) scores = output[0][0].detach().numpy() scores = softmax(scores) ranking = np.argsort(scores) ranking = ranking[::-1] for i in range(scores.shape[0]): l = config.id2label[ranking[i]] s = scores[ranking[i]] print(f"{i+1}) Label: {l} Score: {np.round(float(s), 4)}") ``` ## Citation Our research is ongoing, and we are currently working on describing our experiments in a paper, which will be published soon. In the meanwhile, if you would like to cite our work or models before the publication of the paper, please cite our [GitHub repository](https://github.com/ruanchaves/eplm): ``` @software{Chaves_Rodrigues_eplm_2023, author = {Chaves Rodrigues, Ruan and Tanti, Marc and Agerri, Rodrigo}, doi = {10.5281/zenodo.7781848}, month = {3}, title = {{Evaluation of Portuguese Language Models}}, url = {https://github.com/ruanchaves/eplm}, version = {1.0.0}, year = {2023} } ```
AnonymousSub/SR_specter
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- inference: false language: pt datasets: - ruanchaves/hatebr --- # BERTimbau base for Offensive Language Detection This is the [neuralmind/bert-base-portuguese-cased](https://huggingface.co/neuralmind/bert-base-portuguese-cased) model finetuned for Offensive Language Detection with the [HateBR](https://huggingface.co/ruanchaves/hatebr) dataset. This model is suitable for Portuguese. - Git Repo: [Evaluation of Portuguese Language Models](https://github.com/ruanchaves/eplm). - Demo: [Hugging Face Space: Offensive Language Detection](https://ruanchaves-portuguese-offensive-language-de-d4d0507.hf.space) ### **Labels**: * 0 : The text is not offensive. * 1 : The text is offensive. ## Full classification example ```python from transformers import AutoModelForSequenceClassification, AutoTokenizer, AutoConfig import numpy as np import torch from scipy.special import softmax model_name = "ruanchaves/bert-base-portuguese-cased-hatebr" s1 = "Quem não deve não teme!!" model = AutoModelForSequenceClassification.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) config = AutoConfig.from_pretrained(model_name) model_input = tokenizer(*([s1],), padding=True, return_tensors="pt") with torch.no_grad(): output = model(**model_input) scores = output[0][0].detach().numpy() scores = softmax(scores) ranking = np.argsort(scores) ranking = ranking[::-1] for i in range(scores.shape[0]): l = config.id2label[ranking[i]] s = scores[ranking[i]] print(f"{i+1}) Label: {l} Score: {np.round(float(s), 4)}") ``` ## Licensing Information The HateBR dataset, including all its components, is provided strictly for academic and research purposes. The use of the dataset for any commercial or non-academic purpose is expressly prohibited without the prior written consent of [SINCH](https://www.sinch.com/). ## Citation Our research is ongoing, and we are currently working on describing our experiments in a paper, which will be published soon. In the meanwhile, if you would like to cite our work or models before the publication of the paper, please cite our [GitHub repository](https://github.com/ruanchaves/eplm): ``` @software{Chaves_Rodrigues_eplm_2023, author = {Chaves Rodrigues, Ruan and Tanti, Marc and Agerri, Rodrigo}, doi = {10.5281/zenodo.7781848}, month = {3}, title = {{Evaluation of Portuguese Language Models}}, url = {https://github.com/ruanchaves/eplm}, version = {1.0.0}, year = {2023} } ```
AnonymousSub/T5_pubmedqa_question_generation
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
6
null
--- inference: false language: pt datasets: - assin --- # BERTimbau large for Recognizing Textual Entailment This is the [neuralmind/bert-large-portuguese-cased](https://huggingface.co/neuralmind/bert-large-portuguese-cased) model finetuned for Recognizing Textual Entailment with the [ASSIN](https://huggingface.co/datasets/assin) dataset. This model is suitable for Portuguese. - Git Repo: [Evaluation of Portuguese Language Models](https://github.com/ruanchaves/eplm). - Demo: [Portuguese Textual Entailment](https://ruanchaves-portuguese-textual-entailment.hf.space) ### **Labels**: * 0 : There is no entailment between premise and hypothesis. * 1 : There is entailment between premise and hypothesis. * 2 : The premise is a paraphrase of the hypothesis. ## Full classification example ```python from transformers import AutoModelForSequenceClassification, AutoTokenizer, AutoConfig import numpy as np import torch from scipy.special import softmax model_name = "ruanchaves/bert-large-portuguese-cased-assin-entailment" s1 = "Os homens estão cuidadosamente colocando as malas no porta-malas de um carro." s2 = "Os homens estão colocando bagagens dentro do porta-malas de um carro." model = AutoModelForSequenceClassification.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) config = AutoConfig.from_pretrained(model_name) model_input = tokenizer(*([s1], [s2]), padding=True, return_tensors="pt") with torch.no_grad(): output = model(**model_input) scores = output[0][0].detach().numpy() scores = softmax(scores) ranking = np.argsort(scores) ranking = ranking[::-1] for i in range(scores.shape[0]): l = config.id2label[ranking[i]] s = scores[ranking[i]] print(f"{i+1}) Label: {l} Score: {np.round(float(s), 4)}") ``` ## Citation Our research is ongoing, and we are currently working on describing our experiments in a paper, which will be published soon. In the meanwhile, if you would like to cite our work or models before the publication of the paper, please cite our [GitHub repository](https://github.com/ruanchaves/eplm): ``` @software{Chaves_Rodrigues_eplm_2023, author = {Chaves Rodrigues, Ruan and Tanti, Marc and Agerri, Rodrigo}, doi = {10.5281/zenodo.7781848}, month = {3}, title = {{Evaluation of Portuguese Language Models}}, url = {https://github.com/ruanchaves/eplm}, version = {1.0.0}, year = {2023} } ```
AnonymousSub/bert_hier_diff_equal_wts_epochs_1_shard_10
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- inference: false language: pt datasets: - ruanchaves/faquad-nli --- # BERTimbau large for Question Answering This is the [neuralmind/bert-large-portuguese-cased](https://huggingface.co/neuralmind/bert-large-portuguese-cased) model finetuned for Text Simplification with the [FaQUaD-NLI](https://huggingface.co/ruanchaves/faquad-nli) dataset. This model is suitable for Portuguese. - Git Repo: [Evaluation of Portuguese Language Models](https://github.com/ruanchaves/eplm). - Demo: [Hugging Face Space: Question Answering](https://ruanchaves-portuguese-text-simplification.hf.space) ### **Labels**: * 0 : The answer is not suitable for the provided question. * 1 : The answer is suitable for the provided question. ## Full classification example ```python from transformers import AutoModelForSequenceClassification, AutoTokenizer, AutoConfig import numpy as np import torch from scipy.special import softmax model_name = "ruanchaves/bert-large-portuguese-cased-faquad-nli" s1 = "Qual a montanha mais alta do mundo?" s2 = "Monte Everest é a montanha mais alta do mundo." model = AutoModelForSequenceClassification.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) config = AutoConfig.from_pretrained(model_name) model_input = tokenizer(*([s1], [s2]), padding=True, return_tensors="pt") with torch.no_grad(): output = model(**model_input) scores = output[0][0].detach().numpy() scores = softmax(scores) ranking = np.argsort(scores) ranking = ranking[::-1] for i in range(scores.shape[0]): l = config.id2label[ranking[i]] s = scores[ranking[i]] print(f"{i+1}) Label: {l} Score: {np.round(float(s), 4)}") ``` ## Citation Our research is ongoing, and we are currently working on describing our experiments in a paper, which will be published soon. In the meanwhile, if you would like to cite our work or models before the publication of the paper, please cite our [GitHub repository](https://github.com/ruanchaves/eplm): ``` @software{Chaves_Rodrigues_eplm_2023, author = {Chaves Rodrigues, Ruan and Tanti, Marc and Agerri, Rodrigo}, doi = {10.5281/zenodo.7781848}, month = {3}, title = {{Evaluation of Portuguese Language Models}}, url = {https://github.com/ruanchaves/eplm}, version = {1.0.0}, year = {2023} } ```
AnonymousSub/bert_mean_diff_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- inference: false language: pt datasets: - ruanchaves/hatebr --- # BERTimbau large for Offensive Language Detection This is the [neuralmind/bert-large-portuguese-cased](https://huggingface.co/neuralmind/bert-large-portuguese-cased) model finetuned for Offensive Language Detection with the [HateBR](https://huggingface.co/ruanchaves/hatebr) dataset. This model is suitable for Portuguese. - Git Repo: [Evaluation of Portuguese Language Models](https://github.com/ruanchaves/eplm). - Demo: [Hugging Face Space: Offensive Language Detection](https://ruanchaves-portuguese-offensive-language-de-d4d0507.hf.space) ### **Labels**: * 0 : The text is not offensive. * 1 : The text is offensive. ## Full classification example ```python from transformers import AutoModelForSequenceClassification, AutoTokenizer, AutoConfig import numpy as np import torch from scipy.special import softmax model_name = "ruanchaves/bert-large-portuguese-cased-hatebr" s1 = "Quem não deve não teme!!" model = AutoModelForSequenceClassification.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) config = AutoConfig.from_pretrained(model_name) model_input = tokenizer(*([s1],), padding=True, return_tensors="pt") with torch.no_grad(): output = model(**model_input) scores = output[0][0].detach().numpy() scores = softmax(scores) ranking = np.argsort(scores) ranking = ranking[::-1] for i in range(scores.shape[0]): l = config.id2label[ranking[i]] s = scores[ranking[i]] print(f"{i+1}) Label: {l} Score: {np.round(float(s), 4)}") ``` ## Licensing Information The HateBR dataset, including all its components, is provided strictly for academic and research purposes. The use of the dataset for any commercial or non-academic purpose is expressly prohibited without the prior written consent of [SINCH](https://www.sinch.com/). ## Citation Our research is ongoing, and we are currently working on describing our experiments in a paper, which will be published soon. In the meanwhile, if you would like to cite our work or models before the publication of the paper, please cite our [GitHub repository](https://github.com/ruanchaves/eplm): ``` @software{Chaves_Rodrigues_eplm_2023, author = {Chaves Rodrigues, Ruan and Tanti, Marc and Agerri, Rodrigo}, doi = {10.5281/zenodo.7781848}, month = {3}, title = {{Evaluation of Portuguese Language Models}}, url = {https://github.com/ruanchaves/eplm}, version = {1.0.0}, year = {2023} } ```
AnonymousSub/cline-emanuals-s10-AR
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
null
--- inference: false language: pt datasets: - ruanchaves/faquad-nli --- # mDeBERTa v3 base for Question Answering This is the [microsoft/mdeberta-v3-base](https://huggingface.co/microsoft/mdeberta-v3-base) model finetuned for Question Answering with the [FaQUaD-NLI](https://huggingface.co/ruanchaves/faquad-nli) dataset. This model is suitable for Portuguese. - Git Repo: [Evaluation of Portuguese Language Models](https://github.com/ruanchaves/eplm). - Demo: [Hugging Face Space: Question Answering](https://ruanchaves-portuguese-text-simplification.hf.space) ### **Labels**: * 0 : The answer is not suitable for the provided question. * 1 : The answer is suitable for the provided question. ## Full classification example ```python from transformers import AutoModelForSequenceClassification, AutoTokenizer, AutoConfig import numpy as np import torch from scipy.special import softmax model_name = "ruanchaves/mdeberta-v3-base-faquad-nli" s1 = "Qual a montanha mais alta do mundo?" s2 = "Monte Everest é a montanha mais alta do mundo." model = AutoModelForSequenceClassification.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) config = AutoConfig.from_pretrained(model_name) model_input = tokenizer(*([s1], [s2]), padding=True, return_tensors="pt") with torch.no_grad(): output = model(**model_input) scores = output[0][0].detach().numpy() scores = softmax(scores) ranking = np.argsort(scores) ranking = ranking[::-1] for i in range(scores.shape[0]): l = config.id2label[ranking[i]] s = scores[ranking[i]] print(f"{i+1}) Label: {l} Score: {np.round(float(s), 4)}") ``` ## Citation Our research is ongoing, and we are currently working on describing our experiments in a paper, which will be published soon. In the meanwhile, if you would like to cite our work or models before the publication of the paper, please cite our [GitHub repository](https://github.com/ruanchaves/eplm): ``` @software{Chaves_Rodrigues_eplm_2023, author = {Chaves Rodrigues, Ruan and Tanti, Marc and Agerri, Rodrigo}, doi = {10.5281/zenodo.7781848}, month = {3}, title = {{Evaluation of Portuguese Language Models}}, url = {https://github.com/ruanchaves/eplm}, version = {1.0.0}, year = {2023} } ```
AnonymousSub/consert-s10-SR
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
28
null
# Vocabulary Trimmed [lmqg/mbart-large-cc25-esquad-qa](https://huggingface.co/lmqg/mbart-large-cc25-esquad-qa): `vocabtrimmer/mbart-large-cc25-esquad-qa-trimmed-es-5000` This model is a trimmed version of [lmqg/mbart-large-cc25-esquad-qa](https://huggingface.co/lmqg/mbart-large-cc25-esquad-qa) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | lmqg/mbart-large-cc25-esquad-qa | vocabtrimmer/mbart-large-cc25-esquad-qa-trimmed-es-5000 | |:---------------------------|:----------------------------------|:----------------------------------------------------------| | parameter_size_full | 610,852,864 | 359,948,288 | | parameter_size_embedding | 512,057,344 | 10,248,192 | | vocab_size | 250,028 | 5,004 | | compression_rate_full | 100.0 | 58.93 | | compression_rate_embedding | 100.0 | 2.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | es | vocabtrimmer/mc4_validation | text | es | validation | 5000 | 2 |
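As a quick sanity check on the numbers above, a minimal loading sketch; it assumes the trimmed checkpoint loads through the standard `transformers` Auto classes and that `sentencepiece` is installed:

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_name = "vocabtrimmer/mbart-large-cc25-esquad-qa-trimmed-es-5000"

# Load the trimmed checkpoint like any other Hugging Face seq2seq model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# These should roughly match the vocab_size and parameter_size_full rows in the table above
print("tokenizer vocab size:", len(tokenizer))
print("model parameters:", model.num_parameters())
```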
AnonymousSub/consert-techqa
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
# Vocabulary Trimmed [lmqg/mbart-large-cc25-frquad-qa](https://huggingface.co/lmqg/mbart-large-cc25-frquad-qa): `vocabtrimmer/mbart-large-cc25-frquad-qa-trimmed-fr-5000` This model is a trimmed version of [lmqg/mbart-large-cc25-frquad-qa](https://huggingface.co/lmqg/mbart-large-cc25-frquad-qa) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | lmqg/mbart-large-cc25-frquad-qa | vocabtrimmer/mbart-large-cc25-frquad-qa-trimmed-fr-5000 | |:---------------------------|:----------------------------------|:----------------------------------------------------------| | parameter_size_full | 610,852,864 | 359,948,288 | | parameter_size_embedding | 512,057,344 | 10,248,192 | | vocab_size | 250,028 | 5,004 | | compression_rate_full | 100.0 | 58.93 | | compression_rate_embedding | 100.0 | 2.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | fr | vocabtrimmer/mc4_validation | text | fr | validation | 5000 | 2 |
AnonymousSub/declutr-emanuals-s10-AR
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
null
Access to model c-nemo/bert-for-movie-review-classification is restricted and you are not in the authorized list. Visit https://huggingface.co/c-nemo/bert-for-movie-review-classification to ask for access.
AnonymousSub/declutr-emanuals-s10-SR
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
28
null
# LLaMA 7B Hugging Face model This repo hosts model weights and is intended for research purposes only. If it goes against any policies I am not aware of, feel free to reach out to me and I will delete it. --- license: other --- LLaMA-7B converted to work with Transformers/HuggingFace. This is under a special license; please see the LICENSE file for details. License: Non-commercial bespoke license > Note: The statement above is copied from https://huggingface.co/decapoda-research/llama-7b-hf
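Since the card only states that the weights were converted to the Transformers format, here is a minimal loading sketch; it assumes a recent `transformers` release with LLaMA support, and the repository id below is a placeholder for wherever these converted weights are actually hosted:

```python
import torch
from transformers import LlamaForCausalLM, LlamaTokenizer

# Placeholder repository id: substitute the repo that actually hosts these converted weights
repo_id = "your-username/llama-7b-hf"

tokenizer = LlamaTokenizer.from_pretrained(repo_id)
model = LlamaForCausalLM.from_pretrained(
    repo_id,
    torch_dtype=torch.float16,  # half precision to keep the 7B weights smaller in memory
    device_map="auto",          # requires the accelerate package
)

prompt = "The capital of France is"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output_ids = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```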
AnonymousSub/declutr-emanuals-techqa
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
2023-03-27T18:37:58Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 264.03 +/- 21.90 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
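The usage section above is left as a template TODO. A minimal sketch of the usual loading pattern, assuming stable-baselines3 2.x with gymnasium; the repository id and checkpoint filename below are placeholders, not values taken from this card:

```python
import gymnasium as gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# Placeholder repository id and filename: the card does not state them
checkpoint = load_from_hub(repo_id="your-username/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)

# Roll out one episode with the loaded policy
env = gym.make("LunarLander-v2")
obs, info = env.reset()
done = False
while not done:
    action, _states = model.predict(obs, deterministic=True)
    obs, reward, terminated, truncated, info = env.step(action)
    done = terminated or truncated
env.close()
```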
AnonymousSub/declutr-model-emanuals
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- language: - de pipeline_tag: fill-mask tags: - parliamentary protocols - political texts widget: - text: >- Diese Themen gehören nicht ins [MASK]. --- ⚠️ This version is only trained on around 5 million sentences (perplexity w/ adaption: 3.38 and w/o 13.38). The final version trained on around 30 million sentences will be available soon. 🚀 ParlBERT-v2 is a more general version of [ParlBERT-v1](https://huggingface.co/chkla/parlbert-german), including texts from both the federal and state level in Germany. The first version was trained only on state-level data. # ParlBERT v2 This model is based on the German BERT (GBERT) architecture, specifically the "deepset/gbert-base" base model. It has been trained on over 30 million German political sentences from the ["GerParCor" (Abrami et al. 2022)](http://gerparcor.texttechnologylab.org) corpus for three epochs to provide a domain-adapted language model for German political texts. The German Political Texts Adapted GBERT model is designed for tasks related to German political texts. It can be used in a variety of applications. 📚 **Dataset** "GerParCor is a genre-specific corpus of (predominantly historical) German-language parliamentary protocols from three centuries and four countries, including state and federal level data." (Abrami et al. 2022) 🤖 **Model training** During the model training process, a masked language modeling approach was used with a token masking probability of 15%. The training was performed for three epochs, which means that the entire dataset was passed through the model three times during the training process. 👨‍💻 **Model Use** ```python from transformers import pipeline model = pipeline('fill-mask', model='parlbert-german-v2') model("Diese Themen gehören nicht ins [MASK].") ``` ⚠️ **Limitations** The German ParlBERT has limitations and potential biases. The GerParCor corpus only contains texts from the domain of politics, so the model may not perform well on texts from other domains. Additionally, the model may not be suitable for analyzing social media posts and similar informal text types. The model's training data is derived from contemporary German political texts, which may reflect certain biases or perspectives. For instance, the corpus includes texts from specific political parties or interest groups, which may lead to overrepresentation or underrepresentation of certain viewpoints. To address these limitations and potential biases, users are encouraged to evaluate the model's performance on their specific use case and carefully consider the training data's representativeness for their target text domain. 🐦 Twitter: [@chklamm](http://twitter.com/chklamm)
AnonymousSub/declutr-model
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- library_name: ml-agents tags: - Pyramids - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Pyramids --- # **ppo** Agent playing **Pyramids** This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Pyramids 2. Step 1: Find your model_id: SpookyWooky5/PyramidsRND 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
AnonymousSub/declutr-model_wikiqa
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
26
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 252.15 +/- 44.85 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
AnonymousSub/rule_based_bert_mean_diff_epochs_1_shard_10
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
# Vocabulary Trimmed [lmqg/mbart-large-cc25-frquad-qa](https://huggingface.co/lmqg/mbart-large-cc25-frquad-qa): `vocabtrimmer/mbart-large-cc25-frquad-qa-trimmed-fr-15000` This model is a trimmed version of [lmqg/mbart-large-cc25-frquad-qa](https://huggingface.co/lmqg/mbart-large-cc25-frquad-qa) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | lmqg/mbart-large-cc25-frquad-qa | vocabtrimmer/mbart-large-cc25-frquad-qa-trimmed-fr-15000 | |:---------------------------|:----------------------------------|:-----------------------------------------------------------| | parameter_size_full | 610,852,864 | 370,188,288 | | parameter_size_embedding | 512,057,344 | 30,728,192 | | vocab_size | 250,028 | 15,004 | | compression_rate_full | 100.0 | 60.6 | | compression_rate_embedding | 100.0 | 6.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | fr | vocabtrimmer/mc4_validation | text | fr | validation | 15000 | 2 |
AnonymousSub/rule_based_bert_quadruplet_epochs_1_shard_1_squad2.0
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
# Vocabulary Trimmed [lmqg/mbart-large-cc25-esquad-qa](https://huggingface.co/lmqg/mbart-large-cc25-esquad-qa): `vocabtrimmer/mbart-large-cc25-esquad-qa-trimmed-es-10000` This model is a trimmed version of [lmqg/mbart-large-cc25-esquad-qa](https://huggingface.co/lmqg/mbart-large-cc25-esquad-qa) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | lmqg/mbart-large-cc25-esquad-qa | vocabtrimmer/mbart-large-cc25-esquad-qa-trimmed-es-10000 | |:---------------------------|:----------------------------------|:-----------------------------------------------------------| | parameter_size_full | 610,852,864 | 365,068,288 | | parameter_size_embedding | 512,057,344 | 20,488,192 | | vocab_size | 250,028 | 10,004 | | compression_rate_full | 100.0 | 59.76 | | compression_rate_embedding | 100.0 | 4.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | es | vocabtrimmer/mc4_validation | text | es | validation | 10000 | 2 |
AnonymousSub/rule_based_bert_triplet_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 274.50 +/- 31.50 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib Install the RL Zoo (with SB3 and SB3-Contrib): ```bash pip install rl_zoo3 ``` ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga Jupiterian9 -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga Jupiterian9 -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python -m rl_zoo3.train --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga Jupiterian9 ``` ## Hyperparameters ```python OrderedDict([('batch_size', 1024), ('buffer_size', 100000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.0003), ('learning_starts', 100000), ('n_timesteps', 10000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ```
AnonymousSub/rule_based_bert_triplet_epochs_1_shard_10
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- license: creativeml-openrail-m --- https://civitai.com/models/8047/project-sekai-mizuki-akiyama-loha
AnonymousSub/rule_based_bert_triplet_epochs_1_shard_1_squad2.0
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: creativeml-openrail-m --- https://civitai.com/models/24787/chitanda-eru-hyouka
AnonymousSub/rule_based_bert_triplet_epochs_1_shard_1_wikiqa
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
31
null
--- license: creativeml-openrail-m --- https://civitai.com/models/17798/elysia-hoh-without-bells
AnonymousSub/rule_based_bert_triplet_epochs_1_shard_1_wikiqa_copy
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- license: creativeml-openrail-m --- https://civitai.com/models/24488/morisaki-alesia-yu-blue-reflection-sun
AnonymousSub/rule_based_hier_quadruplet_epochs_1_shard_10
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
2023-03-27T19:37:25Z
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 374.00 +/- 214.89 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib Install the RL Zoo (with SB3 and SB3-Contrib): ```bash pip install rl_zoo3 ``` ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga kasseev -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga kasseev -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python -m rl_zoo3.train --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga kasseev ``` ## Hyperparameters ```python OrderedDict([('batch_size', 32), ('buffer_size', 100000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.0001), ('learning_starts', 100000), ('n_timesteps', 100000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ```
AnonymousSub/rule_based_hier_quadruplet_epochs_1_shard_1_squad2.0
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: creativeml-openrail-m --- https://civitai.com/models/24748/sn-kursk-or-azur-lane-or-lora
AnonymousSub/rule_based_hier_quadruplet_epochs_1_shard_1_wikiqa
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
2023-03-27T19:40:00Z
--- library_name: stable-baselines3 tags: - AntBulletEnv-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: AntBulletEnv-v0 type: AntBulletEnv-v0 metrics: - type: mean_reward value: 943.38 +/- 41.30 name: mean_reward verified: false --- # **A2C** Agent playing **AntBulletEnv-v0** This is a trained model of an **A2C** agent playing **AntBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
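The usage section here is again left as a template TODO. A minimal sketch under the classic gym API used by PyBullet environments; the repository id and checkpoint filename are placeholders, and `pybullet_envs` must be installed to register AntBulletEnv-v0:

```python
import gym
import pybullet_envs  # noqa: F401  (importing this registers AntBulletEnv-v0 with gym)
from huggingface_sb3 import load_from_hub
from stable_baselines3 import A2C

# Placeholder repository id and filename: the card does not state them
checkpoint = load_from_hub(repo_id="your-username/a2c-AntBulletEnv-v0", filename="a2c-AntBulletEnv-v0.zip")
model = A2C.load(checkpoint)

# PyBullet envs follow the classic gym API: reset returns obs, step returns a 4-tuple
env = gym.make("AntBulletEnv-v0")
obs = env.reset()
done = False
while not done:
    action, _states = model.predict(obs, deterministic=True)
    obs, reward, done, info = env.step(action)
env.close()
# If the repository also ships VecNormalize statistics, load them as well to reproduce the reported score
```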
AnonymousSub/rule_based_hier_triplet_0.1_epochs_1_shard_1_squad2.0
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
2023-03-27T19:43:35Z
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy - f1 - precision - recall model-index: - name: LKD_Experience_CV5 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # LKD_Experience_CV5 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.1901 - Accuracy: 0.9328 - F1: 0.9306 - Precision: 0.9335 - Recall: 0.9283 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Precision | Recall | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:---------:|:------:| | No log | 1.0 | 48 | 0.5064 | 0.6555 | 0.5380 | 0.8136 | 0.59 | | No log | 2.0 | 96 | 0.3327 | 0.9160 | 0.9114 | 0.9297 | 0.9028 | | No log | 3.0 | 144 | 0.2398 | 0.9244 | 0.9212 | 0.9305 | 0.9155 | | No log | 4.0 | 192 | 0.1995 | 0.9328 | 0.9306 | 0.9335 | 0.9283 | | No log | 5.0 | 240 | 0.1901 | 0.9328 | 0.9306 | 0.9335 | 0.9283 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.13.0 - Datasets 2.1.0 - Tokenizers 0.13.2
AnonymousSub/rule_based_hier_triplet_epochs_1_shard_10
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
2023-03-27T19:47:01Z
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: Taxi-v3 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.50 +/- 2.74 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python model = load_from_hub(repo_id="pinaggle/Taxi-v3", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
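`load_from_hub` is not defined in the snippet above. A minimal sketch of how such a helper can be written with `huggingface_hub`; the internal structure of the pickled dictionary, beyond the `env_id` key used above, is an assumption:

```python
import pickle
import gym
from huggingface_hub import hf_hub_download

def load_from_hub(repo_id: str, filename: str) -> dict:
    # Download the pickled Q-learning artifact from the Hub and unpickle it
    path = hf_hub_download(repo_id=repo_id, filename=filename)
    with open(path, "rb") as f:
        return pickle.load(f)

model = load_from_hub(repo_id="pinaggle/Taxi-v3", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```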
AnonymousSub/rule_based_hier_triplet_epochs_1_shard_1_wikiqa_copy
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- tags: - autotrain - summarization language: - de widget: - text: "I love AutoTrain 🤗" datasets: - fathyshalab/autotrain-data-dialogsumgerman co2_eq_emissions: emissions: 86.21246024573398 --- # Model Trained Using AutoTrain - Problem type: Summarization - Model ID: 44305111787 - CO2 Emissions (in grams): 86.2125 ## Validation Metrics - Loss: 1.069 - Rouge1: 33.702 - Rouge2: 13.478 - RougeL: 29.431 - RougeLsum: 30.710 - Gen Len: 18.952 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_HUGGINGFACE_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/fathyshalab/autotrain-dialogsumgerman-44305111787 ```
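For local use instead of the Inference API call above, a minimal sketch with the `transformers` pipeline; the repository id is inferred from the endpoint URL, and the example dialogue is made up:

```python
from transformers import pipeline

# Repository id inferred from the Inference API URL in the card
summarizer = pipeline("summarization", model="fathyshalab/autotrain-dialogsumgerman-44305111787")

dialogue = "A: Hallo, wie war dein Tag? B: Ziemlich stressig, ich hatte den ganzen Tag Meetings."
print(summarizer(dialogue, max_length=60, min_length=5)[0]["summary_text"])
```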
AnonymousSub/rule_based_roberta_hier_quadruplet_0.1_epochs_1_shard_1
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
# Vocabulary Trimmed [lmqg/mbart-large-cc25-frquad-qa](https://huggingface.co/lmqg/mbart-large-cc25-frquad-qa): `vocabtrimmer/mbart-large-cc25-frquad-qa-trimmed-fr-60000` This model is a trimmed version of [lmqg/mbart-large-cc25-frquad-qa](https://huggingface.co/lmqg/mbart-large-cc25-frquad-qa) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | lmqg/mbart-large-cc25-frquad-qa | vocabtrimmer/mbart-large-cc25-frquad-qa-trimmed-fr-60000 | |:---------------------------|:----------------------------------|:-----------------------------------------------------------| | parameter_size_full | 610,852,864 | 416,267,264 | | parameter_size_embedding | 512,057,344 | 122,886,144 | | vocab_size | 250,028 | 60,003 | | compression_rate_full | 100.0 | 68.15 | | compression_rate_embedding | 100.0 | 24.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | fr | vocabtrimmer/mc4_validation | text | fr | validation | 60000 | 2 |
AnonymousSub/rule_based_roberta_hier_quadruplet_0.1_epochs_1_shard_1_squad2.0
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
2023-03-27T20:16:08Z
--- license: creativeml-openrail-m base_model: runwayml/stable-diffusion-v1-5 tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers - controlnet inference: true --- # controlnet- yiyixu/fill-circle-controlnet These are controlnet weights trained on runwayml/stable-diffusion-v1-5 with a new type of conditioning. You can find some example images below. prompt: red circle with blue background ![images_0](./images_0.png) prompt: cyan circle with brown floral background ![images_1](./images_1.png)
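A minimal inference sketch with `diffusers`; the way the conditioning image is drawn here is an assumption about the fill-circle task, and preprocessing details may differ from how the checkpoint was trained:

```python
import torch
from PIL import Image, ImageDraw
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

# Draw an illustrative conditioning image: a filled circle on a plain background
cond = Image.new("RGB", (512, 512), "black")
ImageDraw.Draw(cond).ellipse((128, 128, 384, 384), fill="white")

controlnet = ControlNetModel.from_pretrained("yiyixu/fill-circle-controlnet", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
).to("cuda")

image = pipe("red circle with blue background", image=cond, num_inference_steps=30).images[0]
image.save("red_circle.png")
```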
AnonymousSub/rule_based_roberta_hier_quadruplet_epochs_1_shard_1_squad2.0
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
# Vocabulary Trimmed [lmqg/mbart-large-cc25-esquad-qa](https://huggingface.co/lmqg/mbart-large-cc25-esquad-qa): `vocabtrimmer/mbart-large-cc25-esquad-qa-trimmed-es-15000` This model is a trimmed version of [lmqg/mbart-large-cc25-esquad-qa](https://huggingface.co/lmqg/mbart-large-cc25-esquad-qa) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | lmqg/mbart-large-cc25-esquad-qa | vocabtrimmer/mbart-large-cc25-esquad-qa-trimmed-es-15000 | |:---------------------------|:----------------------------------|:-----------------------------------------------------------| | parameter_size_full | 610,852,864 | 370,188,288 | | parameter_size_embedding | 512,057,344 | 30,728,192 | | vocab_size | 250,028 | 15,004 | | compression_rate_full | 100.0 | 60.6 | | compression_rate_embedding | 100.0 | 6.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | es | vocabtrimmer/mc4_validation | text | es | validation | 15000 | 2 |
AnonymousSub/rule_based_roberta_hier_quadruplet_epochs_1_shard_1_wikiqa
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
24
2023-03-27T20:31:44Z
# Vocabulary Trimmed [lmqg/mbart-large-cc25-jaquad-qa](https://huggingface.co/lmqg/mbart-large-cc25-jaquad-qa): `vocabtrimmer/mbart-large-cc25-jaquad-qa-trimmed-ja-5000` This model is a trimmed version of [lmqg/mbart-large-cc25-jaquad-qa](https://huggingface.co/lmqg/mbart-large-cc25-jaquad-qa) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | lmqg/mbart-large-cc25-jaquad-qa | vocabtrimmer/mbart-large-cc25-jaquad-qa-trimmed-ja-5000 | |:---------------------------|:----------------------------------|:----------------------------------------------------------| | parameter_size_full | 610,852,864 | 359,948,288 | | parameter_size_embedding | 512,057,344 | 10,248,192 | | vocab_size | 250,028 | 5,004 | | compression_rate_full | 100.0 | 58.93 | | compression_rate_embedding | 100.0 | 2.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | ja | vocabtrimmer/mc4_validation | text | ja | validation | 5000 | 2 |
AnonymousSub/rule_based_roberta_hier_triplet_epochs_1_shard_1_wikiqa
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
25
2023-03-27T20:45:13Z
--- license: apache-2.0 tags: - image-classification - generated_from_trainer datasets: - beans metrics: - accuracy widget: - src: https://huggingface.co/platzi/platzi-vit-base-beans/resolve/main/healthy.jpeg example_title: Healthy - src: https://huggingface.co/platzi/platzi-vit-base-beans/resolve/main/bean_rust.jpeg example_title: Bean Rust model-index: - name: platzi-vit-base-beans results: - task: name: Image Classification type: image-classification dataset: name: beans type: beans config: default split: train args: default metrics: - name: Accuracy type: accuracy value: 0.9849624060150376 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # platzi-vit-model-andres-galvis This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset. It achieves the following results on the evaluation set: - Loss: 0.0227 - Accuracy: 0.9850 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.1343 | 3.85 | 500 | 0.0227 | 0.9850 | ### Framework versions - Transformers 4.27.3 - Pytorch 2.0.0+cpu - Datasets 2.10.1 - Tokenizers 0.13.2
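For inference, a minimal sketch with the `transformers` image-classification pipeline; the repository id and image URL are taken from the widget entries above, assuming they point at this checkpoint:

```python
from transformers import pipeline

# Repository id and image URL taken from the widget section of the card
classifier = pipeline("image-classification", model="platzi/platzi-vit-base-beans")

url = "https://huggingface.co/platzi/platzi-vit-base-beans/resolve/main/healthy.jpeg"
for prediction in classifier(url):
    print(prediction["label"], round(prediction["score"], 4))
```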
AnonymousSub/rule_based_roberta_twostage_quadruplet_epochs_1_shard_1_wikiqa
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
24
null
--- license: mit tags: - generated_from_keras_callback model-index: - name: chatgpt-gpt4-prompts-bart-large-cnn-samsum results: [] datasets: - fka/awesome-chatgpt-prompts --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # chatgpt-gpt4-prompts-bart-large-cnn-samsum This model generates ChatGPT/BingChat & GPT-3 prompts and is a fine-tuned version of [philschmid/bart-large-cnn-samsum](https://huggingface.co/philschmid/bart-large-cnn-samsum) on an [this](https://huggingface.co/datasets/fka/awesome-chatgpt-prompts) dataset. It achieves the following results on the evaluation set: - Train Loss: 1.2214 - Validation Loss: 2.7584 - Epoch: 4 ### Streamlit This model supports a [Streamlit](https://streamlit.io/) Web UI to run the chatgpt-gpt4-prompts-bart-large-cnn-samsum model: [![Open In HF Spaces](https://camo.githubusercontent.com/00380c35e60d6b04be65d3d94a58332be5cc93779f630bcdfc18ab9a3a7d3388/68747470733a2f2f696d672e736869656c64732e696f2f62616467652f25463025394625413425393725323048756767696e67253230466163652d5370616365732d626c7565)](https://huggingface.co/spaces/Kaludi/ChatGPT-BingChat-GPT3-Prompt-Generator_App) ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': 2e-05, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Epoch | |:----------:|:---------------:|:-----:| | 3.1982 | 2.6801 | 0 | | 2.3601 | 2.5493 | 1 | | 1.9225 | 2.5377 | 2 | | 1.5465 | 2.6794 | 3 | | 1.2214 | 2.7584 | 4 | ### Framework versions - Transformers 4.27.3 - TensorFlow 2.11.0 - Datasets 2.10.1 - Tokenizers 0.13.2
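For local use, a minimal sketch with the `transformers` text2text pipeline; the repository id below is inferred from the owner of the linked Space and is therefore an assumption:

```python
from transformers import pipeline

# Repository id is an assumption based on the owner of the linked Space
generator = pipeline("text2text-generation", model="Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum")

topic = "Linux Terminal"
print(generator(topic, max_length=150)[0]["generated_text"])
```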
AnonymousSub/rule_based_roberta_twostagequadruplet_hier_epochs_1_shard_1_wikiqa
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
25
2023-03-27T21:19:04Z
--- datasets: - tencups/gpt2 - pietrolesci/gpt3_nli language: - en ---
AnonymousSub/rule_based_roberta_twostagetriplet_epochs_1_shard_1
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
# Vocabulary Trimmed [lmqg/mbart-large-cc25-esquad-qa](https://huggingface.co/lmqg/mbart-large-cc25-esquad-qa): `vocabtrimmer/mbart-large-cc25-esquad-qa-trimmed-es-30000`

This model is a trimmed version of [lmqg/mbart-large-cc25-esquad-qa](https://huggingface.co/lmqg/mbart-large-cc25-esquad-qa), produced with [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool that trims the vocabulary of language models to reduce model size.
The following table summarizes the trimming process.

|                            | lmqg/mbart-large-cc25-esquad-qa | vocabtrimmer/mbart-large-cc25-esquad-qa-trimmed-es-30000 |
|:---------------------------|:--------------------------------|:---------------------------------------------------------|
| parameter_size_full        | 610,852,864                     | 385,548,288                                               |
| parameter_size_embedding   | 512,057,344                     | 61,448,192                                                |
| vocab_size                 | 250,028                         | 30,004                                                    |
| compression_rate_full      | 100.0                           | 63.12                                                     |
| compression_rate_embedding | 100.0                           | 12.0                                                      |

The following table shows the parameters used to trim the vocabulary.

| language | dataset                     | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency |
|:---------|:----------------------------|:---------------|:-------------|:--------------|------------------:|--------------:|
| es       | vocabtrimmer/mc4_validation | text           | es           | validation    |             30000 |             2 |
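A minimal loading sketch for the trimmed checkpoint. It should load like any seq2seq model on the Hub; the prompt format below ("question: ..., context: ...") is assumed from the parent lmqg QA model and may need adjusting.

```python
from transformers import pipeline

qa = pipeline(
    "text2text-generation",
    model="vocabtrimmer/mbart-large-cc25-esquad-qa-trimmed-es-30000",
)

# Prompt format assumed from the parent lmqg QA model; adjust if needed.
prompt = (
    "question: ¿Dónde pasó Leonardo da Vinci sus últimos años?, "
    "context: Leonardo da Vinci pasó sus últimos años en Francia, en la mansión de Clos Lucé."
)
print(qa(prompt)[0]["generated_text"])
```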
AnonymousSub/rule_based_roberta_twostagetriplet_epochs_1_shard_1_wikiqa
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
24
null
--- tags: - generated_from_keras_callback model-index: - name: CIS6930_DAAGR_Classification results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # CIS6930_DAAGR_Classification This model was trained from scratch on an unknown dataset. It achieves the following results on the evaluation set: ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: None - training_precision: float32 ### Training results ### Framework versions - Transformers 4.27.3 - TensorFlow 2.11.0 - Datasets 2.10.1 - Tokenizers 0.13.2
AnonymousSub/rule_based_roberta_twostagetriplet_hier_epochs_1_shard_1
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - samsum metrics: - rouge model-index: - name: flan-t5-base-samsum results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: samsum type: samsum config: samsum split: test args: samsum metrics: - name: Rouge1 type: rouge value: 46.8948 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # flan-t5-base-samsum This model is a fine-tuned version of [google/flan-t5-base](https://huggingface.co/google/flan-t5-base) on the samsum dataset. It achieves the following results on the evaluation set: - Loss: 1.3794 - Rouge1: 46.8948 - Rouge2: 23.4445 - Rougel: 39.5763 - Rougelsum: 43.209 - Gen Len: 17.2540 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:| | No log | 1.0 | 231 | 1.3935 | 46.6142 | 23.0937 | 39.1018 | 42.8696 | 17.2076 | | No log | 2.0 | 462 | 1.3848 | 46.5553 | 23.0122 | 39.1493 | 42.764 | 17.1465 | | 1.4249 | 3.0 | 693 | 1.3813 | 46.8705 | 23.5239 | 39.6689 | 43.2545 | 17.2930 | | 1.4249 | 4.0 | 924 | 1.3801 | 46.9726 | 23.6143 | 39.6028 | 43.3278 | 17.2112 | | 1.3528 | 5.0 | 1155 | 1.3794 | 46.8948 | 23.4445 | 39.5763 | 43.209 | 17.2540 | ### Framework versions - Transformers 4.27.3 - Pytorch 1.10.0 - Datasets 2.10.1 - Tokenizers 0.13.2
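A minimal usage sketch for dialogue summarization with this checkpoint. The model path below is a placeholder, since the card does not state the repo id under which the weights are published, and the dialogue is an illustrative example.

```python
from transformers import pipeline

# Placeholder path: point this at the repo id or local directory holding the fine-tuned weights.
summarizer = pipeline("summarization", model="path/to/flan-t5-base-samsum")

# SAMSum-style chat dialogue (illustrative example).
dialogue = (
    "Anna: Are we still on for lunch tomorrow?\n"
    "Ben: Yes, 12:30 at the usual place.\n"
    "Anna: Perfect, see you there!"
)
print(summarizer(dialogue, max_length=60)[0]["summary_text"])
```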
AnonymousSub/rule_based_roberta_twostagetriplet_hier_epochs_1_shard_1_squad2.0
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 670.00 +/- 278.20 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib Install the RL Zoo (with SB3 and SB3-Contrib): ```bash pip install rl_zoo3 ``` ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga aaronrmm -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga aaronrmm -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python -m rl_zoo3.train --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga aaronrmm ``` ## Hyperparameters ```python OrderedDict([('batch_size', 32), ('buffer_size', 100000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.0001), ('learning_starts', 10000), ('n_timesteps', 10000000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ```
AnonymousSub/rule_based_twostage_quadruplet_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
---
library_name: ml-agents
tags:
- Huggy
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-Huggy
---

# **ppo** Agent playing **Huggy**

This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)

The Documentation: https://github.com/huggingface/ml-agents#get-started

We wrote a complete tutorial on how to train your first agent using ML-Agents and publish it to the Hub.

### Resume the training

```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your Agent play

You can watch your agent **playing directly in your browser**:

1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy
2. Find your model_id: albseverus/ppo-Huggy-v1
3. Select your *.nn /*.onnx file
4. Click on Watch the agent play 👀
AnonymousSub/rule_based_twostagequadruplet_hier_epochs_1_shard_1_wikiqa
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
28
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: que_funcione_que_funcione2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # que_funcione_que_funcione2 This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 43.6653 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 5 - training_steps: 20 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 0.5 | 10 | 50.2270 | | No log | 1.0 | 20 | 43.6653 | ### Framework versions - Transformers 4.27.3 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
AnonymousSub/rule_based_twostagetriplet_hier_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
---
tags:
- unity-ml-agents
- ml-agents
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-SoccerTwos
library_name: ml-agents
---

# **poca** Agent playing **SoccerTwos**

This is a trained model of a **poca** agent playing **SoccerTwos** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)

The Documentation: https://github.com/huggingface/ml-agents#get-started

We wrote a complete tutorial on how to train your first agent using ML-Agents and publish it to the Hub.

### Resume the training

```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your Agent play

You can watch your agent **playing directly in your browser**:

1. Go to https://huggingface.co/spaces/unity/ML-Agents-SoccerTwos
2. Write your model_id: kmposkid1/poca-SoccerTwos
3. Select your *.nn /*.onnx file
4. Click on Watch the agent play 👀
AnonymousSub/rule_based_twostagetriplet_hier_epochs_1_shard_1_wikiqa
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
null
# Vocabulary Trimmed [lmqg/mbart-large-cc25-jaquad-qa](https://huggingface.co/lmqg/mbart-large-cc25-jaquad-qa): `vocabtrimmer/mbart-large-cc25-jaquad-qa-trimmed-ja-30000`

This model is a trimmed version of [lmqg/mbart-large-cc25-jaquad-qa](https://huggingface.co/lmqg/mbart-large-cc25-jaquad-qa), produced with [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool that trims the vocabulary of language models to reduce model size.
The following table summarizes the trimming process.

|                            | lmqg/mbart-large-cc25-jaquad-qa | vocabtrimmer/mbart-large-cc25-jaquad-qa-trimmed-ja-30000 |
|:---------------------------|:--------------------------------|:---------------------------------------------------------|
| parameter_size_full        | 610,852,864                     | 385,548,288                                               |
| parameter_size_embedding   | 512,057,344                     | 61,448,192                                                |
| vocab_size                 | 250,028                         | 30,004                                                    |
| compression_rate_full      | 100.0                           | 63.12                                                     |
| compression_rate_embedding | 100.0                           | 12.0                                                      |

The following table shows the parameters used to trim the vocabulary.

| language | dataset                     | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency |
|:---------|:----------------------------|:---------------|:-------------|:--------------|------------------:|--------------:|
| ja       | vocabtrimmer/mc4_validation | text           | ja           | validation    |             30000 |             2 |
AnonymousSub/specter-bert-model
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: que_funcione_que_funcione3 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # que_funcione_que_funcione3 This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 26.7271 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 5 - training_steps: 20 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 0.5 | 10 | 29.9242 | | No log | 1.0 | 20 | 26.7271 | ### Framework versions - Transformers 4.27.3 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
AnonymousSub/specter-bert-model_copy_wikiqa
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
26
null
---
tags:
- unity-ml-agents
- ml-agents
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-SoccerTwos
library_name: ml-agents
---

# **poca** Agent playing **SoccerTwos**

This is a trained model of a **poca** agent playing **SoccerTwos** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)

The Documentation: https://github.com/huggingface/ml-agents#get-started

We wrote a complete tutorial on how to train your first agent using ML-Agents and publish it to the Hub.

### Resume the training

```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your Agent play

You can watch your agent **playing directly in your browser**:

1. Go to https://huggingface.co/spaces/unity/ML-Agents-SoccerTwos
2. Write your model_id: SAL83/poca-SoccerTwos
3. Select your *.nn /*.onnx file
4. Click on Watch the agent play 👀
AnonymousSub/specter-bert-model_squad2.0
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
---
language: mt
datasets:
- common_voice
tags:
- audio
- automatic-speech-recognition
- maltese
- whisper-large
- whisper-large-v1
- masri-project
- malta
- university-of-malta
license: cc-by-nc-sa-4.0
widget: null
model-index:
- name: whisper-large-maltese-8k-steps-64h
  results:
  - task:
      name: Automatic Speech Recognition
      type: automatic-speech-recognition
    dataset:
      name: MASRI-TEST Corpus
      type: MLRS/masri_test
      split: test
      args:
        language: mt
    metrics:
    - name: WER
      type: wer
      value: 18.973
  - task:
      name: Automatic Speech Recognition
      type: automatic-speech-recognition
    dataset:
      name: MASRI-DEV Corpus
      type: MLRS/masri_dev
      split: validation
      args:
        language: mt
    metrics:
    - name: WER
      type: wer
      value: 17.372
---

# whisper-large-maltese-8k-steps-64h

The "whisper-large-maltese-8k-steps-64h" is an acoustic model suitable for Automatic Speech Recognition in Maltese. It is the result of fine-tuning the model "openai/whisper-large" with around 64 hours of Maltese data developed by the MASRI Project at the University of Malta between 2019 and 2021. Most of the data is available at the MASRI Project homepage https://www.um.edu.mt/projects/masri/.

The specific list of corpora used to fine-tune the model is:

- MASRI-HEADSET v2 (6h39m)
- MASRI-Farfield (9h37m)
- MASRI-Booths (2h27m)
- MASRI-MEP (1h17m)
- MASRI-COMVO (7h29m)
- MASRI-TUBE (13h17m)
- MASRI-MERLIN (25h18m) *Not available at the MASRI Project homepage

The fine-tuning process was performed during March 2023 on the servers of the Language and Voice Lab (https://lvl.ru.is/) at Reykjavík University (Iceland) by Carlos Daniel Hernández Mena.

# Evaluation
```python
import torch
from transformers import WhisperForConditionalGeneration, WhisperProcessor

# Load the processor and model.
MODEL_NAME = "carlosdanielhernandezmena/whisper-large-maltese-8k-steps-64h"
processor = WhisperProcessor.from_pretrained(MODEL_NAME)
model = WhisperForConditionalGeneration.from_pretrained(MODEL_NAME).to("cuda")

# Load the dataset
from datasets import load_dataset, load_metric, Audio
ds = load_dataset("MLRS/masri_test", split='test')

# Downsample to 16kHz
ds = ds.cast_column("audio", Audio(sampling_rate=16_000))

# Process the dataset
def map_to_pred(batch):
    audio = batch["audio"]
    input_features = processor(audio["array"], sampling_rate=audio["sampling_rate"], return_tensors="pt").input_features
    batch["reference"] = processor.tokenizer._normalize(batch['normalized_text'])

    with torch.no_grad():
        predicted_ids = model.generate(input_features.to("cuda"))[0]
    transcription = processor.decode(predicted_ids)
    batch["prediction"] = processor.tokenizer._normalize(transcription)
    return batch

# Do the evaluation
result = ds.map(map_to_pred)

# Compute the overall WER now.
from evaluate import load
wer = load("wer")
WER = 100 * wer.compute(references=result["reference"], predictions=result["prediction"])
print(WER)
```
**Test Result**: 18.97354497354497

# BibTeX entry and citation info
*When publishing results based on these models please refer to:*
```bibtex
@misc{mena2023whisperlargemaltese,
      title={Acoustic Model in Maltese: whisper-large-maltese-8k-steps-64h.},
      author={Hernandez Mena, Carlos Daniel},
      year={2023},
      url={https://huggingface.co/carlosdanielhernandezmena/whisper-large-maltese-8k-steps-64h},
}
```

# Acknowledgements

The MASRI Project is funded by the University of Malta Research Fund Awards. We want to thank Merlin Publishers (Malta) for providing the audiobooks used to create the MASRI-MERLIN Corpus.

Thanks to Jón Guðnason, head of the Language and Voice Lab, for providing computational power to make this model possible. We also want to thank the "Language Technology Programme for Icelandic 2019-2023", which is managed and coordinated by Almannarómur and funded by the Icelandic Ministry of Education, Science and Culture.

Special thanks to Björn Ingi Stefánsson for setting up the configuration of the server where this model was trained.
AnonymousSub/unsup-consert-base_copy_wikiqa
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
26
null
---
tags:
- FrozenLake-v1-4x4-no_slippery
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-FrozenLake-v1-4x4-noSlippery
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: FrozenLake-v1-4x4-no_slippery
      type: FrozenLake-v1-4x4-no_slippery
    metrics:
    - type: mean_reward
      value: 1.00 +/- 0.00
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **FrozenLake-v1**

This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**.

## Usage

```python
model = load_from_hub(repo_id="SharpNLight/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
AnonymousSub/unsup-consert-papers-bert
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- license: mit tags: - generated_from_trainer metrics: - accuracy - f1 model-index: - name: fedcsis_translated-intent_baseline-xlm_r-pl results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # fedcsis_translated-intent_baseline-xlm_r-pl This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the [leyzer-fedcsis-translated](https://huggingface.co/datasets/cartesinus/leyzer-fedcsis-translated) dataset. Results on untranslated test set: - Accuracy: 0.8769 It achieves the following results on the evaluation set: - Loss: 0.5478 - Accuracy: 0.8769 - F1: 0.8769 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 3.505 | 1.0 | 814 | 1.8819 | 0.5979 | 0.5979 | | 1.5056 | 2.0 | 1628 | 1.1033 | 0.7611 | 0.7611 | | 1.0892 | 3.0 | 2442 | 0.7402 | 0.8470 | 0.8470 | | 0.648 | 4.0 | 3256 | 0.5263 | 0.8902 | 0.8902 | | 0.423 | 5.0 | 4070 | 0.4253 | 0.9152 | 0.9152 | | 0.3429 | 6.0 | 4884 | 0.3654 | 0.9194 | 0.9194 | | 0.2464 | 7.0 | 5698 | 0.3213 | 0.9273 | 0.9273 | | 0.1873 | 8.0 | 6512 | 0.3065 | 0.9328 | 0.9328 | | 0.1666 | 9.0 | 7326 | 0.3046 | 0.9345 | 0.9345 | | 0.1459 | 10.0 | 8140 | 0.2911 | 0.9370 | 0.9370 | ### Framework versions - Transformers 4.27.3 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
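A minimal inference sketch for intent classification. The repo id is an assumption (the linked dataset is published under `cartesinus`); adjust it if the checkpoint is hosted elsewhere, and the Polish utterance is an illustrative example.

```python
from transformers import pipeline

# Repo id is an assumption, not stated in the card.
intent_clf = pipeline(
    "text-classification",
    model="cartesinus/fedcsis_translated-intent_baseline-xlm_r-pl",
)

# Polish virtual-assistant style utterance (illustrative example).
print(intent_clf("ustaw budzik na siódmą rano"))
```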
AnonymousSubmission/pretrained-model-1
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
tags:
- Taxi-v3
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-taxi-v3-simple
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: Taxi-v3
      type: Taxi-v3
    metrics:
    - type: mean_reward
      value: 7.44 +/- 2.71
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **Taxi-v3**

This is a trained model of a **Q-Learning** agent playing **Taxi-v3**.

## Usage

```python
model = load_from_hub(repo_id="SharpNLight/q-taxi-v3-simple", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
AnthonyNelson/DialoGPT-small-ricksanchez
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- license: mit tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: fedcsis_translated-slot_baseline-xlm_r-pl results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # fedcsis_translated-slot_baseline-xlm_r-pl This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the [leyzer-fedcsis-translated](https://huggingface.co/datasets/cartesinus/leyzer-fedcsis-translated) dataset. Results on untranslated test set: - Precision: 0.5909 - Recall: 0.5766 - F1: 0.5836 - Accuracy: 0.7484 It achieves the following results on the evaluation set: - Loss: 1.0761 - Precision: 0.7299 - Recall: 0.7427 - F1: 0.7363 - Accuracy: 0.8415 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | 1.4842 | 1.0 | 814 | 0.7712 | 0.5858 | 0.6026 | 0.5941 | 0.7918 | | 0.5128 | 2.0 | 1628 | 0.6435 | 0.6469 | 0.6828 | 0.6644 | 0.8119 | | 0.3526 | 3.0 | 2442 | 0.7030 | 0.6823 | 0.7045 | 0.6933 | 0.8242 | | 0.2142 | 4.0 | 3256 | 0.7695 | 0.7112 | 0.7243 | 0.7177 | 0.8381 | | 0.1422 | 5.0 | 4070 | 0.8550 | 0.7203 | 0.7310 | 0.7256 | 0.8399 | | 0.1188 | 6.0 | 4884 | 0.9209 | 0.7183 | 0.7333 | 0.7258 | 0.8391 | | 0.0915 | 7.0 | 5698 | 0.9892 | 0.7238 | 0.7372 | 0.7305 | 0.8404 | | 0.072 | 8.0 | 6512 | 1.0271 | 0.7230 | 0.7364 | 0.7296 | 0.8417 | | 0.0626 | 9.0 | 7326 | 1.0608 | 0.7312 | 0.7417 | 0.7364 | 0.8419 | | 0.0613 | 10.0 | 8140 | 1.0761 | 0.7299 | 0.7427 | 0.7363 | 0.8415 | ### Framework versions - Transformers 4.27.3 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
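A minimal inference sketch for slot filling, treated as token classification. The repo id follows the same naming scheme as the intent model above and is an assumption; the utterance is illustrative only.

```python
from transformers import pipeline

# Repo id is an assumption, not stated in the card.
slot_tagger = pipeline(
    "token-classification",
    model="cartesinus/fedcsis_translated-slot_baseline-xlm_r-pl",
    aggregation_strategy="simple",  # merge word pieces into whole slot spans
)

# Illustrative Polish utterance; the model should tag slot values such as the artist and service.
print(slot_tagger("zagraj utwory zespołu Queen na Spotify"))
```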
Anthos23/my-awesome-model
[ "pytorch", "tf", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
2023-03-27T22:24:17Z
# Vocabulary Trimmed [lmqg/mbart-large-cc25-jaquad-qa](https://huggingface.co/lmqg/mbart-large-cc25-jaquad-qa): `vocabtrimmer/mbart-large-cc25-jaquad-qa-trimmed-ja-60000`

This model is a trimmed version of [lmqg/mbart-large-cc25-jaquad-qa](https://huggingface.co/lmqg/mbart-large-cc25-jaquad-qa), produced with [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool that trims the vocabulary of language models to reduce model size.
The following table summarizes the trimming process.

|                            | lmqg/mbart-large-cc25-jaquad-qa | vocabtrimmer/mbart-large-cc25-jaquad-qa-trimmed-ja-60000 |
|:---------------------------|:--------------------------------|:---------------------------------------------------------|
| parameter_size_full        | 610,852,864                     | 416,268,288                                               |
| parameter_size_embedding   | 512,057,344                     | 122,888,192                                               |
| vocab_size                 | 250,028                         | 60,004                                                    |
| compression_rate_full      | 100.0                           | 68.15                                                     |
| compression_rate_embedding | 100.0                           | 24.0                                                      |

The following table shows the parameters used to trim the vocabulary.

| language | dataset                     | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency |
|:---------|:----------------------------|:---------------|:-------------|:--------------|------------------:|--------------:|
| ja       | vocabtrimmer/mc4_validation | text           | ja           | validation    |             60000 |             2 |
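A quick sanity check of the figures in the table above: load the trimmed checkpoint and print the tokenizer and parameter sizes, which should roughly match the reported 60,004 vocabulary entries and ~416M parameters.

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_id = "vocabtrimmer/mbart-large-cc25-jaquad-qa-trimmed-ja-60000"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

print(len(tokenizer))                              # expected: about 60,004 (trimmed vocabulary)
print(sum(p.numel() for p in model.parameters()))  # expected: about 416M parameters
```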
Anthos23/test_trainer
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - clinc_oos metrics: - accuracy model-index: - name: distilbert-base-uncased-finetuned-clinc results: - task: name: Text Classification type: text-classification dataset: name: clinc_oos type: clinc_oos args: plus metrics: - name: Accuracy type: accuracy value: 0.9183870967741935 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-clinc This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the clinc_oos dataset. It achieves the following results on the evaluation set: - Loss: 0.7721 - Accuracy: 0.9184 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 48 - eval_batch_size: 48 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 4.2896 | 1.0 | 318 | 3.2890 | 0.7432 | | 2.6284 | 2.0 | 636 | 1.8756 | 0.8377 | | 1.5483 | 3.0 | 954 | 1.1572 | 0.8961 | | 1.015 | 4.0 | 1272 | 0.8573 | 0.9132 | | 0.7953 | 5.0 | 1590 | 0.7721 | 0.9184 | ### Framework versions - Transformers 4.11.3 - Pytorch 1.13.1+cu116 - Datasets 1.16.1 - Tokenizers 0.10.3
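A minimal inference sketch for this intent classifier. The model path is a placeholder, since the card does not state the repo id under which the weights are published, and the query is an illustrative example.

```python
from transformers import pipeline

# Placeholder path: point this at the repo id or local directory holding the fine-tuned weights.
clf = pipeline("text-classification", model="path/to/distilbert-base-uncased-finetuned-clinc")

# A banking-style query in the CLINC domain (illustrative example).
print(clf("transfer $100 from my checking to my savings account"))
```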
Anubhav23/model_name
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-27T22:37:01Z
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 623.50 +/- 145.88 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib Install the RL Zoo (with SB3 and SB3-Contrib): ```bash pip install rl_zoo3 ``` ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga pinaggle -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga pinaggle -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python -m rl_zoo3.train --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga pinaggle ``` ## Hyperparameters ```python OrderedDict([('batch_size', 32), ('buffer_size', 100000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.0001), ('learning_starts', 100000), ('n_timesteps', 1000000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ```
Anupam/QuestionClassifier
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
datasets:
- mc4
language:
- pt
metrics:
- perplexity
library_name: transformers
---

This model is a Portuguese fine-tuned version of [facebook/opt-125m](https://huggingface.co/facebook/opt-125m). It has undergone additional causal language modeling pre-training with a context size of 512, using an extra 300 million tokens in Portuguese (sampled from mc4). The Wandb report is publicly available [here](https://api.wandb.ai/links/thiagolaitz1/ths2zi4c). The code for training on Colab Pro (A100 - 40GB) can be found [here](https://github.com/thiagolaitz/IA368-search-engines/blob/main/Project%2004/opt_125m_pt_finetuning.ipynb). The total cost of training this model was R$17.40, or $3.37 USD (as of March 2023).

Deterministic use:
```python
from transformers import pipeline

generator = pipeline('text-generation', model="thiagolaitz/opt-125m-pt-finetuned", max_length=30)
generator("Eles brincaram o dia inteiro sob o sol quente, mas")
# Output: Eles brincaram o dia inteiro sob o sol quente, mas não se deixaram levar pelo sol.
```

Top-k sampling:
```python
from transformers import pipeline

generator = pipeline('text-generation', model="thiagolaitz/opt-125m-pt-finetuned", do_sample=True, max_length=30)
generator("Eles brincaram o dia inteiro sob o sol quente, mas")
```
gaurishhs/API
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: cc-by-4.0 tags: - generated_from_trainer model-index: - name: 20230328-001-baseline-xlmr-clickbait-spoiling results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # 20230328-001-baseline-xlmr-clickbait-spoiling This model is a fine-tuned version of [deepset/xlm-roberta-base-squad2](https://huggingface.co/deepset/xlm-roberta-base-squad2) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.9266 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 99 | 2.7788 | | No log | 2.0 | 198 | 2.8201 | | No log | 3.0 | 297 | 2.9266 | ### Framework versions - Transformers 4.27.3 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
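A minimal extractive-QA sketch for clickbait spoiling with this checkpoint. The model path is a placeholder, since the card does not state the repo id, and the question/context pair is illustrative only.

```python
from transformers import pipeline

# Placeholder path: point this at the repo id or local directory holding the fine-tuned weights.
spoiler = pipeline("question-answering", model="path/to/20230328-001-baseline-xlmr-clickbait-spoiling")

# Clickbait headline as the question, linked article text as the context (illustrative example).
result = spoiler(
    question="You won't believe what this study says about coffee",
    context=(
        "The study followed 10,000 adults for five years and found that moderate coffee "
        "consumption was associated with a lower risk of heart disease."
    ),
)
print(result["answer"])
```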