| Column | Type | Details |
| --- | --- | --- |
| modelId | string | length 4–81 |
| tags | list | |
| pipeline_tag | string | 17 classes |
| config | dict | |
| downloads | int64 | 0–59.7M |
| first_commit | timestamp[ns, tz=UTC] | |
| card | string | length 51–438k |
dccuchile/albert-base-spanish-finetuned-ner
[ "pytorch", "albert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "AlbertForTokenClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
null
--- license: mit --- ### Aflac duck on Stable Diffusion This is the `<aflac duck>` concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the [Stable Conceptualizer](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) notebook. You can also train your own concepts and load them into the concept libraries using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb). Here is the new concept you will be able to use as an `object`: ![<aflac duck> 0](https://huggingface.co/sd-concepts-library/Aflac-duck/resolve/main/concept_images/duck1.jpg) ![<aflac duck> 1](https://huggingface.co/sd-concepts-library/Aflac-duck/resolve/main/concept_images/duck9.jpg) ![<aflac duck> 2](https://huggingface.co/sd-concepts-library/Aflac-duck/resolve/main/concept_images/duck2.jpg) ![<aflac duck> 3](https://huggingface.co/sd-concepts-library/Aflac-duck/resolve/main/concept_images/duck8.jpg) ![<aflac duck> 4](https://huggingface.co/sd-concepts-library/Aflac-duck/resolve/main/concept_images/duck3.jpg)
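Below is a minimal, untested sketch of loading this concept for inference with 🤗 Diffusers. The repository id `sd-concepts-library/Aflac-duck` is inferred from the image links above; the base checkpoint and the availability of `load_textual_inversion` (recent diffusers versions) are assumptions.

```python
import torch
from diffusers import StableDiffusionPipeline

# Assumed base checkpoint; sd-concepts embeddings were typically trained against SD v1-4.
pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")

# Pull the learned <aflac duck> embedding into the tokenizer and text encoder.
pipe.load_textual_inversion("sd-concepts-library/Aflac-duck")

# Use the placeholder token in a prompt.
image = pipe("a plush <aflac duck> sitting on a beach towel").images[0]
image.save("aflac_duck.png")
```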
dccuchile/albert-base-spanish-finetuned-pos
[ "pytorch", "albert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "AlbertForTokenClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- tags: - audio-classification - generated_from_trainer datasets: - superb metrics: - accuracy model-index: - name: trillsson3-ft-keyword-spotting-14 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # trillsson3-ft-keyword-spotting-14 This model is a fine-tuned version of [vumichien/nonsemantic-speech-trillsson3](https://huggingface.co/vumichien/nonsemantic-speech-trillsson3) on the superb dataset. It achieves the following results on the evaluation set: - Loss: 0.3015 - Accuracy: 0.9150 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 16 - eval_batch_size: 64 - seed: 0 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 20.0 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:| | 1.2824 | 1.0 | 1597 | 0.7818 | 0.6892 | | 0.8003 | 2.0 | 3194 | 0.4443 | 0.8735 | | 0.7232 | 3.0 | 4791 | 0.3728 | 0.8833 | | 0.73 | 4.0 | 6388 | 0.3465 | 0.8973 | | 0.7015 | 5.0 | 7985 | 0.3211 | 0.9109 | | 0.6981 | 6.0 | 9582 | 0.3200 | 0.9081 | | 0.6807 | 7.0 | 11179 | 0.3209 | 0.9059 | | 0.6873 | 8.0 | 12776 | 0.3206 | 0.9022 | | 0.6416 | 9.0 | 14373 | 0.3124 | 0.9057 | | 0.6698 | 10.0 | 15970 | 0.3288 | 0.8950 | | 0.716 | 11.0 | 17567 | 0.3147 | 0.8998 | | 0.6514 | 12.0 | 19164 | 0.3034 | 0.9112 | | 0.6513 | 13.0 | 20761 | 0.3091 | 0.9092 | | 0.652 | 14.0 | 22358 | 0.3056 | 0.9100 | | 0.7105 | 15.0 | 23955 | 0.3015 | 0.9150 | | 0.6337 | 16.0 | 25552 | 0.3070 | 0.9091 | | 0.63 | 17.0 | 27149 | 0.3018 | 0.9135 | | 0.6672 | 18.0 | 28746 | 0.3084 | 0.9088 | | 0.6479 | 19.0 | 30343 | 0.3060 | 0.9101 | | 0.6658 | 20.0 | 31940 | 0.3072 | 0.9089 | ### Framework versions - Transformers 4.23.0.dev0 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
dccuchile/albert-base-spanish-finetuned-qa-mlqa
[ "pytorch", "albert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "AlbertForQuestionAnswering" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- language: en thumbnail: http://www.huggingtweets.com/kathyalexx/1666709098727/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1583932374550716416/qRWsI19i_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Kathy Alex 📺🏴󠁧󠁢󠁳󠁣󠁴󠁿| CRT VTUBER</div> <div style="text-align: center; font-size: 14px;">@kathyalexx</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Kathy Alex 📺🏴󠁧󠁢󠁳󠁣󠁴󠁿| CRT VTUBER. | Data | Kathy Alex 📺🏴󠁧󠁢󠁳󠁣󠁴󠁿| CRT VTUBER | | --- | --- | | Tweets downloaded | 3249 | | Retweets | 139 | | Short tweets | 819 | | Tweets kept | 2291 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/1njfxl4k/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @kathyalexx's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/no11thwo) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/no11thwo/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/kathyalexx') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
dccuchile/albert-base-spanish-finetuned-xnli
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
28
null
"""infinite_jukebox.py - (c) 2017 - Dave Rensin - [email protected] An attempt to re-create the amazing Infinite Jukebox (http://www.infinitejuke.com) created by Paul Lamere of Echo Nest. Uses the Remixatron module to do most of the work. """ import argparse import curses import curses.textpad import numpy as np import os import pygame import pygame.event import pygame.locals import signal import soundfile as sf import sys import time from Remixatron import InfiniteJukebox from pygame import mixer SOUND_FINISHED = pygame.locals.USEREVENT + 1 def process_args(): """ Process the command line args """ description = """Creates an infinite remix of an audio file by finding musically similar beats and computing a randomized play path through them. The default choices should be suitable for a variety of musical styles. This work is inspired by the Infinite Jukebox (http://www.infinitejuke.com) project created by Paul Lamere ([email protected])""" epilog = """ """ parser = argparse.ArgumentParser(description=description, epilog=epilog, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument("filename", type=str, help="the name of the audio file to play. Most common audio types should work. (mp3, wav, ogg, etc..)") parser.add_argument("-clusters", metavar='N', type=int, default=0, help="set the number of clusters into which we want to bucket the audio. Default: 0 (automatically try to find the optimal cluster value.)") parser.add_argument("-start", metavar='start_beat', type=int, default=1, help="start on a specific beat. Default: 1") parser.add_argument("-save", metavar='label', type=str, help="Save the remix to a file, rather than play it. Will create file named [label].wav") parser.add_argument("-duration", metavar='seconds', type=int, default=180, help="length (in seconds) to save. Must use with -save. Default: 180") parser.add_argument("-verbose", action='store_true', help="print extra info about the track and play vector") parser.add_argument("-use_v1", action='store_true', help="use the original auto clustering algorithm instead of the new one. -clusters must not be set.") return parser.parse_args() def MyCallback(pct_complete, message): """ The callback function that gets status updates. Just prints a low-fi progress bar and reflects the status message passed in. Example: [###### ] Doing some thing... """ progress_bar = " [" + "".ljust(int(pct_complete * 10),'#') + "".ljust(10 - int(pct_complete * 10), ' ') + "] " log_line = progress_bar + message window.clear() window.addstr(1,0,log_line) window.refresh() def display_playback_progress(v): """ Displays a super low-fi playback progress map See README.md for details.. Returns the time this function took so we can deduct it from the sleep time for the beat """ time_start = time.time() term_width = curses.tigetnum('cols') y_offset = 11 beat = v['beat'] min_sequence = v['seq_len'] current_sequence = v['seq_pos'] # compute a segment map and display it. See README.md for an # explanation of segment maps and cluster maps. 
segment_map = '' segment_chars = '#-' for b in jukebox.beats: segment_map += segment_chars[ b['segment'] % 2 ] window.addstr(y_offset,0,segment_map + " ") # highlight all the jump candidates in the segment # map for c in jukebox.beats[beat]['jump_candidates']: b = jukebox.beats[c] window.addch(y_offset + int(b['id'] / term_width), # y position of character b['id'] % term_width, # x position of character ord(segment_chars[b['segment'] %2]), # either '#' or '-' depending on the segment curses.A_REVERSE) # print in reverse highlight # print the position tracker on the segment map x_pos = beat % term_width y_pos = int(beat/term_width) + y_offset beats_until_jump = min_sequence - current_sequence buj_disp = '' # show the beats until the next jump. If the value == 0 then # then sequence wanted to jump but couldn't find a suitable # target. Display an appropriate symbol for that (a frowny face, of course!) if beats_until_jump > 0: buj_disp = str(beats_until_jump).zfill(2) else: buj_disp = ':(' window.addstr(y_pos, x_pos, buj_disp, curses.A_BOLD | curses.A_REVERSE | curses.A_STANDOUT ) window.refresh() time_finish = time.time() return time_finish - time_start def get_verbose_info(): """Show statistics about the song and the analysis""" info = """ filename: %s duration: %02d:%02d:%02d beats: %d tempo: %d bpm clusters: %d segments: %d samplerate: %d """ (minutes,seconds) = divmod(round(jukebox.duration),60) (hours, minutes) = divmod(minutes, 60) verbose_info = info % (os.path.basename(args.filename), hours, minutes, seconds, len(jukebox.beats), int(round(jukebox.tempo)), jukebox.clusters, jukebox.segments, jukebox.sample_rate) segment_map = '' cluster_map = '' segment_chars = '#-' cluster_chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890-=,.<>/?;:!@#$%^&*()_+' for b in jukebox.beats: segment_map += segment_chars[ b['segment'] % 2 ] cluster_map += cluster_chars[ b['cluster'] ] verbose_info += "\n" + segment_map + "\n\n" if args.verbose: verbose_info += cluster_map + "\n\n" verbose_info += jukebox._extra_diag return verbose_info def get_window_contents(): """Dump the contents of the current curses window.""" tbox = curses.textpad.Textbox(window) tbox.stripspaces = False w_str = tbox.gather() return w_str def cleanup(): """Cleanup before exiting""" if not window: return w_str = get_window_contents() curses.curs_set(1) curses.endwin() print(w_str.rstrip()) print mixer.quit() def graceful_exit(signum, frame): """Catch SIGINT gracefully""" # restore the original signal handler as otherwise evil things will happen # in raw_input when CTRL+C is pressed, and our signal handler is not re-entrant signal.signal(signal.SIGINT, original_sigint) cleanup() sys.exit(0) def save_to_file(jukebox, label, duration): ''' Save a fixed length of audio to disk. ''' avg_beat_duration = 60 / jukebox.tempo num_beats_to_save = int(duration / avg_beat_duration) # this list comprehension returns all the 'buffer' arrays from the beats # associated with the [0..num_beats_to_save] entries in the play vector main_bytes = [jukebox.beats[v['beat']]['buffer'] for v in jukebox.play_vector[0:num_beats_to_save]] # main_bytes is an array of byte[] arrays. 
We need to flatten it to just a # regular byte[] output_bytes = np.concatenate( main_bytes ) # write out the wav file sf.write(label + '.wav', output_bytes, jukebox.sample_rate, format='WAV', subtype='PCM_24') if __name__ == "__main__": # store the original SIGINT handler and install a new handler original_sigint = signal.getsignal(signal.SIGINT) signal.signal(signal.SIGINT, graceful_exit) # # Main program logic # window = None args = process_args() curses.setupterm() window = curses.initscr() curses.curs_set(0) # do the clustering. Run synchronously. Post status messages to MyCallback() jukebox = InfiniteJukebox(filename=args.filename, start_beat=args.start, clusters=args.clusters, progress_callback=MyCallback, do_async=False, use_v1=args.use_v1) # show more info about what was found window.addstr(2,0, get_verbose_info()) window.refresh() # if we're just saving the remix to a file, then just # find the necessary beats and do that if args.save: save_to_file(jukebox, args.save, args.duration) graceful_exit(0, 0) # it's important to make sure the mixer is set up with the # same sample rate as the audio. Otherwise the playback will # sound too slow/fast/awful mixer.init(frequency=jukebox.sample_rate) channel = mixer.Channel(0) # pygame's event handling functions won't work unless the # display module has been initialized -- even though we # won't be making any display calls. pygame.display.init() # register the event type we want fired when a sound buffer # finishes playing channel.set_endevent(SOUND_FINISHED) # queue and start playing the first event in the play vector. This is basic # audio double buffering that will reduce choppy audio from imprecise timings. The # goal is to always have one beat in queue to play as soon as the last one is done. beat_to_play = jukebox.beats[ jukebox.play_vector[0]['beat'] ] snd = mixer.Sound(buffer=beat_to_play['buffer']) channel.queue(snd) display_playback_progress(jukebox.play_vector[0]) # go through the rest of the playback list, start playing each beat, display # the progress and wait for the playback to complete. Playback happens on another # thread in the pygame library, so we have to wait to be signaled to queue another # event. for v in jukebox.play_vector[1:]: beat_to_play = jukebox.beats[ v['beat'] ] snd = mixer.Sound(buffer=beat_to_play['buffer']) channel.queue(snd) pygame.event.wait() display_playback_progress(v)
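For reference, the Remixatron analysis above can also be driven without the curses UI. The snippet below is an untested sketch (the input filename is a placeholder) that mirrors `save_to_file()` to render a short remix straight to disk.

```python
import numpy as np
import soundfile as sf
from Remixatron import InfiniteJukebox

# Analyze the track synchronously; clusters=0 lets the library pick a value automatically.
jukebox = InfiniteJukebox(filename="song.mp3", start_beat=1, clusters=0, do_async=False)

# Concatenate the audio buffers for the first 200 entries of the play vector
# and write them out, exactly as save_to_file() does.
buffers = [jukebox.beats[v['beat']]['buffer'] for v in jukebox.play_vector[:200]]
sf.write("remix.wav", np.concatenate(buffers), jukebox.sample_rate,
         format='WAV', subtype='PCM_24')
```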
dccuchile/albert-large-spanish-finetuned-ner
[ "pytorch", "albert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "AlbertForTokenClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
# EternalJukebox The source files for the EternalJukebox, a rehosting of the Infinite Jukebox. This repo contains everything you need to host the EternalJukebox on your own server! You can visit the official site [here](https://eternalbox.dev/), in case you want to mess around with it without doing all the hard stuff. # Docker Install ## Prerequisites You need to install [docker](https://docs.docker.com/engine/install/) and [docker-compose](https://docs.docker.com/compose/install/). Then, download or clone the repository. ## Configuration To configure, rename `.env.example` to `.env` and change the appropriate values. For advanced configuration edit `envvar_config.yaml`. ## Running To start, run `docker-compose up -d` in the repository's directory. To stop, run `docker-compose down`. If you change anything in the repository, like pulling updates, run `docker-compose build` to re-build the application. If you want to change the port from 8080, edit `docker-compose.yml` line 9 to be `- <your port>:8080` # Manual Install ## Prerequisites ### Java: ##### Windows Download and install Java from https://www.java.com/en/download/ ##### Debian-based Linux distributions For Ubuntu or Debian-based distributions execute `sudo apt-get install default-jre` in the terminal ##### Fedora and CentOS There is a tutorial for installing Java on Fedora and CentOS at https://www.digitalocean.com/community/tutorials/how-to-install-java-on-centos-and-fedora ### Yt-dlp (a more up-to-date fork of Youtube-dl): ##### Windows Download the .exe at https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp.exe and place it in `C:\Windows\`, or in another folder on the PATH. ##### Linux Use these commands in the terminal to install yt-dlp on Linux: `sudo curl -L https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp -o /usr/local/bin/yt-dlp` `sudo chmod a+rx /usr/local/bin/yt-dlp` ### ffmpeg: ##### Windows Download the exe from https://ffmpeg.zeranoe.com/builds/ and place it in `C:\Windows\`, or in another folder on the PATH. ##### Linux ffmpeg is available to download in most distributions using `sudo apt-get install ffmpeg` or equivalent ## Getting the project files: The whole process of obtaining project files is much easier now, as the build process is streamlined through Jenkins. The project site is over [here](https://jenkins.abimon.org/job/EternalJukebox/), and contains the individual files to download, or an all-in-one zip of all the files. Alternatively, the files can be found over at a permanent server [here](https://abimon.org/eternal_jukebox) ## Configuring The first thing to do is create a new file called either `config.yaml` or `config.json` (YAML tends to be easier to write, but takes up slightly more space), then open it with Notepad/Notepad++ on Windows or whatever text editor you like on Linux (for example nano: `nano config.json`). Now you should go to https://developer.spotify.com/my-applications/ and log in to your Spotify account. Then click the "Create an app" button and a new page should pop up. There, give it a name and description and click Create. It should send you to the new app's page; the only things you need from here are your Client ID and Client Secret (Note: never share these with anyone!). You will also need a YouTube Data API key; you can find out how to obtain one [here](https://developers.google.com/youtube/v3/getting-started).
There are a variety of config options (documentation coming soon) that allow most portions of the EternalJukebox to be configured, and these can be entered here. ## Starting the server: First you need to open the Terminal or Command Prompt. Then make sure it's running in the folder that your EternalJukebox.jar is in; once again, use the `cd` command to do this. Then execute the jar with `java -jar EternalJukebox.jar` If everything went right, it should say `Listening at http://0.0.0.0:11037`, and you should now be able to connect to it with a browser through http://localhost:11037 Congrats, you did it! ## Manually Building This is not recommended unless you're making some modifications, and as such should only be performed by more advanced users. You'll need to obtain a copy of [Gradle](https://gradle.org/install/), likely a [JDK](http://www.oracle.com/technetwork/java/javase/downloads/jdk8-downloads-2133151.html), and [Jekyll](https://jekyllrb.com/). You'll also need the project files in some capacity, be it `git clone` or downloading the archive from GitHub. From there, building in Gradle is simple; just run `gradle clean shadowJar` from the project file directory. That should produce a jar file in `build/libs` that will work for you. In addition, you'll need to build the Jekyll webpages, which can be done by running `jekyll build --source _web --destination web`
dccuchile/albert-large-spanish-finetuned-pawsx
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
25
null
--- library_name: stable-baselines3 tags: - AntBulletEnv-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: AntBulletEnv-v0 type: AntBulletEnv-v0 metrics: - type: mean_reward value: 1766.21 +/- 63.44 name: mean_reward verified: false --- # **A2C** Agent playing **AntBulletEnv-v0** This is a trained model of a **A2C** agent playing **AntBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
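Until the card's TODO is filled in, the following is a hedged sketch of how such a checkpoint is usually loaded; the repository id and filename are placeholders that must match the actual upload, and the Gym/PyBullet setup is an assumption.

```python
import gym
import pybullet_envs  # noqa: F401 -- registers AntBulletEnv-v0
from huggingface_sb3 import load_from_hub
from stable_baselines3 import A2C

# Placeholder repo id and filename -- substitute the real ones for this upload.
checkpoint = load_from_hub(repo_id="<user>/a2c-AntBulletEnv-v0",
                           filename="a2c-AntBulletEnv-v0.zip")
model = A2C.load(checkpoint)

env = gym.make("AntBulletEnv-v0")
obs = env.reset()
for _ in range(1000):
    action, _states = model.predict(obs, deterministic=True)
    obs, reward, done, info = env.step(action)
    if done:
        obs = env.reset()
```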
dccuchile/albert-large-spanish-finetuned-qa-mlqa
[ "pytorch", "albert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "AlbertForQuestionAnswering" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: creativeml-openrail-m tags: - stable-diffusion - text-to-image ---
dccuchile/albert-large-spanish-finetuned-xnli
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
null
--- language: en thumbnail: http://www.huggingtweets.com/azulthesnail-kathyalexx-marudecinco/1666710422318/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1583932374550716416/qRWsI19i_400x400.jpg&#39;)"> </div> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1576895864597004290/OvjzjXe7_400x400.jpg&#39;)"> </div> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1578550833234542594/R_GQ0lW9_400x400.jpg&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI CYBORG 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Kathy Alex 📺🏴󠁧󠁢󠁳󠁣󠁴󠁿| CRT VTUBER & Azulie is on hiatus until i have a pc~ & Maru 🧉 Hero/Idol VTuber ⭐ 【GATTAI!! The Live】</div> <div style="text-align: center; font-size: 14px;">@azulthesnail-kathyalexx-marudecinco</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Kathy Alex 📺🏴󠁧󠁢󠁳󠁣󠁴󠁿| CRT VTUBER & Azulie is on hiatus until i have a pc~ & Maru 🧉 Hero/Idol VTuber ⭐ 【GATTAI!! The Live】. | Data | Kathy Alex 📺🏴󠁧󠁢󠁳󠁣󠁴󠁿| CRT VTUBER | Azulie is on hiatus until i have a pc~ | Maru 🧉 Hero/Idol VTuber ⭐ 【GATTAI!! The Live】 | | --- | --- | --- | --- | | Tweets downloaded | 3249 | 3231 | 3232 | | Retweets | 139 | 520 | 199 | | Short tweets | 820 | 695 | 292 | | Tweets kept | 2290 | 2016 | 2741 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/zxca1cj1/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @azulthesnail-kathyalexx-marudecinco's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/2xrpxqbx) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/2xrpxqbx/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/azulthesnail-kathyalexx-marudecinco') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). 
In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
dccuchile/albert-tiny-spanish-finetuned-mldoc
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
32
null
--- tags: - image-classification - pytorch - huggingpics metrics: - accuracy model-index: - name: rare-puppers results: - task: name: Image Classification type: image-classification metrics: - name: Accuracy type: accuracy value: 0.9701492786407471 --- # rare-puppers Autogenerated by HuggingPics🤗🖼️ Create your own image classifier for **anything** by running [the demo on Google Colab](https://colab.research.google.com/github/nateraw/huggingpics/blob/main/HuggingPics.ipynb). Report any issues with the demo at the [github repo](https://github.com/nateraw/huggingpics). ## Example Images #### corgi ![corgi](images/corgi.jpg) #### samoyed ![samoyed](images/samoyed.jpg) #### shiba inu ![shiba inu](images/shiba_inu.jpg)
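A minimal inference sketch (the model id below is a placeholder for this repository's actual path):

```python
from transformers import pipeline

# Placeholder model id -- replace with the actual repo path of this upload.
classifier = pipeline("image-classification", model="<user>/rare-puppers")
print(classifier("images/corgi.jpg"))  # returns labels with scores, e.g. corgi / samoyed / shiba inu
```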
dccuchile/albert-xlarge-spanish-finetuned-mldoc
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
26
null
--- license: creativeml-openrail-m --- This model was trained based off of https://huggingface.co/runwayml/stable-diffusion-v1-5 for 15000 steps using 2.5k images from https://dune.fandom.com/wiki/Dune_Wiki "bene gesserit" ![bene gesserit_23.png](https://s3.amazonaws.com/moonup/production/uploads/1666715289256-628ccf2530d48c565bae0af1.png) "dune" ![dune_23.png](https://s3.amazonaws.com/moonup/production/uploads/1666715146863-628ccf2530d48c565bae0af1.png) "paul atreides" ![tmpjr8qk311.png](https://s3.amazonaws.com/moonup/production/uploads/1666715437446-628ccf2530d48c565bae0af1.png) "sandworm" ![sandworm_23.png](https://s3.amazonaws.com/moonup/production/uploads/1666715322167-628ccf2530d48c565bae0af1.png) "taylor swift" ![taylor swift_23.png](https://s3.amazonaws.com/moonup/production/uploads/1666715146854-628ccf2530d48c565bae0af1.png) "yoda" ![yoda_23.png](https://s3.amazonaws.com/moonup/production/uploads/1666715146864-628ccf2530d48c565bae0af1.png) "shai hulud" ![tmpdfzbv3n2.png](https://s3.amazonaws.com/moonup/production/uploads/1666715234746-628ccf2530d48c565bae0af1.png)
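The card gives no usage code; the following is an untested sketch with 🤗 Diffusers, where the repository id is a placeholder and the fp16/CUDA settings are assumptions.

```python
import torch
from diffusers import StableDiffusionPipeline

# Placeholder repo id -- replace with this fine-tune's actual path.
pipe = StableDiffusionPipeline.from_pretrained(
    "<user>/dune-stable-diffusion", torch_dtype=torch.float16
).to("cuda")

image = pipe("bene gesserit").images[0]  # prompts like the examples shown above
image.save("bene_gesserit.png")
```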
dccuchile/albert-xlarge-spanish-finetuned-pawsx
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
24
null
--- language: en license: apache-2.0 library_name: diffusers tags: [] datasets: json metrics: [] --- <!-- This model card has been generated automatically according to the information the training script had access to. You should probably proofread and complete it, then remove this comment. --> # ddpm-butterflies-128 ## Model description This diffusion model is trained with the [🤗 Diffusers](https://github.com/huggingface/diffusers) library on the `json` dataset. ## Intended uses & limitations #### How to use ```python # TODO: add an example code snippet for running this diffusion pipeline ``` #### Limitations and bias [TODO: provide examples of latent issues and potential remediations] ## Training data [TODO: describe the data used to train the model] ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 16 - eval_batch_size: 16 - gradient_accumulation_steps: 1 - optimizer: AdamW with betas=(None, None), weight_decay=None and epsilon=None - lr_scheduler: None - lr_warmup_steps: 500 - ema_inv_gamma: None - ema_inv_gamma: None - ema_inv_gamma: None - mixed_precision: fp16 ### Training results 📈 [TensorBoard logs](https://huggingface.co/judaschrist/ddpm-butterflies-128/tensorboard?#scalars)
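One way to fill in the TODO above: unconditional DDPM checkpoints produced by this training script can normally be run through `DDPMPipeline`. The repo id below is taken from the TensorBoard link and is otherwise an assumption.

```python
from diffusers import DDPMPipeline

# Repo id inferred from the TensorBoard link in this card.
pipeline = DDPMPipeline.from_pretrained("judaschrist/ddpm-butterflies-128")
image = pipeline().images[0]  # one unconditional 128x128 sample
image.save("butterfly.png")
```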
dccuchile/albert-xlarge-spanish-finetuned-pos
[ "pytorch", "albert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "AlbertForTokenClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- language: en thumbnail: http://www.huggingtweets.com/mickyc_1/1666716763439/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1051285627734544385/q4JfjJq7_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Mick Cunningham</div> <div style="text-align: center; font-size: 14px;">@mickyc_1</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Mick Cunningham. | Data | Mick Cunningham | | --- | --- | | Tweets downloaded | 3161 | | Retweets | 1745 | | Short tweets | 225 | | Tweets kept | 1191 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/3qltgo32/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @mickyc_1's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/1mtpatvd) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/1mtpatvd/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/mickyc_1') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
dccuchile/albert-xxlarge-spanish-finetuned-mldoc
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
26
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imagefolder metrics: - accuracy model-index: - name: swin-tiny-patch4-window7-224-large-dataset-varicropped results: - task: name: Image Classification type: image-classification dataset: name: imagefolder type: imagefolder config: default split: train args: default metrics: - name: Accuracy type: accuracy value: 0.6571201272871917 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-tiny-patch4-window7-224-large-dataset-varicropped This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 1.3554 - Accuracy: 0.6571 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 30 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.8523 | 0.99 | 88 | 1.8136 | 0.3771 | | 0.0725 | 1.99 | 176 | 1.2359 | 0.6006 | | 0.0397 | 2.99 | 264 | 1.1716 | 0.6014 | | 0.0179 | 3.99 | 352 | 1.5688 | 0.5704 | | 0.0173 | 4.99 | 440 | 1.3718 | 0.6237 | | 0.0097 | 5.99 | 528 | 1.3841 | 0.5927 | | 0.0109 | 6.99 | 616 | 1.4044 | 0.5895 | | 0.0019 | 7.99 | 704 | 1.2936 | 0.6150 | | 0.002 | 8.99 | 792 | 1.4264 | 0.5760 | | 0.0035 | 9.99 | 880 | 1.2226 | 0.6396 | | 0.0025 | 10.99 | 968 | 1.1553 | 0.6635 | | 0.0009 | 11.99 | 1056 | 1.1727 | 0.6643 | | 0.0037 | 12.99 | 1144 | 1.1182 | 0.6714 | | 0.0017 | 13.99 | 1232 | 1.4015 | 0.6364 | | 0.0009 | 14.99 | 1320 | 1.2955 | 0.6683 | | 0.0002 | 15.99 | 1408 | 1.2310 | 0.6555 | | 0.0007 | 16.99 | 1496 | 1.3849 | 0.6325 | | 0.001 | 17.99 | 1584 | 1.4312 | 0.6102 | | 0.0001 | 18.99 | 1672 | 1.5087 | 0.6181 | | 0.0002 | 19.99 | 1760 | 1.7247 | 0.6062 | | 0.0016 | 20.99 | 1848 | 1.5534 | 0.6237 | | 0.0004 | 21.99 | 1936 | 1.5382 | 0.6333 | | 0.0008 | 22.99 | 2024 | 1.4910 | 0.6484 | | 0.0008 | 23.99 | 2112 | 1.5020 | 0.6380 | | 0.0005 | 24.99 | 2200 | 1.4788 | 0.6468 | | 0.001 | 25.99 | 2288 | 1.3416 | 0.6770 | | 0.003 | 26.99 | 2376 | 1.2643 | 0.6738 | | 0.0001 | 27.99 | 2464 | 1.3582 | 0.6595 | | 0.0 | 28.99 | 2552 | 1.3767 | 0.6523 | | 0.0 | 29.99 | 2640 | 1.3554 | 0.6571 | ### Framework versions - Transformers 4.21.1 - Pytorch 1.12.1 - Datasets 2.4.0 - Tokenizers 0.12.1
dccuchile/albert-xxlarge-spanish-finetuned-qa-mlqa
[ "pytorch", "albert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "AlbertForQuestionAnswering" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- language: en thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1525190650781700100/IA5xWzX8_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">ɐɔɔn˥</div> <div style="text-align: center; font-size: 14px;">@vacuumacumen</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from ɐɔɔn˥. | Data | ɐɔɔn˥ | | --- | --- | | Tweets downloaded | 3131 | | Retweets | 1562 | | Short tweets | 381 | | Tweets kept | 1188 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/30ibvfar/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @vacuumacumen's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/2z84zva6) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/2z84zva6/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/vacuumacumen') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
dccuchile/albert-xxlarge-spanish-finetuned-xnli
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
68
null
--- language: en license: apache-2.0 library_name: diffusers tags: [] datasets: huggan/smithsonian_butterflies_subset metrics: [] --- <!-- This model card has been generated automatically according to the information the training script had access to. You should probably proofread and complete it, then remove this comment. --> # ddpm-butterflies-128 ## Model description This diffusion model is trained with the [🤗 Diffusers](https://github.com/huggingface/diffusers) library on the `huggan/smithsonian_butterflies_subset` dataset. ## Intended uses & limitations #### How to use ```python # TODO: add an example code snippet for running this diffusion pipeline ``` #### Limitations and bias [TODO: provide examples of latent issues and potential remediations] ## Training data [TODO: describe the data used to train the model] ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 16 - eval_batch_size: 16 - gradient_accumulation_steps: 1 - optimizer: AdamW with betas=(None, None), weight_decay=None and epsilon=None - lr_scheduler: None - lr_warmup_steps: 500 - ema_inv_gamma: None - ema_inv_gamma: None - ema_inv_gamma: None - mixed_precision: fp16 ### Training results 📈 [TensorBoard logs](https://huggingface.co/mattgabriel/ddpm-butterflies-128/tensorboard?#scalars)
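To address the TODO above, a hedged `DDPMPipeline` sketch follows; the repo id is inferred from the TensorBoard link and is otherwise an assumption.

```python
from diffusers import DDPMPipeline

pipeline = DDPMPipeline.from_pretrained("mattgabriel/ddpm-butterflies-128")
pipeline().images[0].save("butterfly_sample.png")  # one unconditional 128x128 sample
```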
dccuchile/albert-base-spanish
[ "pytorch", "tf", "albert", "pretraining", "es", "dataset:large_spanish_corpus", "transformers", "spanish", "OpenCENIA" ]
null
{ "architectures": [ "AlbertForPreTraining" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
586
null
--- language: - ms tags: - paraphrase metrics: - sacrebleu --- # finetune-paraphrase-t5-small-standard-bahasa-cased Finetuned T5 small on MS paraphrase tasks. ## Dataset 1. translated PAWS, https://huggingface.co/datasets/mesolitica/translated-PAWS 2. translated MRPC, https://huggingface.co/datasets/mesolitica/translated-MRPC 3. translated ParaSCI, https://huggingface.co/datasets/mesolitica/translated-paraSCI ## Finetune details 1. Finetune using single RTX 3090 Ti. Scripts at https://github.com/huseinzol05/malaya/tree/master/session/paraphrase/hf-t5 ## Supported prefix 1. `parafrasa: {string}`, for MS paraphrase. ## Evaluation Evaluated on MRPC validation set and ParaSCI Arxiv test set. ``` {'name': 'BLEU', 'score': 37.598729045833316, '_mean': -1.0, '_ci': -1.0, '_verbose': '62.6/42.5/33.2/27.0 (BP = 0.957 ratio = 0.958 hyp_len = 96781 ref_len = 101064)', 'bp': 0.9567103919247614, 'counts': [60539, 38753, 28443, 21680], 'totals': [96781, 91237, 85693, 80149], 'sys_len': 96781, 'ref_len': 101064, 'precisions': [62.55256713611143, 42.47509234192268, 33.19174261608299, 27.049620082596164], 'prec_str': '62.6/42.5/33.2/27.0', 'ratio': 0.9576209134805668} ```
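A minimal generation sketch (untested): the repository id is assumed from the model name in this card, and the generation settings are illustrative.

```python
from transformers import AutoTokenizer, T5ForConditionalGeneration

# Assumed repo id based on the model name above.
model_id = "mesolitica/finetune-paraphrase-t5-small-standard-bahasa-cased"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = T5ForConditionalGeneration.from_pretrained(model_id)

# Use the supported prefix described above.
inputs = tokenizer("parafrasa: Saya suka makan nasi lemak pada waktu pagi.",
                   return_tensors="pt")
outputs = model.generate(**inputs, max_length=128, num_beams=5, early_stopping=True)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```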
dccuchile/albert-tiny-spanish
[ "pytorch", "tf", "albert", "pretraining", "es", "dataset:large_spanish_corpus", "transformers", "spanish", "OpenCENIA" ]
null
{ "architectures": [ "AlbertForPreTraining" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
393
null
## Diffusion-GAN &mdash; Official PyTorch implementation **Diffusion-GAN: Training GANs with Diffusion**<br> Zhendong Wang, Huangjie Zheng, Pengcheng He, Weizhu Chen and Mingyuan Zhou <br> https://arxiv.org/abs/2206.02262 <br> Abstract: *For stable training of generative adversarial networks (GANs), injecting instance noise into the input of the discriminator is considered as a theoretically sound solution, which, however, has not yet delivered on its promise in practice. This paper introduces Diffusion-GAN that employs a Gaussian mixture distribution, defined over all the diffusion steps of a forward diffusion chain, to inject instance noise. A random sample from the mixture, which is diffused from an observed or generated data, is fed as the input to the discriminator. The generator is updated by backpropagating its gradient through the forward diffusion chain, whose length is adaptively adjusted to control the maximum noise-to-data ratio allowed at each training step. Theoretical analysis verifies the soundness of the proposed Diffusion-GAN, which provides model- and domain-agnostic differentiable augmentation. A rich set of experiments on diverse datasets show that DiffusionGAN can provide stable and data-efficient GAN training, bringing consistent performance improvement over strong GAN baselines for synthesizing photorealistic images.* [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/diffusion-gan-training-gans-with-diffusion/image-generation-on-celeba-64x64)](https://paperswithcode.com/sota/image-generation-on-celeba-64x64?p=diffusion-gan-training-gans-with-diffusion) [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/diffusion-gan-training-gans-with-diffusion/image-generation-on-stl-10)](https://paperswithcode.com/sota/image-generation-on-stl-10?p=diffusion-gan-training-gans-with-diffusion) [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/diffusion-gan-training-gans-with-diffusion/image-generation-on-lsun-bedroom-256-x-256)](https://paperswithcode.com/sota/image-generation-on-lsun-bedroom-256-x-256?p=diffusion-gan-training-gans-with-diffusion) [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/diffusion-gan-training-gans-with-diffusion/image-generation-on-afhq-wild)](https://paperswithcode.com/sota/image-generation-on-afhq-wild?p=diffusion-gan-training-gans-with-diffusion) [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/diffusion-gan-training-gans-with-diffusion/image-generation-on-afhq-cat)](https://paperswithcode.com/sota/image-generation-on-afhq-cat?p=diffusion-gan-training-gans-with-diffusion) [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/diffusion-gan-training-gans-with-diffusion/image-generation-on-afhq-dog)](https://paperswithcode.com/sota/image-generation-on-afhq-dog?p=diffusion-gan-training-gans-with-diffusion) [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/diffusion-gan-training-gans-with-diffusion/image-generation-on-lsun-churches-256-x-256)](https://paperswithcode.com/sota/image-generation-on-lsun-churches-256-x-256?p=diffusion-gan-training-gans-with-diffusion) [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/diffusion-gan-training-gans-with-diffusion/image-generation-on-ffhq-1024-x-1024)](https://paperswithcode.com/sota/image-generation-on-ffhq-1024-x-1024?p=diffusion-gan-training-gans-with-diffusion) ## ToDos - [x] 
Initial code release
- [x] Providing pretrained models

## Build your Diffusion-GAN

Here, we explain how to train general GANs with diffusion. We provide two options: (a) a plug-in that is as simple as a data-augmentation method; (b) training GANs on diffusion chains with a timestep-dependent discriminator. So far we have not found significant empirical differences between the two approaches, while the second approach has stronger theoretical guarantees. We suspect that the second approach could become better once a more advanced timestep-dependent structure is used in the discriminator, and we leave that for future study.

### Simple Plug-in

* Design a proper diffusion process based on the ```diffusion.py``` file
* Apply diffusion to the inputs of the discriminator: ```logits = Discriminator(Diffusion(gen/real_images))```
* Add adaptiveness of the diffusion into your training iterations:
```
if update_diffusion:  # batch_idx % ada_interval == 0
    # Raise or lower the diffusion intensity p depending on whether the discriminator's
    # output on real images overshoots or undershoots the target.
    adjust = np.sign(sign(Discriminator(real_images)) - ada_target) * C  # C = (batch_size * ada_interval) / (ada_kimg * 1000)
    diffusion.p = (diffusion.p + adjust).clip(min=0., max=1.)
    diffusion.update_T()  # re-derive the maximum diffusion timestep T from the new p
```
(A compact, self-contained sketch of such a diffusion module is included at the end of this README.)

### Full Version

* Add the diffusion timestep `t` as an input to the discriminator, `logits = Discriminator(images, t)`. You may need some modifications to your discriminator architecture.
* The other steps are the same as in Simple Plug-in. Note that since the discriminator now depends on the timestep, you need to collect `t` from the diffusion module:
```
diffused_images, t = Diffusion(images)
logits = Discriminator(diffused_images, t)
```

## Train our Diffusion-GAN

### Requirements

* 64-bit Python 3.7 and PyTorch 1.7.1/1.8.1. See [https://pytorch.org/](https://pytorch.org/) for PyTorch install instructions.
* CUDA toolkit 11.0 or later.
* Python libraries: `pip install click requests tqdm pyspng ninja imageio-ffmpeg==0.4.3`.

### Data Preparation

In our paper, we trained our models on [CIFAR-10 (32 x 32)](https://www.cs.toronto.edu/~kriz/cifar.html), [STL-10 (64 x 64)](https://cs.stanford.edu/~acoates/stl10/), [LSUN (256 x 256)](https://github.com/fyu/lsun), [AFHQ (512 x 512)](https://github.com/clovaai/stargan-v2) and [FFHQ (1024 x 1024)](https://github.com/NVlabs/ffhq-dataset). You can download the datasets we used in our paper from their respective websites. To prepare a dataset at the respective resolution, run for example
```.bash
python dataset_tool.py --source=~/downloads/lsun/raw/bedroom_lmdb --dest=~/datasets/lsun_bedroom200k.zip \
  --transform=center-crop --width=256 --height=256 --max_images=200000
python dataset_tool.py --source=~/downloads/lsun/raw/church_lmdb --dest=~/datasets/lsun_church200k.zip \
  --transform=center-crop-wide --width=256 --height=256 --max_images=200000
```

### Training

We show the training commands that we used below.
In most cases, the training commands are similar, so below we use the CIFAR-10 dataset as an example.

For Diffusion-GAN,
```.bash
python train.py --outdir=training-runs --data="~/cifar10.zip" --gpus=4 --cfg cifar --kimg 50000 --aug no --target 0.6 --noise_sd 0.05 --ts_dist priority
```

For Diffusion-ProjectedGAN,
```.bash
python train.py --outdir=training-runs --data="~/cifar10.zip" --gpus=4 --batch 64 --batch-gpu=16 --cfg fastgan --kimg 50000 --target 0.45 --d_pos first --noise_sd 0.5
```

For Diffusion-InsGen,
```.bash
python train.py --outdir=training-runs --data="~/afhq-wild.zip" --gpus=8 --cfg paper512 --kimg 25000
```

We follow the `config` settings from [StyleGAN2-ADA](https://github.com/NVlabs/stylegan2-ada-pytorch) and refer to that repository for more details. The other major hyperparameters are listed and discussed below:

* `--target` the discriminator target, which balances the level of diffusion intensity.
* `--aug` domain-specific image augmentation, such as ADA and Differentiable Augmentation, which is used to evaluate complementarity with diffusion.
* `--noise_sd` the diffusion noise standard deviation, which is set to 0.05 in our case.
* `--ts_dist` the timestep sampling distribution, $\pi(t)$ in the paper. We evaluated two `t` sampling distributions, `['priority', 'uniform']`, where `'priority'` denotes Equation (11) in the paper and `'uniform'` denotes random sampling. In most cases `'priority'` works slightly better, while in some cases, such as FFHQ, `'uniform'` is better.

## Sampling and Evaluation with our checkpoints

We report the FIDs of our Diffusion-GAN models below and provide the trained checkpoints in the ``./checkpoints`` folder:

| Model | Dataset | Resolution | FID |
|:---------------------------:|:------------:|:----------:|:-----:|
| Diffusion-StyleGAN2 | CIFAR-10 | 32x32 | 3.19 |
| Diffusion-StyleGAN2 | CelebA | 64x64 | 1.69 |
| Diffusion-StyleGAN2 | STL-10 | 64x64 | 11.53 |
| Diffusion-StyleGAN2 | LSUN-Bedroom | 256x256 | 3.65 |
| Diffusion-StyleGAN2 | LSUN-Church | 256x256 | 3.17 |
| Diffusion-StyleGAN2 | FFHQ | 1024x1024 | 2.83 |
| Diffusion-ProjectedGAN | CIFAR-10 | 32x32 | 2.54 |
| Diffusion-ProjectedGAN | STL-10 | 64x64 | 6.91 |
| Diffusion-ProjectedGAN | LSUN-Bedroom | 256x256 | 1.43 |
| Diffusion-ProjectedGAN | LSUN-Church | 256x256 | 1.85 |
| Diffusion-InsGen | AFHQ-Cat | 512x512 | 2.40 |
| Diffusion-InsGen | AFHQ-Dog | 512x512 | 4.83 |
| Diffusion-InsGen | AFHQ-Wild | 512x512 | 1.51 |

To generate samples, run the following commands:
```.bash
# Generate FFHQ with pretrained Diffusion-StyleGAN2
python generate.py --outdir=out --seeds=1-100 \
    --network=https://tsciencescu.blob.core.windows.net/projectshzheng/DiffusionGAN/diffusion-stylegan2-ffhq.pkl

# Generate LSUN-Church with pretrained Diffusion-ProjectedGAN
python gen_images.py --outdir=out --seeds=1-100 \
    --network=https://tsciencescu.blob.core.windows.net/projectshzheng/DiffusionGAN/diffusion-projectedgan-lsun-church.pkl
```
The checkpoint URL can be replaced with the path of any pre-trained Diffusion-GAN checkpoint from the table above. Similarly, the metrics can be calculated with the following commands:
```.bash
# Pre-trained network pickle: specify dataset explicitly, print result to stdout.
python calc_metrics.py --metrics=fid50k_full --data=~/datasets/ffhq.zip --mirror=1 \ --network=https://tsciencescu.blob.core.windows.net/projectshzheng/DiffusionGAN/diffusion-stylegan2-ffhq.pkl ``` ## Citation ``` @article{wang2022diffusiongan, title = {Diffusion-GAN: Training GANs with Diffusion}, author = {Wang, Zhendong and Zheng, Huangjie and He, Pengcheng and Chen, Weizhu and Zhou, Mingyuan}, journal = {arXiv preprint arXiv:2206.02262}, year = {2022}, url = {https://arxiv.org/abs/2206.02262} } ``` ## Acknowledgements Our code builds upon the awesome [StyleGAN2-ADA repo](https://github.com/NVlabs/stylegan2-ada-pytorch), [InsGen repo](https://github.com/genforce/insgen) and [ProjectedGAN repo](https://github.com/autonomousvision/projected_gan), respectively by Karras et al, Ceyuan Yang et al and Axel Sauer et al.
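## Appendix: a minimal diffusion-module sketch

As referenced in the *Build your Diffusion-GAN* section, the class below is a compact, self-contained illustration of the kind of diffusion module used above. It is a sketch only and is **not** the repository's actual ```diffusion.py```: the linear beta schedule, the `t_min`/`t_max` range, and the simple `t`-proportional `'priority'` weights are illustrative choices rather than the exact settings from the paper (see Equation (11) for the exact sampling distribution).

```python
import numpy as np
import torch


class SimpleDiffusion:
    """Illustrative mixture-of-timesteps noise injection for discriminator inputs."""

    def __init__(self, t_min=5, t_max=500, noise_sd=0.05,
                 beta_start=1e-4, beta_end=2e-2, ts_dist='priority'):
        self.t_min, self.t_max = t_min, t_max
        self.noise_sd = noise_sd
        self.ts_dist = ts_dist
        betas = np.linspace(beta_start, beta_end, t_max)
        self.alpha_bar = np.cumprod(1.0 - betas)  # cumulative signal-retention factor per step
        self.p = 0.0                              # adaptive diffusion intensity in [0, 1]
        self.update_T()

    def update_T(self):
        # The current maximum timestep T grows and shrinks with p.
        self.T = int(self.t_min + self.p * (self.t_max - self.t_min))

    def sample_t(self, n):
        t_vals = np.arange(1, self.T + 1)
        if self.ts_dist == 'priority':
            weights = t_vals.astype(np.float64)   # later (noisier) steps sampled more often
        else:                                     # 'uniform'
            weights = np.ones_like(t_vals, dtype=np.float64)
        return np.random.choice(t_vals, size=n, p=weights / weights.sum())

    def __call__(self, images):
        # q(y_t | y_0): shrink the signal and add Gaussian noise for each sampled timestep.
        # Expects an NCHW batch of images.
        t = self.sample_t(images.shape[0])
        a_bar = torch.as_tensor(self.alpha_bar[t - 1], dtype=images.dtype,
                                device=images.device).view(-1, 1, 1, 1)
        noise = torch.randn_like(images) * self.noise_sd
        y_t = a_bar.sqrt() * images + (1.0 - a_bar).sqrt() * noise
        return y_t, torch.as_tensor(t, device=images.device)
```

Used as in the Simple Plug-in section, the discriminator sees ```diffusion(images)[0]```; in the Full Version it additionally receives the returned ```t```. Pairing this module with the adaptive update shown above (```diffusion.p += adjust; diffusion.update_T()```) closes the feedback loop: the discriminator's confidence drives `p`, and `p` controls how much noise the discriminator sees.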
dccuchile/bert-base-spanish-wwm-cased-finetuned-mldoc
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
null
--- language: en license: apache-2.0 library_name: diffusers tags: [] datasets: huggan/smithsonian_butterflies_subset metrics: [] --- <!-- This model card has been generated automatically according to the information the training script had access to. You should probably proofread and complete it, then remove this comment. --> # ddpm-butterflies-128 ## Model description This diffusion model is trained with the [🤗 Diffusers](https://github.com/huggingface/diffusers) library on the `huggan/smithsonian_butterflies_subset` dataset. ## Intended uses & limitations #### How to use ```python # TODO: add an example code snippet for running this diffusion pipeline ``` #### Limitations and bias [TODO: provide examples of latent issues and potential remediations] ## Training data [TODO: describe the data used to train the model] ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 4 - eval_batch_size: 4 - gradient_accumulation_steps: 1 - optimizer: AdamW with betas=(None, None), weight_decay=None and epsilon=None - lr_scheduler: None - lr_warmup_steps: 500 - ema_inv_gamma: None - ema_inv_gamma: None - ema_inv_gamma: None - mixed_precision: fp16 ### Training results 📈 [TensorBoard logs](https://huggingface.co/gstqtfr/ddpm-butterflies-128/tensorboard?#scalars)
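The "How to use" section above is still a TODO; the snippet below is a minimal usage sketch with the 🤗 Diffusers `DDPMPipeline`, assuming this checkpoint is the one hosted at `gstqtfr/ddpm-butterflies-128`, as the TensorBoard link in this card suggests.

```python
from diffusers import DDPMPipeline

# Load the trained unconditional DDPM (repo id inferred from the TensorBoard link above).
pipeline = DDPMPipeline.from_pretrained("gstqtfr/ddpm-butterflies-128")

# Sample one 128x128 butterfly image and save it.
image = pipeline().images[0]
image.save("butterfly.png")
```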
dccuchile/bert-base-spanish-wwm-uncased-finetuned-qa-mlqa
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- language: en thumbnail: http://www.huggingtweets.com/hubziii/1666719717482/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1508800010904821778/V1w5Wr-z_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Hubz</div> <div style="text-align: center; font-size: 14px;">@hubziii</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Hubz. | Data | Hubz | | --- | --- | | Tweets downloaded | 3245 | | Retweets | 160 | | Short tweets | 752 | | Tweets kept | 2333 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/2s6y6ktm/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @hubziii's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/3pqti9e3) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/3pqti9e3/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/hubziii') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
dccuchile/distilbert-base-spanish-uncased-finetuned-mldoc
[ "pytorch", "distilbert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
null
--- license: mit tags: - generated_from_trainer datasets: - imagefolder model-index: - name: donut-base-Medical_Blocks results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # donut-base-Medical_Blocks This model is a fine-tuned version of [naver-clova-ix/donut-base](https://huggingface.co/naver-clova-ix/donut-base) on the imagefolder dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 2 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.24.0.dev0 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
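Since the card gives no usage example, here is a hedged sketch of how a Donut fine-tune like this one is typically run for document parsing. The repo id, the input file name, and the task prompt token are placeholders; the card does not state where the model is hosted or which prompt it was fine-tuned with.

```python
import torch
from PIL import Image
from transformers import DonutProcessor, VisionEncoderDecoderModel

# Placeholder repo id; substitute the actual location of this fine-tuned checkpoint.
repo = "your-username/donut-base-Medical_Blocks"
processor = DonutProcessor.from_pretrained(repo)
model = VisionEncoderDecoderModel.from_pretrained(repo)

image = Image.open("medical_form.png").convert("RGB")  # placeholder input document
pixel_values = processor(image, return_tensors="pt").pixel_values

task_prompt = "<s>"  # replace with the task start token used during fine-tuning
decoder_input_ids = processor.tokenizer(task_prompt, add_special_tokens=False,
                                        return_tensors="pt").input_ids

with torch.no_grad():
    outputs = model.generate(pixel_values, decoder_input_ids=decoder_input_ids, max_length=512)
print(processor.batch_decode(outputs, skip_special_tokens=True)[0])
```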
dccuchile/distilbert-base-spanish-uncased-finetuned-qa-mlqa
[ "pytorch", "distilbert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "DistilBertForQuestionAnswering" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
---
license: mit
---
### Alberto_Pablo on Stable Diffusion via Dreambooth
#### model by Ganosh
This is the Stable Diffusion model fine-tuned on the Alberto_Pablo concept taught to Stable Diffusion with Dreambooth.
It can be used by modifying the `instance_prompt`: **a photo of sks Alberto**

You can also train your own concepts and upload them to the library by using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb).
And you can run your new concept via `diffusers`: [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb), [Spaces with the Public Concepts loaded](https://huggingface.co/spaces/sd-dreambooth-library/stable-diffusion-dreambooth-concepts)

Here are the images used for training this concept:
![image 0](https://huggingface.co/sd-dreambooth-library/alberto-pablo/resolve/main/concept_images/11.jpeg)
![image 1](https://huggingface.co/sd-dreambooth-library/alberto-pablo/resolve/main/concept_images/4.jpeg)
![image 2](https://huggingface.co/sd-dreambooth-library/alberto-pablo/resolve/main/concept_images/9.jpeg)
![image 3](https://huggingface.co/sd-dreambooth-library/alberto-pablo/resolve/main/concept_images/0.jpeg)
![image 4](https://huggingface.co/sd-dreambooth-library/alberto-pablo/resolve/main/concept_images/2.jpeg)
![image 5](https://huggingface.co/sd-dreambooth-library/alberto-pablo/resolve/main/concept_images/14.jpeg)
![image 6](https://huggingface.co/sd-dreambooth-library/alberto-pablo/resolve/main/concept_images/7.jpeg)
![image 7](https://huggingface.co/sd-dreambooth-library/alberto-pablo/resolve/main/concept_images/13.jpeg)
![image 8](https://huggingface.co/sd-dreambooth-library/alberto-pablo/resolve/main/concept_images/3.jpeg)
![image 9](https://huggingface.co/sd-dreambooth-library/alberto-pablo/resolve/main/concept_images/10.jpeg)
![image 10](https://huggingface.co/sd-dreambooth-library/alberto-pablo/resolve/main/concept_images/8.jpeg)
![image 11](https://huggingface.co/sd-dreambooth-library/alberto-pablo/resolve/main/concept_images/1.jpeg)
![image 12](https://huggingface.co/sd-dreambooth-library/alberto-pablo/resolve/main/concept_images/12.jpeg)
![image 13](https://huggingface.co/sd-dreambooth-library/alberto-pablo/resolve/main/concept_images/6.jpeg)
![image 14](https://huggingface.co/sd-dreambooth-library/alberto-pablo/resolve/main/concept_images/5.jpeg)
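A minimal inference sketch with `diffusers`, assuming the concept is hosted at `sd-dreambooth-library/alberto-pablo` (inferred from the image URLs above) and that a CUDA GPU is available; the fp16 setting is optional.

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "sd-dreambooth-library/alberto-pablo", torch_dtype=torch.float16
).to("cuda")

# The instance prompt "a photo of sks Alberto" activates the learned concept.
image = pipe("a photo of sks Alberto riding a bicycle").images[0]
image.save("alberto.png")
```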
CennetOguz/distilbert-base-uncased-finetuned-recipe-1
[ "pytorch", "tensorboard", "distilbert", "fill-mask", "transformers", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "DistilBertForMaskedLM" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- license: mit tags: - generated_from_trainer model-index: - name: borges-gpt results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # borges-gpt This model is a fine-tuned version of [DeepESP/gpt2-spanish](https://huggingface.co/DeepESP/gpt2-spanish) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.005 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 8 - total_train_batch_size: 256 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_steps: 1000 - num_epochs: 75 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.24.0 - Pytorch 1.13.0+rocm5.2 - Datasets 2.6.1 - Tokenizers 0.13.2
CennetOguz/distilbert-base-uncased-finetuned-recipe-accelerate-1
[ "pytorch", "distilbert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "DistilBertForMaskedLM" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- language: en thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1647023471749111810/G5s5jf4-_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">🌞</div> <div style="text-align: center; font-size: 14px;">@raspberryl0ver</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from 🌞. | Data | 🌞 | | --- | --- | | Tweets downloaded | 2241 | | Retweets | 457 | | Short tweets | 291 | | Tweets kept | 1493 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/btps6b16/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @raspberryl0ver's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/dojoofh6) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/dojoofh6/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/raspberryl0ver') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
Chaddmckay/Cdm
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: en thumbnail: http://www.huggingtweets.com/prathgodbole/1666723893377/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1041700878858674178/q1uKuS6o_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Prathamesh Godbole</div> <div style="text-align: center; font-size: 14px;">@prathgodbole</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Prathamesh Godbole. | Data | Prathamesh Godbole | | --- | --- | | Tweets downloaded | 3245 | | Retweets | 52 | | Short tweets | 241 | | Tweets kept | 2952 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/yqz5qdl4/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @prathgodbole's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/1mum0rf3) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/1mum0rf3/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/prathgodbole') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
Chaewon/mmnt_decoder_en
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- language: en license: apache-2.0 library_name: diffusers tags: [] datasets: huggan/smithsonian_butterflies_subset metrics: [] --- <!-- This model card has been generated automatically according to the information the training script had access to. You should probably proofread and complete it, then remove this comment. --> # ddpm-butterflies-128 ## Model description This diffusion model is trained with the [🤗 Diffusers](https://github.com/huggingface/diffusers) library on the `huggan/smithsonian_butterflies_subset` dataset. ## Intended uses & limitations #### How to use ```python # TODO: add an example code snippet for running this diffusion pipeline ``` #### Limitations and bias [TODO: provide examples of latent issues and potential remediations] ## Training data [TODO: describe the data used to train the model] ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 16 - eval_batch_size: 16 - gradient_accumulation_steps: 1 - optimizer: AdamW with betas=(None, None), weight_decay=None and epsilon=None - lr_scheduler: None - lr_warmup_steps: 500 - ema_inv_gamma: None - ema_inv_gamma: None - ema_inv_gamma: None - mixed_precision: fp16 ### Training results 📈 [TensorBoard logs](https://huggingface.co/yucao16/ddpm-butterflies-128/tensorboard?#scalars)
Chakita/Friends
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- tags: - generated_from_trainer datasets: - rotten_tomatoes model-index: - name: rtm-electra-511E results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # rtm-electra-511E This model was trained from scratch on the rotten_tomatoes dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
Chakita/KROBERT
[ "pytorch", "roberta", "fill-mask", "transformers", "masked-lm", "fill-in-the-blanks", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- language: en thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1579203041764442116/RSLookYD_400x400.jpg&#39;)"> </div> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1571653458972794884/eaxhUsib_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI CYBORG 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Gutted & oskcar</div> <div style="text-align: center; font-size: 14px;">@big___oven-codeinecucumber</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Gutted & oskcar. | Data | Gutted | oskcar | | --- | --- | --- | | Tweets downloaded | 1761 | 2669 | | Retweets | 243 | 635 | | Short tweets | 326 | 308 | | Tweets kept | 1192 | 1726 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/1qyf2pl5/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @big___oven-codeinecucumber's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/2rr9twhn) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/2rr9twhn/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/big___oven-codeinecucumber') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
Chakita/KannadaBERT
[ "pytorch", "roberta", "fill-mask", "transformers", "masked-lm", "fill-in-the-blanks", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-CartPole results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 37.20 +/- 16.28 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1** . To learn to use this model and train yours check Unit 5 of the Deep Reinforcement Learning Class: https://github.com/huggingface/deep-rl-class/tree/main/unit5
Chalponkey/DialoGPT-small-Barry
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
null
--- tags: - conversational --- # Homer Simpson DialoGPT Model
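The card above gives no usage example; below is a sketch of the standard DialoGPT-style chat loop for a conversational checkpoint like this one. The repo id is a placeholder, since the card does not state where the model is hosted.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "your-username/DialoGPT-small-homer"  # placeholder; substitute the real repo id
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo)

chat_history_ids = None
for text in ["Hi Homer, how was work?", "Any plans tonight?"]:
    new_ids = tokenizer.encode(text + tokenizer.eos_token, return_tensors="pt")
    bot_input_ids = new_ids if chat_history_ids is None else torch.cat([chat_history_ids, new_ids], dim=-1)
    chat_history_ids = model.generate(bot_input_ids, max_length=1000,
                                      pad_token_id=tokenizer.eos_token_id)
    reply = tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0],
                             skip_special_tokens=True)
    print("Homer:", reply)
```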
Chan/distilgpt2-finetuned-wikitext2
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit --- ### Toho-pixel on Stable Diffusion This is the `<toho-pixel>` concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the [Stable Conceptualizer](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) notebook. You can also train your own concepts and load them into the concept libraries using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb). Here is the new concept you will be able to use as a `style`: ![<toho-pixel> 0](https://huggingface.co/sd-concepts-library/toho-pixel/resolve/main/concept_images/4.jpeg) ![<toho-pixel> 1](https://huggingface.co/sd-concepts-library/toho-pixel/resolve/main/concept_images/0.jpeg) ![<toho-pixel> 2](https://huggingface.co/sd-concepts-library/toho-pixel/resolve/main/concept_images/2.jpeg) ![<toho-pixel> 3](https://huggingface.co/sd-concepts-library/toho-pixel/resolve/main/concept_images/3.jpeg) ![<toho-pixel> 4](https://huggingface.co/sd-concepts-library/toho-pixel/resolve/main/concept_images/1.jpeg)
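To try the concept outside the notebooks linked above, a recent version of 🤗 Diffusers can load the embedding directly. The base checkpoint below is an assumption (any Stable Diffusion 1.x model with the same text encoder should work), and the concept repo id is inferred from the image URLs above.

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
pipe.load_textual_inversion("sd-concepts-library/toho-pixel")

# Use the learned token as a style in the prompt.
image = pipe("a castle on a hill in the <toho-pixel> style").images[0]
image.save("toho_pixel_castle.png")
```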
Chan/distilroberta-base-finetuned-wikitext2
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: en thumbnail: http://www.huggingtweets.com/ok_0s/1666729242111/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1575869051850612737/Hz2LIceC_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">⓪𝕊 is minting Youts</div> <div style="text-align: center; font-size: 14px;">@ok_0s</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from ⓪𝕊 is minting Youts. | Data | ⓪𝕊 is minting Youts | | --- | --- | | Tweets downloaded | 1390 | | Retweets | 132 | | Short tweets | 287 | | Tweets kept | 971 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/11ejsejg/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @ok_0s's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/1z3prl6a) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/1z3prl6a/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/ok_0s') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
ComCom/gpt2
[ "pytorch", "gpt2", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "GPT2Model" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
---
license: mit
---
This model has been pretrained on the BEIR corpus without relevance-level supervision, following the approach described in the paper **COCO-DR: Combating Distribution Shifts in Zero-Shot Dense Retrieval with Contrastive and Distributionally Robust Learning**. The associated GitHub repository is available at https://github.com/OpenMatch/COCO-DR. The model uses BERT-large as its backbone and has 335M parameters.
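A minimal retrieval sketch for an encoder like this one is shown below. The repo id is an assumption about where this checkpoint is hosted, and using the [CLS] vector with dot-product scoring is also an assumption; check the COCO-DR repository for the exact pooling and scoring used in the paper.

```python
import torch
from transformers import AutoTokenizer, AutoModel

repo = "OpenMatch/cocodr-large"  # assumed repo id for this BERT-large checkpoint
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModel.from_pretrained(repo)

def embed(texts):
    batch = tokenizer(texts, padding=True, truncation=True, return_tensors="pt")
    with torch.no_grad():
        out = model(**batch)
    return out.last_hidden_state[:, 0]  # [CLS] embeddings

query = embed(["what causes rainbows"])
passages = embed([
    "Rainbows are caused by the refraction of sunlight in water droplets.",
    "The stock market closed higher today.",
])
print(query @ passages.T)  # dot-product relevance scores
```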
Connor/DialoGPT-small-rick
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- tags: - gender-bias - bert --- # Model Card for `mabel-bert-base-uncased` # Model Description This is the model for MABEL, as described in our paper, "[MABEL: Attenuating Gender Bias using Textual Entailment Data](https://arxiv.org/abs/2210.14975)". MABEL is trained from an underlying `bert-base-uncased` backbone, and demonstrates a good bias-performance tradeoff across a suite of intrinsic and extrinsic bias metrics.
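Since MABEL keeps the `bert-base-uncased` architecture, it can be used as a drop-in encoder replacement. The repo id below is an assumption about where the checkpoint is hosted.

```python
from transformers import AutoTokenizer, AutoModel

repo = "princeton-nlp/mabel-bert-base-uncased"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModel.from_pretrained(repo)

inputs = tokenizer("The doctor finished her shift.", return_tensors="pt")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, 768)
```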
Contrastive-Tension/BERT-Distil-CT-STSb
[ "pytorch", "tf", "distilbert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "DistilBertModel" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
2022-10-26T07:18:51Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - emotion metrics: - accuracy - f1 model-index: - name: distilbert-base-uncased-finetuned-emotion results: - task: name: Text Classification type: text-classification dataset: name: emotion type: emotion config: default split: train args: default metrics: - name: Accuracy type: accuracy value: 0.9245 - name: F1 type: f1 value: 0.9244695413548749 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.2144 - Accuracy: 0.9245 - F1: 0.9245 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.8227 | 1.0 | 250 | 0.3150 | 0.902 | 0.8992 | | 0.246 | 2.0 | 500 | 0.2144 | 0.9245 | 0.9245 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
Contrastive-Tension/BERT-Large-NLI-CT
[ "pytorch", "tf", "jax", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
15
2022-10-26T07:24:07Z
--- language: en license: apache-2.0 library_name: diffusers tags: [] datasets: huggan/smithsonian_butterflies_subset metrics: [] --- <!-- This model card has been generated automatically according to the information the training script had access to. You should probably proofread and complete it, then remove this comment. --> # ddpm-butterflies-128 ## Model description This diffusion model is trained with the [🤗 Diffusers](https://github.com/huggingface/diffusers) library on the `huggan/smithsonian_butterflies_subset` dataset. ## Intended uses & limitations #### How to use ```python # TODO: add an example code snippet for running this diffusion pipeline ``` #### Limitations and bias [TODO: provide examples of latent issues and potential remediations] ## Training data [TODO: describe the data used to train the model] ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 16 - eval_batch_size: 16 - gradient_accumulation_steps: 1 - optimizer: AdamW with betas=(None, None), weight_decay=None and epsilon=None - lr_scheduler: None - lr_warmup_steps: 500 - ema_inv_gamma: None - ema_inv_gamma: None - ema_inv_gamma: None - mixed_precision: fp16 ### Training results 📈 [TensorBoard logs](https://huggingface.co/echo840/ddpm-butterflies-128/tensorboard?#scalars)
Coolhand/Abuela
[ "en", "image_restoration", "superresolution", "license:mit" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
We offer FREE consultations, evaluations, and credit education. Our process only takes 30-60 days and we offer a 100% MONEY-BACK GUARANTEE on almost all our services. Don’t let bad credit and financial concerns hold you back anymore. Ask about our FREE [Credit Repair Albuquerque](https://albuquerque.asapcreditrepairusa.com/) Referral Services TODAY!
Coolhand/Sentiment
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-10-26T07:44:30Z
--- license: mit --- ### AliceBeta on Stable Diffusion This is the `<Alice-style>` concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the [Stable Conceptualizer](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) notebook. You can also train your own concepts and load them into the concept libraries using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb). Here is the new concept you will be able to use as a `style`: ![<Alice-style> 0](https://huggingface.co/sd-concepts-library/alicebeta/resolve/main/concept_images/0.jpeg) ![<Alice-style> 1](https://huggingface.co/sd-concepts-library/alicebeta/resolve/main/concept_images/1.jpeg) ![<Alice-style> 2](https://huggingface.co/sd-concepts-library/alicebeta/resolve/main/concept_images/2.jpeg) ![<Alice-style> 3](https://huggingface.co/sd-concepts-library/alicebeta/resolve/main/concept_images/3.jpeg) ![<Alice-style> 4](https://huggingface.co/sd-concepts-library/alicebeta/resolve/main/concept_images/4.jpeg)
Corvus/DialoGPT-medium-CaptainPrice-Extended
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imdb metrics: - accuracy model-index: - name: distilbert-imdb results: - task: name: Text Classification type: text-classification dataset: name: imdb type: imdb args: plain_text metrics: - name: Accuracy type: accuracy value: 0.93072 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-imdb This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 0.1842 - Accuracy: 0.9307 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.217 | 1.0 | 1563 | 0.1842 | 0.9307 | ### Framework versions - Transformers 4.15.0 - Pytorch 1.12.1+cu116 - Datasets 1.17.0 - Tokenizers 0.10.3
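For completeness, a short inference sketch for this sentiment classifier; the repo id is a placeholder, since the card does not state where the checkpoint is hosted.

```python
from transformers import pipeline

classifier = pipeline("text-classification", model="your-username/distilbert-imdb")  # placeholder repo id
print(classifier("A surprisingly touching film with a terrific lead performance."))
```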
Corvus/DialoGPT-medium-CaptainPrice
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2022-10-26T08:13:56Z
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # {MODEL_NAME} This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}') model = AutoModel.from_pretrained('{MODEL_NAME}') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 765 with parameters: ``` {'batch_size': 16, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss` Parameters of the fit()-Method: ``` { "epochs": 1, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": 765, "warmup_steps": 77, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 256, 'do_lower_case': False}) with Transformer model: BertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
CouchCat/ma_ner_v6_distil
[ "pytorch", "distilbert", "token-classification", "en", "transformers", "ner", "license:mit", "autotrain_compatible" ]
token-classification
{ "architectures": [ "DistilBertForTokenClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
CoveJH/ConBot
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
We’re not your average credit repair firm, we truly care, so we only charge for the items we pursue on your report. Not only does this make us one of the FASTEST credit restoration companies, but we’re also one of the most affordable. We offer FREE consultations, evaluations, and credit education. Our process only takes 30-60 days and we offer a 100% MONEY-BACK GUARANTEE on almost all our services. Follow this [link](https://philadelphia.asapcreditrepairusa.com/)
Coverage/sakurajimamai
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - de tags: - summarization - arxiv:2005.00661 - arxiv:2111.09525 - arxiv:2112.08542 - arxiv:2104.04302 - arxiv:2109.09209 license: mit metrics: - rouge widget: - text: "Als Reaktion auf die Brandserie wurde am Mittwoch bei der Kriminalpolizei Würzburg eine Ermittlungskommission eingerichtet. Ich habe den Eindruck, der Brandstifter wird dreister, kommentiert Rosalinde Schraud, die Bürgermeisterin von Estenfeld, die Brandserie. Gerade die letzten beiden Brandstiftungen seien ungewöhnlich gewesen, da sie mitten am Tag und an frequentierten Straßen stattgefunden haben.Kommt der Brandstifter aus Estenfeld?Norbert Walz ist das letzte Opfer des Brandstifters von Estenfeld. Ein Unbekannter hat am Dienstagnachmittag sein Gartenhaus angezündet.Was da in seinem Kopf herumgeht, was da passiert – das ist ja unglaublich! Das kann schon jemand aus dem Ort sein, weil sich derjenige auskennt.Norbert Walz aus Estenfeld.Dass es sich beim Brandstifter wohl um einen Bürger ihrer Gemeinde handele, will die erste Bürgermeisterin von Estenfeld, Rosalinde Schraud, nicht bestätigen: In der Bevölkerung gibt es natürlich Spekulationen, an denen ich mich aber nicht beteiligen will. Laut Schraud reagiert die Bürgerschaft mit vermehrter Aufmerksamkeit auf die Brände: Man guckt mehr in die Nachbarschaft. Aufhören wird die Brandserie wohl nicht, solange der Täter nicht gefasst wird.Es wäre nicht ungewöhnlich, dass der Täter aus der Umgebung von Estenfeld stammt. Wir bitten deshalb Zeugen, die sachdienliche Hinweise sowohl zu den Bränden geben können, sich mit unserer Kriminalpolizei in Verbindung zu setzen.Philipp Hümmer, Sprecher des Polizeipräsidiums UnterfrankenFür Hinweise, die zur Ergreifung des Täters führen, hat das Bayerische Landeskriminalamt eine Belohnung von 2.000 Euro ausgesetzt." example_title: "Example article" inference: parameters: num_beams: 5 --- # German news title gen This is a model for the task of news headline generation in German. While this task is very similar to summarization, there remain differences like length, structure, and language style, which mean that state-of-the-art summarization models are not best suited for headline generation and demand further fine-tuning on this task. For this model, [mT5-base](https://huggingface.co/google/mt5-base) by Google is used as a foundation model. **The model is still a work in progress.** ## Dataset & preprocessing The model was finetuned on a corpus of news articles from [BR24](https://www.br.de/) published between 2015 and 2021. The texts are in German and cover a range of different news topics like politics, sports, and culture, with a focus on topics that are relevant to the people living in Bavaria (Germany). In a preprocessing step, article-headline pairs matching any of the following criteria were filtered out: - very short articles (number of words in the text lower than 3x the number of words in the headline). - articles with headlines containing only words that are not contained in the text (lemmatized and excluding stopwords). - articles with headlines that are just the name of a known text format (e.g. "Das war der Tag", a format summarizing the most important topics of the day). Further, the prefix `summarize: ` was added to all articles to make use of the pretrained summarization capabilities of mT5. After filtering, the corpus contained 89098 article-headline pairs, of which 87306 were used for training, 902 for validation, and 890 for testing.
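The filtering logic can be sketched roughly as follows. This is a minimal illustration of the three criteria above, not the authors' actual preprocessing code; the function and its inputs (pre-lemmatized, stopword-filtered token sets and a list of known format names) are assumptions.

```python
def keep_pair(text, headline, text_lemmas, headline_lemmas, known_formats):
    """Return True if an article-headline pair passes the filters described above."""
    # 1) Drop very short articles: fewer words in the text than 3x the headline
    if len(text.split()) < 3 * len(headline.split()):
        return False
    # 2) Drop pairs whose headline shares no lemmatized, non-stopword token with the text
    if not set(headline_lemmas) & set(text_lemmas):
        return False
    # 3) Drop headlines that are just the name of a known text format
    if headline in known_formats:
        return False
    return True
```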
## Training After multiple finetuning test runs, the present model was trained using the following parameters: - foundation-model: mT5-base - input_prefix: "summarize: " - num_train_epochs: 10 - learning_rate: 5e-5 - warmup_ratio: 0.3 - lr_scheduler_type: constant_with_warmup - per_device_train_batch_size: 3 - gradient_accumulation_steps: 2 - fp16: False Every 5000 steps, a checkpoint is stored and evaluated on the validation set. After the training, the checkpoint with the best cross-entropy loss on the validation set is saved as the final model. ## Usage Because the model was fine-tuned from mT5, the usage is analogous to the T5 model ([see docs](https://huggingface.co/docs/transformers/model_doc/t5)). Another option for using the model for inference is the huggingface [summarization pipeline](https://huggingface.co/docs/transformers/v4.23.1/en/main_classes/pipelines#transformers.SummarizationPipeline). In both cases, the prefix `summarize: ` has to be added to the input texts. To obtain higher-quality headlines, it is recommended to increase the beam size for generation. In the evaluations conducted for this model, a beam size of 5 was used. ### Example: Direct model evaluation ```python from transformers import AutoModelForSeq2SeqLM, AutoTokenizer model_id = "" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForSeq2SeqLM.from_pretrained(model_id) text = "Als Reaktion auf die Brandserie wurde am Mittwoch bei der Kriminalpolizei Würzburg eine Ermittlungskommission eingerichtet. Ich habe den Eindruck, der Brandstifter wird dreister, kommentiert Rosalinde Schraud, die Bürgermeisterin von Estenfeld, die Brandserie. Gerade die letzten beiden Brandstiftungen seien ungewöhnlich gewesen, da sie mitten am Tag und an frequentierten Straßen stattgefunden haben.Kommt der Brandstifter aus Estenfeld?Norbert Walz ist das letzte Opfer des Brandstifters von Estenfeld. Ein Unbekannter hat am Dienstagnachmittag sein Gartenhaus angezündet.Was da in seinem Kopf herumgeht, was da passiert – das ist ja unglaublich! Das kann schon jemand aus dem Ort sein, weil sich derjenige auskennt.Norbert Walz aus Estenfeld.Dass es sich beim Brandstifter wohl um einen Bürger ihrer Gemeinde handele, will die erste Bürgermeisterin von Estenfeld, Rosalinde Schraud, nicht bestätigen: In der Bevölkerung gibt es natürlich Spekulationen, an denen ich mich aber nicht beteiligen will. Laut Schraud reagiert die Bürgerschaft mit vermehrter Aufmerksamkeit auf die Brände: Man guckt mehr in die Nachbarschaft. Aufhören wird die Brandserie wohl nicht, solange der Täter nicht gefasst wird.Es wäre nicht ungewöhnlich, dass der Täter aus der Umgebung von Estenfeld stammt. Wir bitten deshalb Zeugen, die sachdienliche Hinweise sowohl zu den Bränden geben können, sich mit unserer Kriminalpolizei in Verbindung zu setzen.Philipp Hümmer, Sprecher des Polizeipräsidiums UnterfrankenFür Hinweise, die zur Ergreifung des Täters führen, hat das Bayerische Landeskriminalamt eine Belohnung von 2.000 Euro ausgesetzt." 
input_text = "summarize: " + text input_ids = tokenizer(input_text, return_tensors="pt").input_ids outputs = model.generate(input_ids, num_beams=5) generated_headline = tokenizer.decode(outputs[0], skip_special_tokens=True) print(generated_headline) ``` ### Example: Model evaluation using huggingface pipeline ```python from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline model_id = "" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForSeq2SeqLM.from_pretrained(model_id) headline_generator = pipeline( "summarization", model=model, tokenizer=tokenizer, num_beams=5 ) text = "Als Reaktion auf die Brandserie wurde am Mittwoch bei der Kriminalpolizei Würzburg eine Ermittlungskommission eingerichtet. Ich habe den Eindruck, der Brandstifter wird dreister, kommentiert Rosalinde Schraud, die Bürgermeisterin von Estenfeld, die Brandserie. Gerade die letzten beiden Brandstiftungen seien ungewöhnlich gewesen, da sie mitten am Tag und an frequentierten Straßen stattgefunden haben.Kommt der Brandstifter aus Estenfeld?Norbert Walz ist das letzte Opfer des Brandstifters von Estenfeld. Ein Unbekannter hat am Dienstagnachmittag sein Gartenhaus angezündet.Was da in seinem Kopf herumgeht, was da passiert – das ist ja unglaublich! Das kann schon jemand aus dem Ort sein, weil sich derjenige auskennt.Norbert Walz aus Estenfeld.Dass es sich beim Brandstifter wohl um einen Bürger ihrer Gemeinde handele, will die erste Bürgermeisterin von Estenfeld, Rosalinde Schraud, nicht bestätigen: In der Bevölkerung gibt es natürlich Spekulationen, an denen ich mich aber nicht beteiligen will. Laut Schraud reagiert die Bürgerschaft mit vermehrter Aufmerksamkeit auf die Brände: Man guckt mehr in die Nachbarschaft. Aufhören wird die Brandserie wohl nicht, solange der Täter nicht gefasst wird.Es wäre nicht ungewöhnlich, dass der Täter aus der Umgebung von Estenfeld stammt. Wir bitten deshalb Zeugen, die sachdienliche Hinweise sowohl zu den Bränden geben können, sich mit unserer Kriminalpolizei in Verbindung zu setzen.Philipp Hümmer, Sprecher des Polizeipräsidiums UnterfrankenFür Hinweise, die zur Ergreifung des Täters führen, hat das Bayerische Landeskriminalamt eine Belohnung von 2.000 Euro ausgesetzt." input_text = "summarize: " + text generated_headline = headline_generator(input_text)[0]["summary_text"] print(generated_headline) ``` ## Limitations Like most state-of-the-art summarization models this model has issues with the factuality of the generated texts [^factuality]. **It is therefore strongly advised having a human fact-check the generated headlines.** An analysis of possible biases reproduced by the present model, regardless of whether they originate from our finetuning or the underlying mT5 model, is beyond the scope of this work. We assume that biases exist within the model and an analysis will be a task for future work As the model was trained on news articles from the time range 2015-2021, further biases and factual errors could emerge due to topic shifts in news articles and changes in the (e.g. political) situation. ## Evaluation The model was evaluated on a held-out test set consisting of 890 article-headline pairs. For each model the headlines were generated using beam search with a beam width of 5. 
### Quantitative | model | Rouge1 | Rouge2 | RougeL | RougeLsum | |-|-|-|-|-| | [T-Systems-onsite/mt5-small-sum-de-en-v2](https://huggingface.co/T-Systems-onsite/mt5-small-sum-de-en-v2)| 0.107 | 0.0297 | 0.098 | 0.098 | | aiautomationlab/german-news-title-gen-mt5 | 0.3131 | 0.0873 | 0.1997 | 0.1997 | To evaluate the factuality of the generated headlines with respect to the input text, we use three state-of-the-art metrics for summary evaluation (the parameters were chosen according to the recommendations from the respective papers or GitHub repositories). Because these metrics are only available for English, the texts and generated headlines were translated from German to English using the [DeepL API](https://www.deepl.com/en/docs-api/) in an additional preprocessing step for this factuality evaluation. - **SummaC-CZ** [^summac] Yields a score between -1 and 1, representing the difference between entailment probability and contradiction probability (-1: the headline is not entailed in the text and is completely contradicted by it, 1: the headline is fully entailed in the text and not contradicted by it). Parameters: - `model_name`: [vitc](https://huggingface.co/tals/albert-xlarge-vitaminc-mnli) - **QAFactEval** [^qafacteval] Using the Lerc Quip score, which is reported to perform best in the corresponding paper. The score yields a value between 0 and 5 representing the overlap between answers based on the headline and text to questions generated from the headline (0: no overlap, 5: perfect overlap). Parameters: - `use_lerc_quip`: True - **DAE (dependency arc entailment)** [^dae] Yields a binary value of either 0 or 1, representing whether all dependency arcs in the headline are entailed in the text (0: at least one dependency arc is not entailed, 1: all dependency arcs are entailed). Parameters: - model checkpoint: DAE_xsum_human_best_ckpt - `model_type`: model_type - `max_seq_length`: 512 Each metric is calculated for all article-headline pairs in the test set, and the respective mean score over the test set is reported. | model | SummaC-CZ | QAFactEval | DAE | |-|-|-|-| | [T-Systems-onsite/mt5-small-sum-de-en-v2](https://huggingface.co/T-Systems-onsite/mt5-small-sum-de-en-v2) | 0.6969 | 3.3023 | 0.8292 | | aiautomationlab/german-news-title-gen-mt5 | 0.4419 | 1.9265 | 0.7438 | It can be observed that our model scores consistently lower than the T-Systems one. Following human evaluation, it seems that to match the structure and style specific to headlines, the headline generation model has to be more abstractive than a summarization model, which leads to a higher frequency of hallucinations in the generated output. ### Qualitative A qualitative evaluation conducted by members of the BR AI + Automation Lab showed that the model succeeds in producing headlines that match the language and style of news headlines, but also confirms that there are issues with the factual consistency common to state-of-the-art summarization models. ## Future work Future work on this model will focus on generating headlines with higher factual consistency regarding the text. Ideas to achieve this goal include: - Use of coreference resolution as an additional preprocessing step for making the relations within the text more explicit to the model. - Use of contrastive learning [^contrastive_learning] - Use of different models for different news topics; as different topics seem to be prone to different types of errors, more specialized models may be able to improve performance. 
- Use of factuality metric models for reranking beam search candidates in the generation step. - Perform analysis of biases included in the model [^factuality]: Maynez, Joshua, Shashi Narayan, Bernd Bohnet, and Ryan McDonald. “On Faithfulness and Factuality in Abstractive Summarization.” In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, 1906–19. Online: Association for Computational Linguistics, 2020. https://doi.org/10.18653/v1/2020.acl-main.173. [^summac]: Laban, Philippe, Tobias Schnabel, Paul N. Bennett, and Marti A. Hearst. “SummaC: Re-Visiting NLI-Based Models for Inconsistency Detection in Summarization.” Transactions of the Association for Computational Linguistics 10 (February 9, 2022): 163–77. https://doi.org/10.1162/tacl_a_00453. Code: https://github.com/tingofurro/summac [^qafacteval]: Fabbri, Alexander R., Chien-Sheng Wu, Wenhao Liu, and Caiming Xiong. “QAFactEval: Improved QA-Based Factual Consistency Evaluation for Summarization.” arXiv, April 29, 2022. https://doi.org/10.48550/arXiv.2112.08542. Code: https://github.com/salesforce/QAFactEval [^dae]: Goyal, Tanya, and Greg Durrett. “Annotating and Modeling Fine-Grained Factuality in Summarization.” arXiv, April 9, 2021. http://arxiv.org/abs/2104.04302. Code: https://github.com/tagoyal/factuality-datasets [^contrastive_learning]: Cao, Shuyang, and Lu Wang. “CLIFF: Contrastive Learning for Improving Faithfulness and Factuality in Abstractive Summarization.” In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, 6633–49. Online and Punta Cana, Dominican Republic: Association for Computational Linguistics, 2021. https://doi.org/10.18653/v1/2021.emnlp-main.532.
Coyotl/DialoGPT-test2-arthurmorgan
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- title: Video Vision Transformer on medmnist emoji: 🧑‍⚕️ colorFrom: red colorTo: green sdk: gradio app_file: app.py pinned: false license: apache-2.0 library_name: keras --- ## Keras Implementation of Video Vision Transformer on medmnist This repo contains the model [for this Keras example on Video Vision Transformer](https://keras.io/examples/vision/vivit/). ## Background Information This example implements [ViViT: A Video Vision Transformer](https://arxiv.org/abs/2103.15691) by Arnab et al., a pure Transformer-based model for video classification. The authors propose a novel embedding scheme and a number of Transformer variants to model video clips. ## Datasets We use the [MedMNIST v2: A Large-Scale Lightweight Benchmark for 2D and 3D Biomedical Image Classification](https://medmnist.com/) dataset. ## Training Parameters ``` # DATA DATASET_NAME = "organmnist3d" BATCH_SIZE = 32 AUTO = tf.data.AUTOTUNE INPUT_SHAPE = (28, 28, 28, 1) NUM_CLASSES = 11 # OPTIMIZER LEARNING_RATE = 1e-4 WEIGHT_DECAY = 1e-5 # TRAINING EPOCHS = 80 # TUBELET EMBEDDING PATCH_SIZE = (8, 8, 8) NUM_PATCHES = (INPUT_SHAPE[0] // PATCH_SIZE[0]) ** 2 # ViViT ARCHITECTURE LAYER_NORM_EPS = 1e-6 PROJECTION_DIM = 128 NUM_HEADS = 8 NUM_LAYERS = 8 ```
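The tubelet embedding at the core of ViViT can be sketched in Keras as below. This mirrors the linked Keras example and is an illustrative sketch, not necessarily the exact code behind this checkpoint:

```python
from tensorflow.keras import layers

class TubeletEmbedding(layers.Layer):
    """Project non-overlapping spatio-temporal tubes of a video into token embeddings."""

    def __init__(self, embed_dim, patch_size, **kwargs):
        super().__init__(**kwargs)
        # A 3D convolution with stride == kernel size extracts non-overlapping tubelets
        self.projection = layers.Conv3D(
            filters=embed_dim,
            kernel_size=patch_size,
            strides=patch_size,
            padding="VALID",
        )
        self.flatten = layers.Reshape(target_shape=(-1, embed_dim))

    def call(self, videos):
        projected_patches = self.projection(videos)
        return self.flatten(projected_patches)
```

With `PATCH_SIZE = (8, 8, 8)` and `PROJECTION_DIM = 128`, each 28x28x28 volume is carved into non-overlapping 8x8x8 tubelets that are linearly projected to 128-dimensional tokens.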
Craig/mGqFiPhu
[ "sentence-transformers", "feature-extraction", "sentence-similarity", "transformers", "license:apache-2.0" ]
feature-extraction
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - pawsx metrics: - accuracy - f1 model-index: - name: distilbert-base-uncased-finetuned-paws results: - task: name: Text Classification type: text-classification dataset: name: pawsx type: pawsx args: en metrics: - name: Accuracy type: accuracy value: 0.8355 - name: F1 type: f1 value: 0.8361579553422098 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-paws This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the pawsx dataset. It achieves the following results on the evaluation set: - Loss: 0.3850 - Accuracy: 0.8355 - F1: 0.8362 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.6715 | 1.0 | 772 | 0.5982 | 0.6785 | 0.6799 | | 0.4278 | 2.0 | 1544 | 0.3850 | 0.8355 | 0.8362 | ### Framework versions - Transformers 4.13.0 - Pytorch 1.12.1+cu113 - Datasets 1.16.1 - Tokenizers 0.10.3
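## How to use

A minimal inference sketch. PAWS-X is a sentence-pair task, so the two sentences are passed as a pair; the Hub id below is an assumption taken from the card name, so replace it with the real repository path.

```python
from transformers import pipeline

# Hypothetical repository id from the card name -- replace with the real Hub path
classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-paws")

# The text-classification pipeline accepts a sentence pair as a dict
print(classifier({
    "text": "Flights from New York to Florida were delayed on Monday.",
    "text_pair": "Flights from Florida to New York were delayed on Monday.",
}))
```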
Craig/paraphrase-MiniLM-L6-v2
[ "pytorch", "bert", "arxiv:1908.10084", "sentence-transformers", "feature-extraction", "sentence-similarity", "transformers", "license:apache-2.0" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,026
2022-10-26T10:05:41Z
--- language: en thumbnail: http://www.huggingtweets.com/michiokaku/1666778868543/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/2477428816/oyj5obfw5nrjiqhtylp9_400x400.jpeg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Dr. Michio Kaku</div> <div style="text-align: center; font-size: 14px;">@michiokaku</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Dr. Michio Kaku. | Data | Dr. Michio Kaku | | --- | --- | | Tweets downloaded | 2115 | | Retweets | 22 | | Short tweets | 168 | | Tweets kept | 1925 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/3j2uv5cn/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @michiokaku's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/3uof51hl) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/3uof51hl/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/michiokaku') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
Crispy/dialopt-small-kratos
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-10-26T10:30:36Z
--- language: en thumbnail: http://www.huggingtweets.com/alberteinstein-physicstoday-physicstweet/1666780409313/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/879355674957926400/VSGZHGib_400x400.jpg&#39;)"> </div> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1576931585408073728/9Y0JqcIu_400x400.jpg&#39;)"> </div> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/2821633714/ea74608b616cb0dc06a2562c01dcbe2e_400x400.jpeg&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI CYBORG 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Albert Einstein & Physics Today & Physics Tweet</div> <div style="text-align: center; font-size: 14px;">@alberteinstein-physicstoday-physicstweet</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Albert Einstein & Physics Today & Physics Tweet. | Data | Albert Einstein | Physics Today | Physics Tweet | | --- | --- | --- | --- | | Tweets downloaded | 3251 | 3249 | 3250 | | Retweets | 126 | 754 | 0 | | Short tweets | 101 | 14 | 0 | | Tweets kept | 3024 | 2481 | 3250 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/uingbn5k/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @alberteinstein-physicstoday-physicstweet's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/vwa3h6sy) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/vwa3h6sy/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/alberteinstein-physicstoday-physicstweet') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. 
## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
CrypticT1tan/DialoGPT-medium-harrypotter
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-10-26T10:44:58Z
--- language: - pt thumbnail: "Portuguese BERT for the Legal Domain" pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - transformers datasets: - assin - assin2 - stsb_multi_mt - rufimelo/PortugueseLegalSentences-v1 widget: - source_sentence: "O advogado apresentou as provas ao juíz." sentences: - "O juíz leu as provas." - "O juíz leu o recurso." - "O juíz atirou uma pedra." example_title: "Example 1" model-index: - name: BERTimbau results: - task: name: STS type: STS metrics: - name: Pearson Correlation - assin Dataset type: Pearson Correlation value: 0.81326 - name: Pearson Correlation - assin2 Dataset type: Pearson Correlation value: 0.83130 - name: Pearson Correlation - stsb_multi_mt pt Dataset type: Pearson Correlation value: 0.786314 --- # rufimelo/Legal-BERTimbau-large-TSDAE-sts-v2 This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 1024 dimensional dense vector space and can be used for tasks like clustering or semantic search. rufimelo/Legal-BERTimbau-large-TSDAE-sts-v2 is based on Legal-BERTimbau-large, which derives from [BERTimbau](https://huggingface.co/neuralmind/bert-large-portuguese-cased) large. It is adapted to the Portuguese legal domain and trained for STS on Portuguese datasets. ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["Isto é um exemplo", "Isto é um outro exemplo"] model = SentenceTransformer('rufimelo/Legal-BERTimbau-large-TSDAE-sts-v2') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('rufimelo/Legal-BERTimbau-large-TSDAE-sts-v2') model = AutoModel.from_pretrained('rufimelo/Legal-BERTimbau-large-TSDAE-sts-v2') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results STS | Model| Assin | Assin2|stsb_multi_mt pt| avg| | ---------------------------------------- | ---------- | ---------- |---------- |---------- | | Legal-BERTimbau-sts-base| 0.71457| 0.73545 | 0.72383|0.72462| | Legal-BERTimbau-sts-base-ma| 0.74874 | 0.79532|0.82254 |0.78886| | Legal-BERTimbau-sts-base-ma-v2| 0.75481 | 0.80262|0.82178|0.79307| | Legal-BERTimbau-base-TSDAE-sts|0.78814 |0.81380 |0.75777|0.78657| | Legal-BERTimbau-sts-large| 0.76629| 0.82357 | 0.79120|0.79369| | Legal-BERTimbau-sts-large-v2| 0.76299 | 0.81121|0.81726 |0.79715| | Legal-BERTimbau-sts-large-ma| 0.76195| 0.81622 | 0.82608|0.80142| | Legal-BERTimbau-sts-large-ma-v2| 0.7836| 0.8462| 0.8261| 0.81863| | Legal-BERTimbau-sts-large-ma-v3| 0.7749| **0.8470**| 0.8364| **0.81943**| | Legal-BERTimbau-large-v2-sts| 0.71665| 0.80106| 0.73724| 0.75165| | Legal-BERTimbau-large-TSDAE-sts| 0.72376| 0.79261| 0.73635| 0.75090| | Legal-BERTimbau-large-TSDAE-sts-v2| 0.81326| 0.83130| 0.786314| 0.81029| | Legal-BERTimbau-large-TSDAE-sts-v3|0.80703 |0.82270 |0.77638 |0.80204 | | ---------------------------------------- | ---------- |---------- |---------- |---------- | | BERTimbau base Fine-tuned for STS|**0.78455** | 0.80626|0.82841|0.80640| | BERTimbau large Fine-tuned for STS|0.78193 | 0.81758|0.83784|0.81245| | ---------------------------------------- | ---------- |---------- |---------- |---------- | | paraphrase-multilingual-mpnet-base-v2| 0.71457| 0.79831 |0.83999 |0.78429| | paraphrase-multilingual-mpnet-base-v2 Fine-tuned with assin(s)| 0.77641|0.79831 |**0.84575**|0.80682| ## Training rufimelo/Legal-BERTimbau-large-TSDAE-sts-v2 is based on rufimelo/Legal-BERTimbau-large-TSDAE, which derives from [BERTimbau](https://huggingface.co/neuralmind/bert-large-portuguese-cased) large. rufimelo/Legal-BERTimbau-large-TSDAE was trained with TSDAE: 50000 cleaned documents (https://huggingface.co/datasets/rufimelo/PortugueseLegalSentences-v1) 'lr': 1e-5 It was trained for Semantic Textual Similarity, being submitted to a fine-tuning stage with the [assin](https://huggingface.co/datasets/assin), [assin2](https://huggingface.co/datasets/assin2) and [stsb_multi_mt pt](https://huggingface.co/datasets/stsb_multi_mt) datasets. 
'lr': 1e-5 ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: BertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False}) ) ``` ## Citing & Authors If you use this work, please cite: ```bibtex @inproceedings{souza2020bertimbau, author = {F{\'a}bio Souza and Rodrigo Nogueira and Roberto Lotufo}, title = {{BERT}imbau: pretrained {BERT} models for {B}razilian {P}ortuguese}, booktitle = {9th Brazilian Conference on Intelligent Systems, {BRACIS}, Rio Grande do Sul, Brazil, October 20-23 (to appear)}, year = {2020} } @inproceedings{fonseca2016assin, title={ASSIN: Avaliacao de similaridade semantica e inferencia textual}, author={Fonseca, E and Santos, L and Criscuolo, Marcelo and Aluisio, S}, booktitle={Computational Processing of the Portuguese Language-12th International Conference, Tomar, Portugal}, pages={13--15}, year={2016} } @inproceedings{real2020assin, title={The assin 2 shared task: a quick overview}, author={Real, Livy and Fonseca, Erick and Oliveira, Hugo Goncalo}, booktitle={International Conference on Computational Processing of the Portuguese Language}, pages={406--412}, year={2020}, organization={Springer} } @InProceedings{huggingface:dataset:stsb_multi_mt, title = {Machine translated multilingual STS benchmark dataset.}, author={Philip May}, year={2021}, url={https://github.com/PhilipMay/stsb-multi-mt} } ```
Cryptikdw/DialoGPT-small-rick
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2022-10-26T10:46:45Z
--- tags: - generated_from_trainer datasets: - bc2gm_corpus metrics: - precision - recall - f1 - accuracy model-index: - name: biobert-base-cased-v1.2-bc2gm-ner results: - task: name: Token Classification type: token-classification dataset: name: bc2gm_corpus type: bc2gm_corpus config: bc2gm_corpus split: train args: bc2gm_corpus metrics: - name: Precision type: precision value: 0.7988356059445381 - name: Recall type: recall value: 0.8243478260869566 - name: F1 type: f1 value: 0.8113912231559292 - name: Accuracy type: accuracy value: 0.9772069842818806 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # biobert-base-cased-v1.2-bc2gm-ner This model is a fine-tuned version of [dmis-lab/biobert-base-cased-v1.2](https://huggingface.co/dmis-lab/biobert-base-cased-v1.2) on the bc2gm_corpus dataset. It achieves the following results on the evaluation set: - Loss: 0.1528 - Precision: 0.7988 - Recall: 0.8243 - F1: 0.8114 - Accuracy: 0.9772 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.057 | 1.0 | 782 | 0.0670 | 0.7446 | 0.8051 | 0.7736 | 0.9738 | | 0.0586 | 2.0 | 1564 | 0.0689 | 0.7689 | 0.8106 | 0.7892 | 0.9755 | | 0.0123 | 3.0 | 2346 | 0.0715 | 0.7846 | 0.8076 | 0.7959 | 0.9750 | | 0.0002 | 4.0 | 3128 | 0.0896 | 0.7942 | 0.8199 | 0.8068 | 0.9767 | | 0.0004 | 5.0 | 3910 | 0.1119 | 0.7971 | 0.8201 | 0.8084 | 0.9765 | | 0.0004 | 6.0 | 4692 | 0.1192 | 0.7966 | 0.8337 | 0.8147 | 0.9768 | | 0.013 | 7.0 | 5474 | 0.1274 | 0.7932 | 0.8266 | 0.8095 | 0.9773 | | 0.0236 | 8.0 | 6256 | 0.1419 | 0.7976 | 0.8213 | 0.8093 | 0.9771 | | 0.0004 | 9.0 | 7038 | 0.1519 | 0.8004 | 0.8261 | 0.8130 | 0.9772 | | 0.0 | 10.0 | 7820 | 0.1528 | 0.7988 | 0.8243 | 0.8114 | 0.9772 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
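## How to use

A minimal inference sketch for gene-mention tagging. The Hub id below is an assumption taken from the card name; replace it with the real repository path.

```python
from transformers import pipeline

# Hypothetical repository id from the card name -- replace with the real Hub path
ner = pipeline(
    "token-classification",
    model="biobert-base-cased-v1.2-bc2gm-ner",
    aggregation_strategy="simple",  # merge word pieces into whole entity spans
)

print(ner("The BRCA1 gene is associated with hereditary breast cancer."))
```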
Culmenus/checkpoint-168500-finetuned-de-to-is_nr2
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-10-26T10:52:27Z
--- tags: - generated_from_trainer datasets: - decision_transformer_gym_replay model-index: - name: output results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # output This model is a fine-tuned version of [](https://huggingface.co/) on the decision_transformer_gym_replay dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 64 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
Culmenus/opus-mt-de-is-finetuned-de-to-is
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers language: - it --- # sentence-BERTino-3-64 This is a distilled [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 64 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('efederici/sentence-BERTino-3-64') embeddings = model.encode(sentences) print(embeddings) ``` ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 1724 with parameters: ``` {'batch_size': 64, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.MSELoss.MSELoss` ## Full Model Architecture ``` SentenceTransformer( (0): SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: DistilBertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) (1): Dense({'in_features': 768, 'out_features': 64, 'bias': True, 'activation_function': 'torch.nn.modules.activation.Tanh'}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
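As a usage note, the 64-dimensional embeddings can be compared directly with cosine similarity; a minimal sketch:

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer('efederici/sentence-BERTino-3-64')

embeddings = model.encode(
    ["Questa è una frase di esempio", "Ogni frase viene convertita"],
    convert_to_tensor=True,
)
# Cosine similarity between the two 64-dimensional sentence embeddings
print(util.cos_sim(embeddings[0], embeddings[1]))
```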
Culmenus/opus-mt-de-is-finetuned-de-to-is_35g65cc_1
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
We want to get to know you, but first you should get to know us! We are a family-owned and operated Credit Repair company, founded in 2013. Our goal is to help you achieve financial success and reach your credit goals. Follow this [link](https://elpaso.asapcreditrepairusa.com/)
Culmenus/opus-mt-de-is-finetuned-de-to-is_35g65cc_2
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-10-26T11:06:02Z
Are you looking for [credit repair McAllen](https://mcallen.asapcreditrepairusa.com/)? You are at the right place. ASAP Credit Repair McAllen will help you repair your credit scores by removing derogatory items from your accounts. Call or text us today!
Culmenus/opus-mt-de-is-finetuned-de-to-is_nr2
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
2022-10-26T11:39:37Z
--- license: apache-2.0 --- # OFA-large-caption ## Introduction This is the **large** version of the OFA model finetuned for **image captioning**. OFA is a unified multimodal pretrained model that unifies modalities (i.e., cross-modality, vision, language) and tasks (e.g., image generation, visual grounding, image captioning, image classification, text generation, etc.) in a simple sequence-to-sequence learning framework. The directory includes 4 files, namely `config.json`, which contains the model configuration, `vocab.json` and `merge.txt` for our OFA tokenizer, and lastly `pytorch_model.bin`, which contains the model weights. There is no need to worry about a mismatch between Fairseq and transformers, since we have already addressed the issue. ## How to use To use it in transformers, please refer to https://github.com/OFA-Sys/OFA/tree/feature/add_transformers. Install transformers and download the model as shown below. ``` git clone --single-branch --branch feature/add_transformers https://github.com/OFA-Sys/OFA.git pip install OFA/transformers/ git clone https://huggingface.co/OFA-Sys/OFA-large-caption ``` Afterwards, point `ckpt_dir` to the path of the downloaded OFA-large-caption checkpoint, and prepare an image for the test example below. Also, ensure that you have pillow and torchvision in your environment. ```python >>> from PIL import Image >>> from torchvision import transforms >>> from transformers import OFATokenizer, OFAModel >>> from generate import sequence_generator >>> import torch >>> mean, std = [0.5, 0.5, 0.5], [0.5, 0.5, 0.5] >>> resolution = 480 >>> patch_resize_transform = transforms.Compose([ lambda image: image.convert("RGB"), transforms.Resize((resolution, resolution), interpolation=Image.BICUBIC), transforms.ToTensor(), transforms.Normalize(mean=mean, std=std) ]) >>> tokenizer = OFATokenizer.from_pretrained(ckpt_dir) >>> txt = " what does the image describe?" >>> inputs = tokenizer([txt], return_tensors="pt").input_ids >>> img = Image.open(path_to_image) >>> patch_img = patch_resize_transform(img).unsqueeze(0) # using the generator of fairseq version >>> model = OFAModel.from_pretrained(ckpt_dir, use_cache=True) >>> generator = sequence_generator.SequenceGenerator( tokenizer=tokenizer, beam_size=5, max_len_b=16, min_len=0, no_repeat_ngram_size=3, ) >>> data = {} >>> data["net_input"] = {"input_ids": inputs, 'patch_images': patch_img, 'patch_masks':torch.tensor([True])} >>> gen_output = generator.generate([model], data) >>> gen = [gen_output[i][0]["tokens"] for i in range(len(gen_output))] # using the generator of huggingface version >>> model = OFAModel.from_pretrained(ckpt_dir, use_cache=False) >>> gen = model.generate(inputs, patch_images=patch_img, num_beams=5, no_repeat_ngram_size=3) >>> print(tokenizer.batch_decode(gen, skip_special_tokens=True)) ```
CurtisBowser/DialoGPT-medium-sora-two
[ "pytorch", "conversational" ]
conversational
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: codebert-base-finetuned-code-ner-15e results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # codebert-base-finetuned-code-ner-15e This model is a fine-tuned version of [microsoft/codebert-base](https://huggingface.co/microsoft/codebert-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.3831 - Precision: 0.6363 - Recall: 0.6494 - F1: 0.6428 - Accuracy: 0.9197 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 15 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 1.0 | 191 | 0.4566 | 0.5021 | 0.4220 | 0.4585 | 0.8827 | | No log | 2.0 | 382 | 0.3756 | 0.5699 | 0.5764 | 0.5731 | 0.9043 | | 0.5133 | 3.0 | 573 | 0.3605 | 0.6001 | 0.5767 | 0.5882 | 0.9093 | | 0.5133 | 4.0 | 764 | 0.3500 | 0.6130 | 0.6130 | 0.6130 | 0.9153 | | 0.5133 | 5.0 | 955 | 0.3501 | 0.6337 | 0.6172 | 0.6254 | 0.9178 | | 0.2203 | 6.0 | 1146 | 0.3645 | 0.6250 | 0.6352 | 0.6300 | 0.9163 | | 0.2203 | 7.0 | 1337 | 0.3488 | 0.6263 | 0.6422 | 0.6341 | 0.9189 | | 0.1457 | 8.0 | 1528 | 0.3575 | 0.6372 | 0.6397 | 0.6384 | 0.9194 | | 0.1457 | 9.0 | 1719 | 0.3662 | 0.6406 | 0.6343 | 0.6375 | 0.9189 | | 0.1457 | 10.0 | 1910 | 0.3613 | 0.6374 | 0.6473 | 0.6423 | 0.9201 | | 0.107 | 11.0 | 2101 | 0.3716 | 0.6329 | 0.6544 | 0.6435 | 0.9197 | | 0.107 | 12.0 | 2292 | 0.3754 | 0.6328 | 0.6487 | 0.6406 | 0.9193 | | 0.107 | 13.0 | 2483 | 0.3826 | 0.6395 | 0.6490 | 0.6443 | 0.9204 | | 0.0863 | 14.0 | 2674 | 0.3821 | 0.6368 | 0.6535 | 0.6451 | 0.9200 | | 0.0863 | 15.0 | 2865 | 0.3831 | 0.6363 | 0.6494 | 0.6428 | 0.9197 | ### Evaluation results | | Algorithm | Application | Class | Code_Block | Data_Structure | Data_Type | Device | Error_Name | File_Name | File_Type | Function | HTML_XML_Tag | Keyboard_IP | Language | Library | Operating_System | Output_Block | User_Interface_Element | User_Name | Value | Variable | Version | Website | overall_precision | overall_recall | overall_f1 | overall_accuracy | |:----------|------------:|--------------:|------------:|-------------:|-----------------:|------------:|----------:|-------------:|------------:|------------:|-----------:|---------------:|--------------:|-----------:|-----------:|-------------------:|---------------:|-------------------------:|------------:|-----------:|-----------:|-----------:|----------:|--------------------:|-----------------:|-------------:|-------------------:| | precision | 0 | 0.619835 | 0.680851 | 0.455629 | 0.813187 | 0.592593 | 0.395062 | 0.181818 | 0.800505 | 0.775956 | 0.757664 | 0.585366 | 0.333333 | 0.689769 | 0.61807 | 0.769231 | 0.0212766 | 0.542214 | 0.4375 | 0.370236 | 0.560479 | 0.883721 | 0.382353 | 0.626308 | 0.642171 | 0.63414 | 0.918927 | | recall | 0 | 0.677711 | 0.696864 | 0.494253 | 0.840909 | 0.8 | 0.533333 | 0.333333 | 
0.794486 | 0.628319 | 0.631387 | 0.470588 | 0.0169492 | 0.81323 | 0.546279 | 0.843373 | 0.04 | 0.653846 | 0.518519 | 0.52987 | 0.54482 | 0.914089 | 0.270833 | 0.626308 | 0.642171 | 0.63414 | 0.918927 | | f1 | 0 | 0.647482 | 0.688765 | 0.474156 | 0.826816 | 0.680851 | 0.453901 | 0.235294 | 0.797484 | 0.694377 | 0.688786 | 0.521739 | 0.0322581 | 0.746429 | 0.579961 | 0.804598 | 0.0277778 | 0.592821 | 0.474576 | 0.435897 | 0.552538 | 0.898649 | 0.317073 | 0.626308 | 0.642171 | 0.63414 | 0.918927 | | number | 31 | 664 | 1148 | 696 | 264 | 120 | 60 | 30 | 798 | 226 | 822 | 102 | 59 | 257 | 551 | 83 | 25 | 442 | 54 | 385 | 859 | 291 | 48 | 0.626308 | 0.642171 | 0.63414 | 0.918927 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
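The card above does not include a usage snippet, so here is a minimal sketch of tagging code entities with this kind of token-classification checkpoint through the `transformers` pipeline. The repository id below is a placeholder, since the card does not state the full Hub id of the checkpoint; substitute the actual one before running.

```python
from transformers import pipeline

# Placeholder repository id: replace it with the actual Hub id of the checkpoint above.
model_id = "your-username/codebert-base-finetuned-code-ner-15e"

# "simple" aggregation merges sub-word pieces back into whole entity spans.
code_ner = pipeline("token-classification", model=model_id, aggregation_strategy="simple")

text = "Use pandas.read_csv to load the CSV into a DataFrame and then call df.head()."
for entity in code_ner(text):
    print(entity["entity_group"], "->", entity["word"], round(entity["score"], 3))
```

The entity groups returned should correspond to the labels in the evaluation table above (Library, Function, Data_Structure, and so on).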
Cyrell/Cyrell
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-10-28T16:30:49Z
--- tags: - image-classification - pytorch library_name: generic metrics: - accuracy model-index: - name: krenzcolor_chkpt_classifier results: - task: name: Image Classification type: pair-classification metrics: - name: Accuracy type: accuracy value: 0.9196428656578064 --- # krenzcolor_chkpt_classifier ## KK Color Course - homework checkpoint inspection AI Demo for checkpoint classification of the homework in the Art course by "Krenz Cushart" This AI classifier judges the three checkpoints in students' L3 and L4 copy-study assignments in the course and checks whether each one passes. The six classes in detail are: - (1) chk1_fail &nbsp;|&nbsp; (2) chk1_pass - (3) chk2_fail &nbsp;|&nbsp; (4) chk2_pass - (5) chk3_fail &nbsp;|&nbsp; (6) chk3_pass Here chk1, chk2 and chk3 stand for checkpoints one, two and three; fail and pass indicate whether the homework passes. ## Quick tour: Drag one of the images below into the box on the right (Hosted inference API) Note: the first time the model is loaded it takes a while to run: ~20 seconds #### chk1_pass ![chk1_pass](images/L4_1_chk1_pass.jpg) #### chk2_pass ![chk2_pass](images/L4_1_chk2_pass.jpg) #### chk3_pass ![chk3_pass](images/L4_1_chk3_pass.jpg) ## How to use ### Fill in your copy study using one of the templates below Note: be sure to resize the image to 224 x 224 pixels before placing it into the blank area on the right side of the template ![L3-1 old man plaster cast](images/L3_1_tmp.jpg) ![L3-2 fabric](images/L3_2_tmp.jpg) ![L3-1 chicken](images/L4_1_tmp.jpg) ![L3-1 cloud](images/L4_2_tmp.jpg) ### Upload the image to the box on the right ![Upload the image to the box on the right](images/input_box.png) ### After uploading, the probability of each class is displayed ![Example](images/example.png)
Czapla/Rick
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: yespublic results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # yespublic This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 1.2892 - Train End Logits Accuracy: 0.6617 - Train Start Logits Accuracy: 0.6190 - Validation Loss: 1.0393 - Validation End Logits Accuracy: 0.7213 - Validation Start Logits Accuracy: 0.6877 - Epoch: 0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 7377, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Train End Logits Accuracy | Train Start Logits Accuracy | Validation Loss | Validation End Logits Accuracy | Validation Start Logits Accuracy | Epoch | |:----------:|:-------------------------:|:---------------------------:|:---------------:|:------------------------------:|:--------------------------------:|:-----:| | 1.2892 | 0.6617 | 0.6190 | 1.0393 | 0.7213 | 0.6877 | 0 | ### Framework versions - Transformers 4.20.1 - TensorFlow 2.6.4 - Datasets 2.1.0 - Tokenizers 0.12.1
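Because this card was generated from a Keras callback, the checkpoint presumably carries TensorFlow weights; below is a minimal extractive-QA sketch under that assumption. The repository id is a placeholder (the card only gives the run name `yespublic`), so substitute the real Hub id before running.

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForQuestionAnswering

# Placeholder repository id: the card only gives the run name "yespublic".
model_id = "your-username/yespublic"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = TFAutoModelForQuestionAnswering.from_pretrained(model_id)

question = "What base model was fine-tuned?"
context = "The run fine-tuned bert-base-uncased for one epoch on an unknown QA dataset."

inputs = tokenizer(question, context, return_tensors="tf")
outputs = model(**inputs)

# Take the highest-scoring start and end positions and decode the span between them.
start = int(tf.argmax(outputs.start_logits, axis=-1)[0])
end = int(tf.argmax(outputs.end_logits, axis=-1)[0])
answer = tokenizer.decode(inputs["input_ids"][0, start : end + 1])
print(answer)
```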
D3xter1922/electra-base-discriminator-finetuned-cola
[ "pytorch", "tensorboard", "electra", "text-classification", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
{ "architectures": [ "ElectraForSequenceClassification" ], "model_type": "electra", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
68
2022-10-26T12:37:18Z
--- tags: - generated_from_trainer metrics: - accuracy model-index: - name: swin-food102 results: [] datasets: - juliensimon/food102 --- # swin-food102 This model is a fine-tuned version of [juliensimon/autotrain-food101-1471154053](https://huggingface.co/juliensimon/autotrain-food101-1471154053) on the [food102](https://huggingface.co/datasets/juliensimon/food102) dataset, namely the [food101](https://huggingface.co/datasets/food101) dataset with an extra class generated with a Stable Diffusion model. A detailed walk-through is available on [YouTube](https://youtu.be/sIe0eo3fYQ4). The model achieves the following results on the evaluation set: - Loss: 0.2510 - Accuracy: 0.9338 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 64 - seed: 42 - gradient_accumulation_steps: 8 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.1648 | 1.0 | 597 | 0.3118 | 0.9218 | | 0.31 | 2.0 | 1194 | 0.2606 | 0.9322 | | 0.2488 | 3.0 | 1791 | 0.2510 | 0.9338 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu102 - Datasets 2.4.0 - Tokenizers 0.13.1
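As a usage note for the card above: a fine-tuned Swin classifier like this can be queried through the image-classification pipeline. The repository id below is an assumption inferred from the model name and the linked juliensimon datasets; adjust it if the checkpoint lives elsewhere.

```python
from PIL import Image
from transformers import pipeline

# Assumed repository id, inferred from the card; adjust it to the actual Hub location.
classifier = pipeline("image-classification", model="juliensimon/swin-food102")

image = Image.open("my_dish.jpg")  # any local food photo
for prediction in classifier(image, top_k=5):
    print(f"{prediction['label']}: {prediction['score']:.3f}")
```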
DARKVIP3R/DialoGPT-medium-Anakin
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- language: - en tags: - cyberpunk - anime - stable-diffusion - rebecca --- # REBECCA _Shameless plug: If you want a *free open source* fully local booru alternative, I have [created one](https://github.com/demibit/stable-toolkit). It automatically tags your images and lets you easily filter by generation parameters. The [wiki](https://github.com/demibit/stable-toolkit/wiki) has some pictures of how it looks._ ## DATASET - **71a**: 71 images of Rebecca, with dimensions of 640x640. ## VERSION - **v1**: Trained with clip skip 1 (off) (only applicable for embedding/hypernetwork, as I don't know the status for dreambooth). NAI-analog base model. ## DREAMBOOTH [Trained via this GUI](https://github.com/n00mkrad/text2image-gui), but note that it's quite restrictive in training options. - **rebecca-71a-v1**: Trained with "very high quality" preset and "normal" learning rate. - [Examples](https://imgur.com/a/kmcS2Ve) ## EMBEDDINGS _Note: You'll have to rename your chosen embedding to "rebecca"_ - **rebecca-71a-v1a-embeddings**: Embedding trained with voldy's web-ui with 8 tokens per vector, using a learning rate of _0.02:200, 0.01:1000, 0.005:2000, 0.002:3000, 0.0005:4000, 0.00005_, for 20k steps. - [Examples with the 20k embed](https://imgur.com/a/j0u1FlO) - **rebecca-71a-v1b-embeddings**: Embedding trained with voldy's web-ui with 8 tokens per vector, using a learning rate of _0.005_, for 30k steps. - [Examples with the 5k embed](https://imgur.com/a/MAlHx3d) - [Examples with the 10k embed](https://imgur.com/a/SeGNh31) - [Examples with the 15k embed](https://imgur.com/a/sIpkyQ9) - [Examples with the 20k embed](https://imgur.com/a/IHsAj9j) - [Examples with the 30k embed](https://imgur.com/a/qF4sZQe) - **rebecca-71a-v1c-embeddings**: Embedding trained with voldy's web-ui with 12 tokens per vector, using a learning rate of _0.005_, for 30k steps. - [Examples with the 5k embed](https://imgur.com/a/We3YUYa) - [Examples with the 10k embed](https://imgur.com/a/Crzc1aK) - [Examples with the 15k embed](https://imgur.com/a/xBlxZkq) - [Examples with the 20k embed](https://imgur.com/a/DvBiya4) - [Examples with the 30k embed](https://imgur.com/a/kpvcrcx) ## HYPERNETWORKS _Apparently voldy's hypernetwork training is broken at the moment. Will either rollback, or wait for it to get fixed._
DCU-NLP/bert-base-irish-cased-v1
[ "pytorch", "tf", "bert", "fill-mask", "transformers", "generated_from_keras_callback", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,244
2022-10-26T12:50:54Z
--- title: Daimond_Price emoji: 💩 colorFrom: blue colorTo: green sdk: streamlit sdk_version: 1.10.0 app_file: app.py pinned: false license: cc-by-3.0 --- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
DCU-NLP/electra-base-irish-cased-generator-v1
[ "pytorch", "electra", "fill-mask", "ga", "transformers", "irish", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "ElectraForMaskedLM" ], "model_type": "electra", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
We are a family-owned and operated Credit Repair company, founded in 2013. Our goal is to help you achieve financial success and reach your credit goals. We’re not your average credit repair firm; we truly care, so we only charge for the items we pursue on your report. Not only does this make us one of the FASTEST credit restoration companies, but we’re also one of the most affordable. Follow this [link](https://lafayette.asapcreditrepairusa.com/).
DHBaek/gpt2-stackoverflow-question-contents-generator
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
2022-10-26T13:15:14Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imagefolder metrics: - accuracy model-index: - name: swin-tiny-patch4-window7-224-finetuned-eurosat results: - task: name: Image Classification type: image-classification dataset: name: imagefolder type: imagefolder config: default split: train args: default metrics: - name: Accuracy type: accuracy value: 0.9822222222222222 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-tiny-patch4-window7-224-finetuned-eurosat This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.0501 - Accuracy: 0.9822 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.3259 | 1.0 | 379 | 0.0760 | 0.9763 | | 0.1882 | 2.0 | 758 | 0.0694 | 0.9778 | | 0.1563 | 3.0 | 1137 | 0.0501 | 0.9822 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
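For completeness, here is a lower-level inference sketch for a checkpoint like the one described above, using the feature extractor and model classes directly rather than a pipeline. The repository id is a placeholder; point it at wherever this fine-tuned model is actually hosted.

```python
import torch
from PIL import Image
from transformers import AutoFeatureExtractor, AutoModelForImageClassification

# Placeholder repository id for the fine-tuned checkpoint described in the card above.
model_id = "your-username/swin-tiny-patch4-window7-224-finetuned-eurosat"

feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
model = AutoModelForImageClassification.from_pretrained(model_id)

image = Image.open("satellite_tile.png").convert("RGB")
inputs = feature_extractor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

# id2label is stored in the fine-tuned config, so the printed name matches the training classes.
predicted_class = logits.argmax(-1).item()
print(model.config.id2label[predicted_class])
```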
DJSammy/bert-base-swedish-uncased_BotXO-ai
[ "pytorch", "transformers" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # {MODEL_NAME} This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}') model = AutoModel.from_pretrained('{MODEL_NAME}') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 10 with parameters: ``` {'batch_size': 16, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss` Parameters of the fit()-Method: ``` { "epochs": 1, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": 10, "warmup_steps": 1, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: MPNetModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
DKpro000/DialoGPT-medium-harrypotter
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - generated_from_trainer datasets: - rotten_tomatoes model-index: - name: rtm_bert_5E results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # rtm_bert_5E This model was trained from scratch on the rotten_tomatoes dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
DKpro000/DialoGPT-small-harrypotter
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-10-26T13:43:13Z
--- license: apache-2.0 # inference: false # pipeline_tag: zero-shot-image-classification pipeline_tag: feature-extraction # inference: # parameters: tags: - text-to-image - chinese - diffusion --- # Taiyi-Diffusion-532M-Cyberpunk-Chinese - Github: [Fengshenbang-LM](https://github.com/IDEA-CCNL/Fengshenbang-LM) - Docs: [Fengshenbang-Docs](https://fengshenbang-doc.readthedocs.io/) ## 简介 Brief Introduction 该模型由[Katherine Crowson's](https://github.com/openai/guided-diffusion)的无条件扩散模型在1k+张收集的赛博朋克风的图上微调而来。结合[IDEA-CCNL/Taiyi-CLIP-Roberta-large-326M-Chinese](https://huggingface.co/IDEA-CCNL/Taiyi-CLIP-Roberta-large-326M-Chinese)可以实现中文Guided Diffusion的生成方式。 This model is finetuned from Katherine Crowson's fine-tuned 512x512 diffusion model (https://github.com/openai/guided-diffusion), using 1k+ cyberpunk-style images crawled from the Internet. Combined with [IDEA-CCNL/Taiyi-CLIP-Roberta-large-326M-Chinese](https://huggingface.co/IDEA-CCNL/Taiyi-CLIP-Roberta-large-326M-Chinese), it can generate images via guided diffusion in Chinese. ## 模型分类 Model Taxonomy | 需求 Demand | 任务 Task | 系列 Series | 模型 Model | 参数 Parameter | 额外 Extra | | :----: | :----: | :----: | :----: | :----: | :----: | | 特殊 Special | 多模态 Multimodal | 太乙 Taiyi | Diffusion Model | 532M | Cyberpunk | ## 使用 Usage 使用示例见 (see usage examples at): https://github.com/IDEA-CCNL/Fengshenbang-LM/tree/main/fengshen/examples/disco_project ## 生成示例 Example | 城市,赛博朋克 | 城市,赛博朋克 | | ---- | ---- | | ![](cyb1.png) | ![](cyb2.png) | ## 引用 Citation 如果您在您的工作中使用了我们的模型,可以引用我们的[论文](https://arxiv.org/abs/2209.02970): If you are using the resource for your work, please cite our [paper](https://arxiv.org/abs/2209.02970): ```text @article{fengshenbang, author = {Jiaxing Zhang and Ruyi Gan and Junjie Wang and Yuxiang Zhang and Lin Zhang and Ping Yang and Xinyu Gao and Ziwei Wu and Xiaoqun Dong and Junqing He and Jianheng Zhuo and Qi Yang and Yongfeng Huang and Xiayu Li and Yanghan Wu and Junyu Lu and Xinyu Zhu and Weifeng Chen and Ting Han and Kunhao Pan and Rui Wang and Hao Wang and Xiaojun Wu and Zhongshen Zeng and Chongpei Chen}, title = {Fengshenbang 1.0: Being the Foundation of Chinese Cognitive Intelligence}, journal = {CoRR}, volume = {abs/2209.02970}, year = {2022} } ``` 也可以引用我们的[网站](https://github.com/IDEA-CCNL/Fengshenbang-LM/): You can also cite our [website](https://github.com/IDEA-CCNL/Fengshenbang-LM/): ```text @misc{Fengshenbang-LM, title={Fengshenbang-LM}, author={IDEA-CCNL}, year={2021}, howpublished={\url{https://github.com/IDEA-CCNL/Fengshenbang-LM}}, } ```
DLNLP/t5-small-finetuned-xsum
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-10-26T13:46:45Z
--- tags: - generated_from_trainer datasets: - imagefolder metrics: - accuracy - f1 model-index: - name: dit-base-finetuned-brs results: - task: name: Image Classification type: image-classification dataset: name: imagefolder type: imagefolder config: default split: train args: default metrics: - name: Accuracy type: accuracy value: 0.8823529411764706 - name: F1 type: f1 value: 0.8571428571428571 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # dit-base-finetuned-brs This model is a fine-tuned version of [microsoft/dit-base](https://huggingface.co/microsoft/dit-base) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.8748 - Accuracy: 0.8824 - F1: 0.8571 - Precision (ppv): 0.8571 - Recall (sensitivity): 0.8571 - Specificity: 0.9 - Npv: 0.9 - Auc: 0.8786 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 4 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 100 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Precision (ppv) | Recall (sensitivity) | Specificity | Npv | Auc | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:---------------:|:--------------------:|:-----------:|:------:|:------:| | 0.6624 | 6.25 | 100 | 0.5548 | 0.8235 | 0.7692 | 0.8333 | 0.7143 | 0.9 | 0.8182 | 0.8071 | | 0.5201 | 12.49 | 200 | 0.4617 | 0.8824 | 0.8571 | 0.8571 | 0.8571 | 0.9 | 0.9 | 0.8786 | | 0.5172 | 18.74 | 300 | 0.4249 | 0.8235 | 0.8000 | 0.75 | 0.8571 | 0.8 | 0.8889 | 0.8286 | | 0.4605 | 24.98 | 400 | 0.3172 | 0.8235 | 0.7692 | 0.8333 | 0.7143 | 0.9 | 0.8182 | 0.8071 | | 0.4894 | 31.25 | 500 | 0.4466 | 0.8235 | 0.7692 | 0.8333 | 0.7143 | 0.9 | 0.8182 | 0.8071 | | 0.3694 | 37.49 | 600 | 0.5077 | 0.8235 | 0.7692 | 0.8333 | 0.7143 | 0.9 | 0.8182 | 0.8071 | | 0.6172 | 43.74 | 700 | 0.5722 | 0.7647 | 0.7143 | 0.7143 | 0.7143 | 0.8 | 0.8 | 0.7571 | | 0.3671 | 49.98 | 800 | 0.7006 | 0.7647 | 0.6667 | 0.8 | 0.5714 | 0.9 | 0.75 | 0.7357 | | 0.4109 | 56.25 | 900 | 0.4410 | 0.8235 | 0.7692 | 0.8333 | 0.7143 | 0.9 | 0.8182 | 0.8071 | | 0.3198 | 62.49 | 1000 | 0.7226 | 0.8235 | 0.7692 | 0.8333 | 0.7143 | 0.9 | 0.8182 | 0.8071 | | 0.4283 | 68.74 | 1100 | 0.8089 | 0.8235 | 0.7692 | 0.8333 | 0.7143 | 0.9 | 0.8182 | 0.8071 | | 0.3273 | 74.98 | 1200 | 0.9059 | 0.7647 | 0.6667 | 0.8 | 0.5714 | 0.9 | 0.75 | 0.7357 | | 0.3237 | 81.25 | 1300 | 0.8520 | 0.8235 | 0.7692 | 0.8333 | 0.7143 | 0.9 | 0.8182 | 0.8071 | | 0.2014 | 87.49 | 1400 | 0.9183 | 0.7647 | 0.6667 | 0.8 | 0.5714 | 0.9 | 0.75 | 0.7357 | | 0.3204 | 93.74 | 1500 | 0.6769 | 0.8824 | 0.8571 | 0.8571 | 0.8571 | 0.9 | 0.9 | 0.8786 | | 0.1786 | 99.98 | 1600 | 0.8748 | 0.8824 | 0.8571 | 0.8571 | 0.8571 | 0.9 | 0.9 | 0.8786 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
DTAI-KULeuven/mbert-corona-tweets-belgium-topics
[ "pytorch", "jax", "bert", "text-classification", "multilingual", "nl", "fr", "en", "arxiv:2104.09947", "transformers", "Dutch", "French", "English", "Tweets", "Topic classification" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
167
2022-10-26T21:35:49Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imagefolder metrics: - accuracy model-index: - name: swin-tiny-patch4-window7-224-finetuned-woody_130epochs results: - task: name: Image Classification type: image-classification dataset: name: imagefolder type: imagefolder config: default split: train args: default metrics: - name: Accuracy type: accuracy value: 0.8921212121212121 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-tiny-patch4-window7-224-finetuned-woody_130epochs This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.4550 - Accuracy: 0.8921 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 130 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.6694 | 1.0 | 58 | 0.6370 | 0.6594 | | 0.6072 | 2.0 | 116 | 0.5813 | 0.7030 | | 0.6048 | 3.0 | 174 | 0.5646 | 0.7030 | | 0.5849 | 4.0 | 232 | 0.5778 | 0.6970 | | 0.5671 | 5.0 | 290 | 0.5394 | 0.7236 | | 0.5575 | 6.0 | 348 | 0.5212 | 0.7382 | | 0.568 | 7.0 | 406 | 0.5218 | 0.7358 | | 0.5607 | 8.0 | 464 | 0.5183 | 0.7527 | | 0.5351 | 9.0 | 522 | 0.5138 | 0.7467 | | 0.5459 | 10.0 | 580 | 0.5290 | 0.7394 | | 0.5454 | 11.0 | 638 | 0.5212 | 0.7345 | | 0.5291 | 12.0 | 696 | 0.5130 | 0.7576 | | 0.5378 | 13.0 | 754 | 0.5372 | 0.7503 | | 0.5264 | 14.0 | 812 | 0.6089 | 0.6861 | | 0.4909 | 15.0 | 870 | 0.4852 | 0.7636 | | 0.5591 | 16.0 | 928 | 0.4817 | 0.76 | | 0.4966 | 17.0 | 986 | 0.5673 | 0.6933 | | 0.4988 | 18.0 | 1044 | 0.5131 | 0.7418 | | 0.5339 | 19.0 | 1102 | 0.4998 | 0.7394 | | 0.4804 | 20.0 | 1160 | 0.4655 | 0.7733 | | 0.503 | 21.0 | 1218 | 0.4554 | 0.7685 | | 0.4859 | 22.0 | 1276 | 0.4713 | 0.7770 | | 0.504 | 23.0 | 1334 | 0.4545 | 0.7721 | | 0.478 | 24.0 | 1392 | 0.4658 | 0.7830 | | 0.4759 | 25.0 | 1450 | 0.4365 | 0.8012 | | 0.4686 | 26.0 | 1508 | 0.4452 | 0.7855 | | 0.4668 | 27.0 | 1566 | 0.4427 | 0.7879 | | 0.4615 | 28.0 | 1624 | 0.4439 | 0.7685 | | 0.4588 | 29.0 | 1682 | 0.4378 | 0.7830 | | 0.4588 | 30.0 | 1740 | 0.4229 | 0.7988 | | 0.4296 | 31.0 | 1798 | 0.4188 | 0.7976 | | 0.4208 | 32.0 | 1856 | 0.4316 | 0.7891 | | 0.4481 | 33.0 | 1914 | 0.4331 | 0.7891 | | 0.4253 | 34.0 | 1972 | 0.4524 | 0.7879 | | 0.4117 | 35.0 | 2030 | 0.4570 | 0.7952 | | 0.4405 | 36.0 | 2088 | 0.4307 | 0.7927 | | 0.4154 | 37.0 | 2146 | 0.4257 | 0.8024 | | 0.3962 | 38.0 | 2204 | 0.5077 | 0.7818 | | 0.414 | 39.0 | 2262 | 0.4602 | 0.8012 | | 0.3937 | 40.0 | 2320 | 0.4741 | 0.7770 | | 0.4186 | 41.0 | 2378 | 0.4250 | 0.8 | | 0.4076 | 42.0 | 2436 | 0.4353 | 0.7988 | | 0.3777 | 43.0 | 2494 | 0.4442 | 0.7879 | | 0.3968 | 44.0 | 2552 | 0.4525 | 0.7879 | | 0.377 | 45.0 | 2610 | 0.4198 | 0.7988 | | 0.378 | 46.0 | 2668 | 0.4297 | 0.8097 | | 0.3675 | 47.0 
| 2726 | 0.4435 | 0.8085 | | 0.3562 | 48.0 | 2784 | 0.4477 | 0.7952 | | 0.381 | 49.0 | 2842 | 0.4206 | 0.8255 | | 0.3603 | 50.0 | 2900 | 0.4136 | 0.8109 | | 0.3331 | 51.0 | 2958 | 0.4141 | 0.8230 | | 0.3471 | 52.0 | 3016 | 0.4253 | 0.8109 | | 0.346 | 53.0 | 3074 | 0.5203 | 0.8048 | | 0.3481 | 54.0 | 3132 | 0.4288 | 0.8242 | | 0.3411 | 55.0 | 3190 | 0.4416 | 0.8194 | | 0.3275 | 56.0 | 3248 | 0.4149 | 0.8291 | | 0.3067 | 57.0 | 3306 | 0.4623 | 0.8218 | | 0.3166 | 58.0 | 3364 | 0.4432 | 0.8255 | | 0.3294 | 59.0 | 3422 | 0.4599 | 0.8267 | | 0.3146 | 60.0 | 3480 | 0.4266 | 0.8291 | | 0.3091 | 61.0 | 3538 | 0.4318 | 0.8315 | | 0.3277 | 62.0 | 3596 | 0.4252 | 0.8242 | | 0.296 | 63.0 | 3654 | 0.4332 | 0.8436 | | 0.3241 | 64.0 | 3712 | 0.4729 | 0.8194 | | 0.3104 | 65.0 | 3770 | 0.4228 | 0.8448 | | 0.2878 | 66.0 | 3828 | 0.4173 | 0.8388 | | 0.265 | 67.0 | 3886 | 0.4210 | 0.8497 | | 0.3011 | 68.0 | 3944 | 0.4276 | 0.8436 | | 0.2861 | 69.0 | 4002 | 0.4923 | 0.8315 | | 0.2994 | 70.0 | 4060 | 0.4472 | 0.8182 | | 0.276 | 71.0 | 4118 | 0.4541 | 0.8315 | | 0.2796 | 72.0 | 4176 | 0.4218 | 0.8521 | | 0.2727 | 73.0 | 4234 | 0.4053 | 0.8448 | | 0.255 | 74.0 | 4292 | 0.4356 | 0.8376 | | 0.276 | 75.0 | 4350 | 0.4193 | 0.8436 | | 0.261 | 76.0 | 4408 | 0.4484 | 0.8533 | | 0.2416 | 77.0 | 4466 | 0.4722 | 0.8194 | | 0.2602 | 78.0 | 4524 | 0.4431 | 0.8533 | | 0.2591 | 79.0 | 4582 | 0.4269 | 0.8606 | | 0.2613 | 80.0 | 4640 | 0.4335 | 0.8485 | | 0.2555 | 81.0 | 4698 | 0.4269 | 0.8594 | | 0.2832 | 82.0 | 4756 | 0.3968 | 0.8715 | | 0.264 | 83.0 | 4814 | 0.4173 | 0.8703 | | 0.2462 | 84.0 | 4872 | 0.4150 | 0.8606 | | 0.2424 | 85.0 | 4930 | 0.4377 | 0.8630 | | 0.2574 | 86.0 | 4988 | 0.4120 | 0.8679 | | 0.2273 | 87.0 | 5046 | 0.4393 | 0.8533 | | 0.2334 | 88.0 | 5104 | 0.4366 | 0.8630 | | 0.2258 | 89.0 | 5162 | 0.4189 | 0.8630 | | 0.2153 | 90.0 | 5220 | 0.4474 | 0.8630 | | 0.2462 | 91.0 | 5278 | 0.4362 | 0.8642 | | 0.2356 | 92.0 | 5336 | 0.4454 | 0.8715 | | 0.2019 | 93.0 | 5394 | 0.4413 | 0.88 | | 0.209 | 94.0 | 5452 | 0.4410 | 0.8703 | | 0.2201 | 95.0 | 5510 | 0.4323 | 0.8691 | | 0.2245 | 96.0 | 5568 | 0.4999 | 0.8618 | | 0.2178 | 97.0 | 5626 | 0.4612 | 0.8655 | | 0.2163 | 98.0 | 5684 | 0.4340 | 0.8703 | | 0.2228 | 99.0 | 5742 | 0.4504 | 0.8788 | | 0.2151 | 100.0 | 5800 | 0.4602 | 0.8703 | | 0.1988 | 101.0 | 5858 | 0.4414 | 0.8812 | | 0.2227 | 102.0 | 5916 | 0.4392 | 0.8824 | | 0.1772 | 103.0 | 5974 | 0.5069 | 0.8630 | | 0.2199 | 104.0 | 6032 | 0.4648 | 0.8667 | | 0.1936 | 105.0 | 6090 | 0.4806 | 0.8691 | | 0.199 | 106.0 | 6148 | 0.4569 | 0.8764 | | 0.2149 | 107.0 | 6206 | 0.4445 | 0.8739 | | 0.1917 | 108.0 | 6264 | 0.4444 | 0.8727 | | 0.201 | 109.0 | 6322 | 0.4594 | 0.8727 | | 0.1938 | 110.0 | 6380 | 0.4564 | 0.8764 | | 0.1977 | 111.0 | 6438 | 0.4398 | 0.8739 | | 0.1776 | 112.0 | 6496 | 0.4356 | 0.88 | | 0.1939 | 113.0 | 6554 | 0.4412 | 0.8848 | | 0.178 | 114.0 | 6612 | 0.4373 | 0.88 | | 0.1926 | 115.0 | 6670 | 0.4508 | 0.8812 | | 0.1979 | 116.0 | 6728 | 0.4477 | 0.8848 | | 0.1958 | 117.0 | 6786 | 0.4488 | 0.8897 | | 0.189 | 118.0 | 6844 | 0.4553 | 0.8836 | | 0.1838 | 119.0 | 6902 | 0.4605 | 0.8848 | | 0.1755 | 120.0 | 6960 | 0.4463 | 0.8836 | | 0.1958 | 121.0 | 7018 | 0.4474 | 0.8861 | | 0.1857 | 122.0 | 7076 | 0.4550 | 0.8921 | | 0.1466 | 123.0 | 7134 | 0.4494 | 0.8885 | | 0.1751 | 124.0 | 7192 | 0.4560 | 0.8873 | | 0.175 | 125.0 | 7250 | 0.4383 | 0.8897 | | 0.207 | 126.0 | 7308 | 0.4601 | 0.8873 | | 0.1756 | 127.0 | 7366 | 0.4425 | 0.8897 | | 0.1695 | 128.0 | 7424 | 0.4533 | 0.8909 | | 0.1873 | 129.0 | 7482 | 0.4510 | 
0.8897 | | 0.1726 | 130.0 | 7540 | 0.4463 | 0.8909 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
alexandrainst/da-hatespeech-detection-base
[ "pytorch", "tf", "safetensors", "bert", "text-classification", "da", "transformers", "license:cc-by-sa-4.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,719
null
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 689.50 +/- 181.29 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga dominiks -f logs/ python enjoy.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga dominiks -f logs/ rl_zoo3 enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python train.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga dominiks ``` ## Hyperparameters ```python OrderedDict([('batch_size', 32), ('buffer_size', 100000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.0001), ('learning_starts', 100000), ('n_timesteps', 1000000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ```
alexandrainst/da-subjectivivity-classification-base
[ "pytorch", "tf", "safetensors", "bert", "text-classification", "da", "dataset:DDSC/twitter-sent", "dataset:DDSC/europarl", "transformers", "license:cc-by-sa-4.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
846
null
--- license: mit --- Pretrained adapters for multilingual knowledge graph enhancement (https://arxiv.org/abs/2210.13617).
Dablio/Dablio
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: en tags: - grammar - text2text-generation datasets: - leslyarun/c4_200m_gec_train100k_test25k --- # Get grammatical corrections on your English text, trained on a subset of the c4-200m dataset # Use the code below to run the model ```python from transformers import AutoTokenizer, AutoModelForSeq2SeqLM from transformers import pipeline tokenizer = AutoTokenizer.from_pretrained("leslyarun/grammatical-error-correction") model = AutoModelForSeq2SeqLM.from_pretrained("leslyarun/grammatical-error-correction") text2text_generator = pipeline("text2text-generation", model=model, tokenizer=tokenizer) # Example input to be corrected sentence = "He go to school every days." output = text2text_generator("grammar: " + sentence) print(output[0]["generated_text"]) ```
Daivakai/DialoGPT-small-saitama
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
2022-10-26T15:23:43Z
--- license: mit --- ### A modern house on Stable Diffusion via Dreambooth #### model by rrustom This is the Stable Diffusion model fine-tuned on the A modern house concept taught to Stable Diffusion with Dreambooth. It can be used by modifying the `instance_prompt`: **a photo of sks modern home** You can also train your own concepts and upload them to the library by using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb). And you can run your new concept via `diffusers`: [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb), [Spaces with the Public Concepts loaded](https://huggingface.co/spaces/sd-dreambooth-library/stable-diffusion-dreambooth-concepts) Here are the images used for training this concept: ![image 0](https://huggingface.co/rrustom/a-modern-house/resolve/main/concept_images/1.jpeg) ![image 1](https://huggingface.co/rrustom/a-modern-house/resolve/main/concept_images/2.jpeg) ![image 2](https://huggingface.co/rrustom/a-modern-house/resolve/main/concept_images/0.jpeg) ![image 3](https://huggingface.co/rrustom/a-modern-house/resolve/main/concept_images/3.jpeg)
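A short `diffusers` sketch for running the concept above: the repository id is inferred from the concept-image links on the card, and it is assumed that the repository hosts the full fine-tuned pipeline, so verify both before running (a CUDA GPU is also assumed).

```python
import torch
from diffusers import StableDiffusionPipeline

# Repository id inferred from the concept-image URLs above; verify it hosts the full pipeline.
pipe = StableDiffusionPipeline.from_pretrained(
    "rrustom/a-modern-house", torch_dtype=torch.float16
).to("cuda")

# The instance prompt from the card; extend it to steer the scene.
prompt = "a photo of sks modern home surrounded by pine trees, golden hour"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("sks_modern_home.png")
```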
DanL/scientific-challenges-and-directions
[ "pytorch", "bert", "text-classification", "en", "dataset:DanL/scientific-challenges-and-directions-dataset", "arxiv:2108.13751", "transformers", "generated_from_trainer" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
134
null
--- language: en thumbnail: http://www.huggingtweets.com/glowrillazart/1666798579724/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1424126329893572616/kufqmDCQ_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">glow</div> <div style="text-align: center; font-size: 14px;">@glowrillazart</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from glow. | Data | glow | | --- | --- | | Tweets downloaded | 3231 | | Retweets | 265 | | Short tweets | 365 | | Tweets kept | 2601 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/1ni25iaa/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @glowrillazart's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/j2vftkea) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/j2vftkea/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/glowrillazart') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
DataikuNLP/average_word_embeddings_glove.6B.300d
[ "arxiv:1908.10084", "sentence-transformers", "feature-extraction", "sentence-similarity", "license:apache-2.0" ]
sentence-similarity
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-10-26T16:49:49Z
--- license: cc-by-nc-4.0 --- # roscoe-512-roberta-base ## Model description Sentence embedding model for reasoning steps. To obtain reasoning step embeddings, we finetune SimCSE (Gao et al., 2021), a supervised sentence similarity model extending the RoBERTa word embedding model (Liu et al., 2019), on the multi-step reasoning datasets we listed in §5 (see details in Golovneva et al., 2022). SimCSE is a contrastive learning model that is trained on triplets of reference reasoning steps, positive and hard-negative hypothesis reasoning steps to minimize the cross-entropy objective with in-batch negatives. For contrastive learning, we use the context and reference reasoning steps as a positive sample, and the context and perturbed reference steps as hard-negative pairs. With the finetuned model we embed each individual step, as well as a reasoning chain as a whole. We use the pretrained checkpoint of the supervised SimCSE model sup-simcse-roberta-base to initialize our model, and further train it for five epochs on our synthetic train data. ## Training data To train the model, we construct the dataset by generating perturbations — i.e., deterministic modifications — on half of the reference reasoning steps in the following sets: Entailment-Bank (deductive reasoning), ProofWriter (logical reasoning); three arithmetic reasoning datasets MATH, ASDIV and AQUA; EQASC (explanations for commonsense question answering), and StrategyQA (question answering with implicit reasoning strategies). ## References 1. Tianyu Gao, Xingcheng Yao, and Danqi Chen. Simcse: Simple contrastive learning of sentence embeddings. arXiv preprint arXiv:2104.08821, 2021. 2. Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692, 2019. 3. Olga Golovneva, Moya Chen, Spencer Poff, Martin Corredor, Luke Zettlemoyer, Maryam Fazel-Zarandi, and Asli Celikyilmaz. ROSCOE: A Suite of Metrics for Scoring Step-by-Step Reasoning. arXiv:2212.07919, 2022. ## Citation @article{golovneva2022roscoe, title={{ROSCOE}: A Suite of Metrics for Scoring Step-by-Step Reasoning}, author={Golovneva, Olga and Chen, Moya and Poff, Spencer and Corredor, Martin and Zettlemoyer, Luke and Fazel-Zarandi, Maryam and Celikyilmaz, Asli}, journal={arXiv preprint arXiv:2212.07919}, year={2022} }
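The card explains how the embeddings are trained but not how to query them; the sketch below is one plausible way to embed two reasoning steps and compare them, assuming the checkpoint is exposed through the standard `transformers` classes. Both the repository id and the [CLS] pooling choice are assumptions carried over from the SimCSE setup described above.

```python
import torch
from transformers import AutoTokenizer, AutoModel

# Assumed Hub id; substitute the actual repository of this checkpoint.
model_id = "facebook/roscoe-512-roberta-base"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModel.from_pretrained(model_id)
model.eval()

steps = [
    "All squares are rectangles.",
    "This shape is a square, so it must also be a rectangle.",
]

inputs = tokenizer(steps, padding=True, truncation=True, max_length=512, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# SimCSE-style models are usually read out at the first ([CLS]) token position.
embeddings = outputs.last_hidden_state[:, 0]

# Cosine similarity between the two reasoning-step embeddings.
similarity = torch.nn.functional.cosine_similarity(embeddings[0], embeddings[1], dim=0)
print(float(similarity))
```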
DataikuNLP/paraphrase-albert-small-v2
[ "pytorch", "albert", "arxiv:1908.10084", "sentence-transformers", "feature-extraction", "sentence-similarity", "transformers", "license:apache-2.0" ]
sentence-similarity
{ "architectures": [ "AlbertModel" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
628
null
## Overview ``` Language model: Pegasus-xsum Language: English Downstream-task: Question-Answering Generation Training data: SQuAD 2.0, NewsQA Eval data: SQuAD 2.0, NewsQA Infrastructure: Nvidia Tesla K80 12GB RAM ``` ## Hyperparameters ``` per_device_train_batch_size = 2 per_device_eval_batch_size = 2 num_train_epochs = 3 base_LM_model = "pegasus-xsum" source_max_token_len = 256 target_max_token_len = 64 learning_rate = 5e-5 lr_schedule = LinearWarmup warmup_steps = 150 ``` ## Usage ```python import transformers from transformers import PegasusForConditionalGeneration, PegasusTokenizerFast model_name = 'nloc2578/QAG_Pegasus_3ep_eval' tokenizer = PegasusTokenizerFast.from_pretrained(model_name) model = PegasusForConditionalGeneration.from_pretrained(model_name, pad_token_id=tokenizer.eos_token_id) text = '''The primary goal of distractor generation is generating answer options that are plausible answers to the question, and might appear correct to a user who does not know the correct answer. Distractors should also be clearly distinct from the key and each other and they should not be correct answers to the question (for questions that might have multiple correct answers).''' input_id = tokenizer(text, return_tensors='pt') output = model.generate(input_id['input_ids']) result = tokenizer.decode(output[0]) print(result) ```
Dave/twomad-model
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-10-26T17:15:53Z
--- language: en thumbnail: http://www.huggingtweets.com/the_boolaidman/1666821342474/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1528444052034789378/E1BRWZyE_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">theboghog</div> <div style="text-align: center; font-size: 14px;">@the_boolaidman</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from theboghog. | Data | theboghog | | --- | --- | | Tweets downloaded | 184 | | Retweets | 44 | | Short tweets | 32 | | Tweets kept | 108 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/lez3uo4l/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @the_boolaidman's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/34ufbard) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/34ufbard/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/the_boolaidman') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
Davlan/bert-base-multilingual-cased-finetuned-igbo
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
15
2022-10-26T17:42:08Z
--- language: en thumbnail: http://www.huggingtweets.com/big___oven-schizo_freq/1666821031327/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1571653458972794884/eaxhUsib_400x400.jpg&#39;)"> </div> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1582126821025382400/PZjx83du_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI CYBORG 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">oskcar & Lukas (computer)</div> <div style="text-align: center; font-size: 14px;">@big___oven-schizo_freq</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from oskcar & Lukas (computer). | Data | oskcar | Lukas (computer) | | --- | --- | --- | | Tweets downloaded | 2642 | 3234 | | Retweets | 605 | 480 | | Short tweets | 325 | 326 | | Tweets kept | 1712 | 2428 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/t7nn481m/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @big___oven-schizo_freq's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/3ljhfklh) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/3ljhfklh/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/big___oven-schizo_freq') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
Davlan/bert-base-multilingual-cased-finetuned-kinyarwanda
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
null
--- tags: - generated_from_trainer datasets: - generator model-index: - name: scideberta-cs-tdm-pretrained-finetuned-ner results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # scideberta-cs-tdm-pretrained-finetuned-ner This model is a fine-tuned version of [sohamtiwari3120/scideberta-cs-tdm-pretrained](https://huggingface.co/sohamtiwari3120/scideberta-cs-tdm-pretrained) on the generator dataset. It achieves the following results on the evaluation set: - Loss: 0.6836 - Overall Precision: 0.5912 - Overall Recall: 0.6850 - Overall F1: 0.6347 - Overall Accuracy: 0.9609 - Datasetname F1: 0.5882 - Hyperparametername F1: 0.6897 - Hyperparametervalue F1: 0.7619 - Methodname F1: 0.6525 - Metricname F1: 0.7500 - Metricvalue F1: 0.6452 - Taskname F1: 0.5370 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 100 ### Training results | Training Loss | Epoch | Step | Validation Loss | Overall Precision | Overall Recall | Overall F1 | Overall Accuracy | Datasetname F1 | Hyperparametername F1 | Hyperparametervalue F1 | Methodname F1 | Metricname F1 | Metricvalue F1 | Taskname F1 | |:-------------:|:-----:|:----:|:---------------:|:-----------------:|:--------------:|:----------:|:----------------:|:--------------:|:---------------------:|:----------------------:|:-------------:|:-------------:|:--------------:|:-----------:| | No log | 1.0 | 132 | 0.3507 | 0.3972 | 0.6870 | 0.5034 | 0.9410 | 0.4370 | 0.5441 | 0.5814 | 0.6124 | 0.5604 | 0.6207 | 0.3724 | | No log | 2.0 | 264 | 0.3079 | 0.4066 | 0.7520 | 0.5278 | 0.9430 | 0.4138 | 0.5380 | 0.6222 | 0.5895 | 0.625 | 0.7273 | 0.4340 | | No log | 3.0 | 396 | 0.3740 | 0.5007 | 0.7195 | 0.5905 | 0.9535 | 0.4882 | 0.6777 | 0.7500 | 0.6254 | 0.6747 | 0.7097 | 0.4962 | | 0.4014 | 4.0 | 528 | 0.4072 | 0.5161 | 0.7154 | 0.5997 | 0.9540 | 0.5167 | 0.6612 | 0.6374 | 0.6337 | 0.6753 | 0.6061 | 0.5341 | | 0.4014 | 5.0 | 660 | 0.4088 | 0.5590 | 0.7317 | 0.6338 | 0.9582 | 0.5660 | 0.6667 | 0.7397 | 0.6250 | 0.7226 | 0.75 | 0.5794 | | 0.4014 | 6.0 | 792 | 0.4810 | 0.5201 | 0.7093 | 0.6002 | 0.9550 | 0.4874 | 0.5970 | 0.6506 | 0.6207 | 0.6708 | 0.6250 | 0.5756 | | 0.4014 | 7.0 | 924 | 0.5288 | 0.5403 | 0.6809 | 0.6025 | 0.9576 | 0.4915 | 0.6500 | 0.6133 | 0.6255 | 0.7006 | 0.7879 | 0.5389 | | 0.0912 | 8.0 | 1056 | 0.5281 | 0.5468 | 0.6890 | 0.6097 | 0.9574 | 0.5370 | 0.7143 | 0.6866 | 0.5854 | 0.6939 | 0.7742 | 0.5491 | | 0.0912 | 9.0 | 1188 | 0.4744 | 0.5371 | 0.7358 | 0.6209 | 0.9560 | 0.5370 | 0.6341 | 0.6753 | 0.6554 | 0.6795 | 0.7059 | 0.5699 | | 0.0912 | 10.0 | 1320 | 0.5498 | 0.5686 | 0.7073 | 0.6304 | 0.9586 | 0.5370 | 0.6349 | 0.7500 | 0.6553 | 0.7152 | 0.7742 | 0.5573 | | 0.0912 | 11.0 | 1452 | 0.6424 | 0.5857 | 0.7012 | 0.6383 | 0.9597 | 0.56 | 0.6789 | 0.7246 | 0.6667 | 0.6974 | 0.6875 | 0.5757 | | 0.0354 | 12.0 | 1584 | 0.5867 | 0.5641 | 0.6890 | 0.6203 | 0.9585 | 0.5185 | 0.6496 | 0.7213 | 0.6619 | 0.7152 | 0.7333 | 0.5402 | | 0.0354 | 13.0 | 1716 | 0.5500 | 0.5667 | 0.6992 | 0.6260 | 0.9592 | 0.5524 | 0.6829 | 0.7222 | 0.6621 | 0.6466 | 0.7333 | 0.5607 | | 0.0354 | 14.0 | 1848 | 0.5743 | 0.5780 | 0.7154 | 0.6394 | 0.9596 | 0.5283 | 0.6833 | 0.7222 | 0.6644 | 0.6716 | 0.7742 | 0.5960 | | 0.0354 | 15.0 | 1980 | 0.6836 | 0.5912 | 0.6850 | 0.6347 | 0.9609 | 0.5882 | 0.6897 | 0.7619 | 0.6525 | 0.7500 | 0.6452 | 0.5370 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu102 - Datasets 2.6.1 - Tokenizers 0.13.1
Davlan/bert-base-multilingual-cased-finetuned-luganda
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
16
2022-10-26T17:46:18Z
--- language: - en library_name: pytorch tags: - language-model - gpt2 - transformer - wikitext-103 model-index: - name: gpt2_wt103-40m_12-layer results: - task: type: language-modeling dataset: type: wikitext name: Wikitext-103 metrics: - type: perplexity value: 40.3 --- # Model description paper: [Characterizing Verbatim Short-Term Memory in Neural Language Models](https://arxiv.org/abs/2210.13569) This is a gpt2-small-like decoder-only transformer model trained on a 40M token subset of the [wikitext-103 dataset](https://paperswithcode.com/dataset/wikitext-103). # Usage You can download and load the model as follows: ```python from transformers import GPT2LMHeadModel model = GPT2LMHeadModel.from_pretrained("Kristijan/gpt2_wt103-40m_12-layer") ``` Alternatively, if you've downloaded the checkpoint files in this repository, you could also do: ```python from transformers import GPT2LMHeadModel model = GPT2LMHeadModel.from_pretrained(path_to_folder_with_checkpoint_files) ``` To tokenize your text for this model, you should use the [tokenizer trained on Wikitext-103](https://huggingface.co/Kristijan/wikitext-103-tokenizer). # Intended uses This checkpoint is intended for research purposes, for example by those interested in studying the behavior of transformer language models trained on smaller datasets.
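As a follow-up, a small sketch of putting the Wikitext-103 tokenizer and this model together to score text; the example sentence and the use of perplexity as the metric are illustrative choices, not part of the original card, while both repo IDs are the ones named in the card itself.

```python
import torch
from transformers import GPT2LMHeadModel, GPT2TokenizerFast

# Repo IDs as named in the card above.
tokenizer = GPT2TokenizerFast.from_pretrained("Kristijan/wikitext-103-tokenizer")
model = GPT2LMHeadModel.from_pretrained("Kristijan/gpt2_wt103-40m_12-layer")
model.eval()

text = "The city is located on the banks of the river ."
inputs = tokenizer(text, return_tensors="pt")

with torch.no_grad():
    # Passing the input ids as labels makes the model return the mean token-level cross-entropy.
    outputs = model(**inputs, labels=inputs["input_ids"])

print(f"Perplexity: {torch.exp(outputs.loss).item():.2f}")
```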
Davlan/bert-base-multilingual-cased-finetuned-naija
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
2022-10-26T17:53:02Z
This is a fine-tuned BART model for definition generation. It is still in the prototype stage, fine-tuned on only 40k training instances of (definition, context) pairs for 3 epochs. The eval_loss is still at 2.30. The beam size is 4.
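A hedged usage sketch: the card gives neither the checkpoint's repo ID nor its exact input format, so the path, the context formatting, and the 64-token output limit below are placeholders; only the beam size of 4 comes from the card.

```python
from transformers import BartForConditionalGeneration, BartTokenizerFast

# Placeholder path: replace with the actual checkpoint location once known.
checkpoint = "path/to/bart-definition-generation"
tokenizer = BartTokenizerFast.from_pretrained(checkpoint)
model = BartForConditionalGeneration.from_pretrained(checkpoint)

# Assumed input: a context sentence containing the term to define.
context = "gradient descent: The parameters are updated by gradient descent on the training loss."
inputs = tokenizer(context, return_tensors="pt", truncation=True)

# Beam search with 4 beams, matching the beam size stated in the card.
outputs = model.generate(**inputs, num_beams=4, max_length=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```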
Davlan/bert-base-multilingual-cased-masakhaner
[ "pytorch", "tf", "bert", "token-classification", "arxiv:2103.11811", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
88
2022-10-26T18:07:09Z
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 626.50 +/- 127.69 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga PraveenKishore -f logs/ python enjoy.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga PraveenKishore -f logs/ rl_zoo3 enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python train.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga PraveenKishore ``` ## Hyperparameters ```python OrderedDict([('batch_size', 32), ('buffer_size', 100000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.0001), ('learning_starts', 100000), ('n_timesteps', 1000000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ```
Davlan/byt5-base-eng-yor-mt
[ "pytorch", "t5", "text2text-generation", "arxiv:2103.08647", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
2022-10-26T18:17:24Z
--- language: en thumbnail: http://www.huggingtweets.com/snobrights/1666808315124/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1562231899925397504/PZnUZWaV_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">vote4ana</div> <div style="text-align: center; font-size: 14px;">@snobrights</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from vote4ana. | Data | vote4ana | | --- | --- | | Tweets downloaded | 1947 | | Retweets | 510 | | Short tweets | 353 | | Tweets kept | 1084 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/163lcflh/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @snobrights's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/6bnd5aob) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/6bnd5aob/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/snobrights') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
Davlan/distilbert-base-multilingual-cased-ner-hrl
[ "pytorch", "tf", "distilbert", "token-classification", "transformers", "autotrain_compatible", "has_space" ]
token-classification
{ "architectures": [ "DistilBertForTokenClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
123,856
2022-10-26T18:31:32Z
--- license: mit --- ### Cute Game Style on Stable Diffusion This is the `<cute-game-style>` concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the [Stable Conceptualizer](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) notebook. You can also train your own concepts and load them into the concept libraries using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb). Here is the new concept you will be able to use as a `style`: ![<cute-game-style> 0](https://huggingface.co/sd-concepts-library/cute-game-style/resolve/main/concept_images/1.jpeg) ![<cute-game-style> 1](https://huggingface.co/sd-concepts-library/cute-game-style/resolve/main/concept_images/6.jpeg) ![<cute-game-style> 2](https://huggingface.co/sd-concepts-library/cute-game-style/resolve/main/concept_images/2.jpeg) ![<cute-game-style> 3](https://huggingface.co/sd-concepts-library/cute-game-style/resolve/main/concept_images/4.jpeg) ![<cute-game-style> 4](https://huggingface.co/sd-concepts-library/cute-game-style/resolve/main/concept_images/0.jpeg) ![<cute-game-style> 5](https://huggingface.co/sd-concepts-library/cute-game-style/resolve/main/concept_images/3.jpeg) ![<cute-game-style> 6](https://huggingface.co/sd-concepts-library/cute-game-style/resolve/main/concept_images/7.jpeg) ![<cute-game-style> 7](https://huggingface.co/sd-concepts-library/cute-game-style/resolve/main/concept_images/5.jpeg) Here are images generated with this style: ![painting of a house in the style of <cute-game-style>](https://i.imgur.com/msUaazE.png) ![a beautiful pond in the style of <cute-game-style>](https://i.imgur.com/MVfHS33.png) ![painting of the colourful and lush interior of a greenhouse in the style of <cute-game-style>](https://i.imgur.com/WZJfoo9.png) ![cute isometric office building in the style of <cute-game-style>](https://i.imgur.com/1B1NRKh.png)
Davlan/mbart50-large-eng-yor-mt
[ "pytorch", "mbart", "text2text-generation", "arxiv:2103.08647", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MBartForConditionalGeneration" ], "model_type": "mbart", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
2022-10-26T18:40:36Z
--- language: - pt thumbnail: "Portuguese BERT for STS" pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - transformers datasets: - assin - assin2 - stsb_multi_mt widget: - source_sentence: "O advogado apresentou as provas ao juíz." sentences: - "O juíz leu as provas." - "O juíz leu o recurso." - "O juíz atirou uma pedra." example_title: "Example 1" model-index: - name: BERTimbau results: - task: name: STS type: STS metrics: - name: Pearson Correlation - assin Dataset type: Pearson Correlation value: 0.81758 - name: Pearson Correlation - assin2 Dataset type: Pearson Correlation value: 0.83784 - name: Pearson Correlation - stsb_multi_mt pt Dataset type: Pearson Correlation value: 0.81245 --- # rufimelo/bert-large-portuguese-cased-sts2 This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 1024 dimensional dense vector space and can be used for tasks like clustering or semantic search. rufimelo/bert-large-portuguese-cased-sts derives from [BERTimbau](https://huggingface.co/neuralmind/bert-large-portuguese-cased) large. ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["Isto é um exemplo", "Isto é um outro exemplo"] model = SentenceTransformer('rufimelo/bert-large-portuguese-cased-sts') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('rufimelo/bert-large-portuguese-cased-sts') model = AutoModel.from_pretrained('rufimelo/bert-large-portuguese-cased-sts') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Training rufimelo/bert-large-portuguese-cased-sts derives from [BERTimbau](https://huggingface.co/neuralmind/bert-base-portuguese-cased) large. It was trained for Semantic Textual Similarity, being submitted to a fine tuning stage with the [assin](https://huggingface.co/datasets/assin), [assin2](https://huggingface.co/datasets/assin2) and [stsb_multi_mt pt](https://huggingface.co/datasets/stsb_multi_mt) datasets. 
## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: BertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False}) ) ``` ## Citing & Authors If you use this work, please cite: ```bibtex @inproceedings{souza2020bertimbau, author = {F{\'a}bio Souza and Rodrigo Nogueira and Roberto Lotufo}, title = {{BERT}imbau: pretrained {BERT} models for {B}razilian {P}ortuguese}, booktitle = {9th Brazilian Conference on Intelligent Systems, {BRACIS}, Rio Grande do Sul, Brazil, October 20-23 (to appear)}, year = {2020} } @inproceedings{fonseca2016assin, title={ASSIN: Avaliacao de similaridade semantica e inferencia textual}, author={Fonseca, E and Santos, L and Criscuolo, Marcelo and Aluisio, S}, booktitle={Computational Processing of the Portuguese Language-12th International Conference, Tomar, Portugal}, pages={13--15}, year={2016} } @inproceedings{real2020assin, title={The assin 2 shared task: a quick overview}, author={Real, Livy and Fonseca, Erick and Oliveira, Hugo Goncalo}, booktitle={International Conference on Computational Processing of the Portuguese Language}, pages={406--412}, year={2020}, organization={Springer} } @InProceedings{huggingface:dataset:stsb_multi_mt, title = {Machine translated multilingual STS benchmark dataset.}, author={Philip May}, year={2021}, url={https://github.com/PhilipMay/stsb-multi-mt} } ```
Davlan/xlm-roberta-base-finetuned-igbo
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
68
2022-10-26T19:02:08Z
--- language: en thumbnail: http://www.huggingtweets.com/simerino1/1666811016675/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1174133652399300608/3UF7GOrK_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">computer</div> <div style="text-align: center; font-size: 14px;">@simerino1</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from computer. | Data | computer | | --- | --- | | Tweets downloaded | 980 | | Retweets | 366 | | Short tweets | 96 | | Tweets kept | 518 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/356xy36h/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @simerino1's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/1eld4xfg) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/1eld4xfg/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/simerino1') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
Davlan/xlm-roberta-base-finetuned-kinyarwanda
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
61
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: whynotwork results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # whynotwork This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 1.2892 - Train End Logits Accuracy: 0.6617 - Train Start Logits Accuracy: 0.6190 - Validation Loss: 1.0393 - Validation End Logits Accuracy: 0.7213 - Validation Start Logits Accuracy: 0.6877 - Epoch: 0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 7377, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Train End Logits Accuracy | Train Start Logits Accuracy | Validation Loss | Validation End Logits Accuracy | Validation Start Logits Accuracy | Epoch | |:----------:|:-------------------------:|:---------------------------:|:---------------:|:------------------------------:|:--------------------------------:|:-----:| | 1.2892 | 0.6617 | 0.6190 | 1.0393 | 0.7213 | 0.6877 | 0 | ### Framework versions - Transformers 4.20.1 - TensorFlow 2.6.4 - Datasets 2.1.0 - Tokenizers 0.12.1
Davlan/xlm-roberta-base-finetuned-naija
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - common_voice metrics: - wer model-index: - name: wav2vec2-large-xls-r-300m-tr-colab results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: common_voice type: common_voice config: ga-IE split: train+validation args: ga-IE metrics: - name: Wer type: wer value: 0.593329432416618 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-large-xls-r-300m-tr-colab This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the common_voice dataset. It achieves the following results on the evaluation set: - Loss: 1.1786 - Wer: 0.5933 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 30 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.3421 | 14.81 | 400 | 1.1795 | 0.5922 | | 0.113 | 29.63 | 800 | 1.1786 | 0.5933 | ### Framework versions - Transformers 4.24.0 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
Davlan/xlm-roberta-base-finetuned-zulu
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- language: en thumbnail: http://www.huggingtweets.com/big___oven-naamitee/1666815335749/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1548322756059545605/ndrcvhSk_400x400.jpg&#39;)"> </div> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1571653458972794884/eaxhUsib_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI CYBORG 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">bymyamym & oskcar</div> <div style="text-align: center; font-size: 14px;">@big___oven-naamitee</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from bymyamym & oskcar. | Data | bymyamym | oskcar | | --- | --- | --- | | Tweets downloaded | 168 | 2628 | | Retweets | 45 | 605 | | Short tweets | 41 | 325 | | Tweets kept | 82 | 1698 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/drhgr3vu/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @big___oven-naamitee's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/vrwpswox) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/vrwpswox/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/big___oven-naamitee') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
Davlan/xlm-roberta-base-sadilar-ner
[ "pytorch", "xlm-roberta", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "XLMRobertaForTokenClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
## Model info This is a BPE tokenizer retrained from scratch on the concatenated [Wikitext-103](https://paperswithcode.com/dataset/wikitext-103) train, evaluation, and test sets. The vocabulary has 28,439 entries. This tokenizer was used to tokenize text for [the GPT-2 model trained on Wikitext-103](https://huggingface.co/Kristijan/gpt2_wt103-40m_12-layer). ## Usage You can download the tokenizer directly from the hub as follows: ``` from transformers import GPT2TokenizerFast tokenizer = GPT2TokenizerFast.from_pretrained("Kristijan/wikitext-103-tokenizer") ``` After cloning/downloading the files, you can load the tokenizer using the `from_pretrained()` method as follows: ``` from transformers import GPT2TokenizerFast tokenizer = GPT2TokenizerFast.from_pretrained(path_to_folder_with_merges_and_vocab_files) ```
Davlan/xlm-roberta-large-masakhaner
[ "pytorch", "tf", "xlm-roberta", "token-classification", "arxiv:2103.11811", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "XLMRobertaForTokenClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,449
2022-10-26T20:35:33Z
--- license: apache-2.0 tags: - generated_from_trainer metrics: - rouge model-index: - name: t5-small-finetuned-xsum results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-small-finetuned-xsum This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.5585 - Rouge1: 45.8829 - Rouge2: 35.4564 - Rougel: 44.7101 - Rougelsum: 45.1103 - Gen Len: 11.9031 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:| | 1.713 | 1.0 | 4578 | 1.5585 | 45.8829 | 35.4564 | 44.7101 | 45.1103 | 11.9031 | ### Framework versions - Transformers 4.24.0 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.2
DeadBeast/mbert-base-cased-finetuned-bengali-fakenews
[ "pytorch", "bert", "text-classification", "bengali", "dataset:BanFakeNews", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
37
null
witch red long hair red lips blue eyes facial scar beautiful woman thin body jewelry and black lizard
DeadBeast/roberta-base-pretrained-mr-2
[ "pytorch", "jax", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: littledataset results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # littledataset This model is a fine-tuned version of [distilroberta-base](https://huggingface.co/distilroberta-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.0000 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 169 | 0.0001 | | No log | 2.0 | 338 | 0.0000 | | 0.0036 | 3.0 | 507 | 0.0001 | | 0.0036 | 4.0 | 676 | 0.0000 | | 0.0036 | 5.0 | 845 | 0.0000 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.12.1 - Datasets 2.5.1 - Tokenizers 0.12.1
Declan/CNN_model_v2
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: mit tags: - audio - music - generation - tensorflow --- # Musika Model: musika_techno ## Model provided by: marcop Pretrained musika_techno model for the [Musika system](https://github.com/marcoppasini/musika) for fast infinite waveform music generation. Introduced in [this paper](https://arxiv.org/abs/2208.08706). ## How to use You can generate music from this pretrained musika_techno model using the notebook available [here](https://colab.research.google.com/drive/1HJWliBXPi-Xlx3gY8cjFI5-xaZgrTD7r?usp=sharing). ### Model description This pretrained GAN system consists of a ResNet-style generator and discriminator. During training, stability is controlled by adapting the strength of gradient penalty regularization on-the-fly. The gradient penalty weighting term is contained in *switch.npy*. The generator is conditioned on a latent coordinate system to produce samples of arbitrary length. The latent representations produced by the generator are then passed to a decoder which converts them into waveform audio. The generator has a context window of about 12 seconds of audio.
Declan/FoxNews_model_v4
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- license: mit --- ### Anime Background Style on Stable Diffusion This is the `<anime-background-style>` concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the [Stable Conceptualizer](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) notebook. You can also train your own concepts and load them into the concept libraries using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb). Here is the new concept you will be able to use as a `style`: ![<anime-background-style> 0](https://huggingface.co/sd-concepts-library/anime-background-style/resolve/main/concept_images/5.jpeg) ![<anime-background-style> 1](https://huggingface.co/sd-concepts-library/anime-background-style/resolve/main/concept_images/8.jpeg) ![<anime-background-style> 2](https://huggingface.co/sd-concepts-library/anime-background-style/resolve/main/concept_images/3.jpeg) ![<anime-background-style> 3](https://huggingface.co/sd-concepts-library/anime-background-style/resolve/main/concept_images/0.jpeg) ![<anime-background-style> 4](https://huggingface.co/sd-concepts-library/anime-background-style/resolve/main/concept_images/9.jpeg) ![<anime-background-style> 5](https://huggingface.co/sd-concepts-library/anime-background-style/resolve/main/concept_images/6.jpeg) ![<anime-background-style> 6](https://huggingface.co/sd-concepts-library/anime-background-style/resolve/main/concept_images/2.jpeg) ![<anime-background-style> 7](https://huggingface.co/sd-concepts-library/anime-background-style/resolve/main/concept_images/1.jpeg) ![<anime-background-style> 8](https://huggingface.co/sd-concepts-library/anime-background-style/resolve/main/concept_images/4.jpeg) ![<anime-background-style> 9](https://huggingface.co/sd-concepts-library/anime-background-style/resolve/main/concept_images/7.jpeg) Here are images generated with this style: ![a suburban street in the style of <anime-background-style>](https://i.imgur.com/S774UmL.png) ![a public pool in the style of <anime-background-style>](https://i.imgur.com/d1Z4V3K.png) ![a lush jungle in the style of <anime-background-style>](https://i.imgur.com/06vhfIH.png) This style does not produce good results as most of the training images were too small. I'll likely train it again with bigger ones.
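A hedged usage sketch for a textual-inversion concept like the one in the card above, assuming a recent `diffusers` release with `load_textual_inversion` and using `runwayml/stable-diffusion-v1-5` as the base checkpoint (the card does not name the base model):

```python
from diffusers import StableDiffusionPipeline

# Load a base Stable Diffusion checkpoint, then the learned concept embedding.
pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe.load_textual_inversion("sd-concepts-library/anime-background-style")

# Use the placeholder token from the concept in a prompt.
image = pipe("a suburban street in the style of <anime-background-style>").images[0]
image.save("street.png")
```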
Declan/FoxNews_model_v6
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
Access to model THEOWNERS77/PimpDogGOD is restricted and you are not in the authorized list. Visit https://huggingface.co/THEOWNERS77/PimpDogGOD to ask for access.
Declan/FoxNews_model_v8
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - wmt16 metrics: - bleu model-index: - name: opus-mt-en-ro-finetuned-en-to-ro results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: wmt16 type: wmt16 config: ro-en split: train args: ro-en metrics: - name: Bleu type: bleu value: 28.1505 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # opus-mt-en-ro-finetuned-en-to-ro This model is a fine-tuned version of [Helsinki-NLP/opus-mt-en-ro](https://huggingface.co/Helsinki-NLP/opus-mt-en-ro) on the wmt16 dataset. It achieves the following results on the evaluation set: - Loss: 1.2886 - Bleu: 28.1505 - Gen Len: 34.1036 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:| | 0.7437 | 1.0 | 38145 | 1.2886 | 28.1505 | 34.1036 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
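A minimal inference sketch for the translation setup described in the card above, using the base checkpoint it names (`Helsinki-NLP/opus-mt-en-ro`); the fine-tuned weights would be loaded the same way once pushed to the Hub or saved locally.

```python
from transformers import pipeline

# English-to-Romanian translation with the base Marian checkpoint named in the card.
translator = pipeline("translation_en_to_ro", model="Helsinki-NLP/opus-mt-en-ro")
print(translator("The weather is nice today.")[0]["translation_text"])
```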
Declan/NewYorkTimes_model_v8
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
Access to model sd-concepts-library/smooth-pencils is restricted and you are not in the authorized list. Visit https://huggingface.co/sd-concepts-library/smooth-pencils to ask for access.
Declan/Politico_model_v5
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imagefolder metrics: - accuracy model-index: - name: swin-tiny-patch4-window7-224-finetuned-eurosat results: - task: name: Image Classification type: image-classification dataset: name: imagefolder type: imagefolder config: default split: train args: default metrics: - name: Accuracy type: accuracy value: 0.9814814814814815 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-tiny-patch4-window7-224-finetuned-eurosat This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.0593 - Accuracy: 0.9815 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.2731 | 1.0 | 190 | 0.1128 | 0.9637 | | 0.1862 | 2.0 | 380 | 0.0759 | 0.9759 | | 0.1409 | 3.0 | 570 | 0.0593 | 0.9815 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
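A short sketch of inference for the image-classification setup in the card above, using the base checkpoint it names (`microsoft/swin-tiny-patch4-window7-224`); the image path is a placeholder, and the EuroSAT label set comes from the fine-tuned weights rather than this base model.

```python
from transformers import pipeline

# Classify a single image with the base Swin checkpoint named in the card.
classifier = pipeline("image-classification", model="microsoft/swin-tiny-patch4-window7-224")
print(classifier("example_satellite_tile.png"))  # placeholder file path
```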
DeskDown/MarianMix_en-zh-10
[ "pytorch", "tensorboard", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: mit --- ### xi on Stable Diffusion This is the `<JinpingXi>` concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the [Stable Conceptualizer](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) notebook. You can also train your own concepts and load them into the concept libraries using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb). Here is the new concept you will be able to use as an `object`: ![<JinpingXi> 0](https://huggingface.co/sd-concepts-library/xi/resolve/main/concept_images/0.jpg) ![<JinpingXi> 1](https://huggingface.co/sd-concepts-library/xi/resolve/main/concept_images/1.jpg) ![<JinpingXi> 2](https://huggingface.co/sd-concepts-library/xi/resolve/main/concept_images/2.jpg) ![<JinpingXi> 3](https://huggingface.co/sd-concepts-library/xi/resolve/main/concept_images/3.jpg)
Despin89/test
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - fi tags: - finnish - gpt2 widget: - text: "Jotta voidaan luoda tekstiä" --- Knowledge-distilled version of the larger model [gpt-fi](https://huggingface.co/hatanp/gpt-fi). This model has approximately 300M parameters, compared to the 1.2B parameters of the larger model. For scripts and more complete model information, refer to the larger model's page.
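A hedged generation sketch using the larger checkpoint linked in the card above; the distilled variant would be loaded the same way with its own repository id.

```python
from transformers import pipeline

# Finnish text generation with the GPT-2-style checkpoint linked in the card.
generator = pipeline("text-generation", model="hatanp/gpt-fi")
print(generator("Jotta voidaan luoda tekstiä", max_new_tokens=40)[0]["generated_text"])
```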
Devmapall/paraphrase-quora
[ "pytorch", "jax", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
3
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imdb model-index: - name: distilbert-base-uncased-finetuned-imdb results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-imdb This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 2.4721 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 2.7086 | 1.0 | 157 | 2.4898 | | 2.5796 | 2.0 | 314 | 2.4230 | | 2.5269 | 3.0 | 471 | 2.4354 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
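A minimal sketch of querying the masked-language-modelling head described in the card above, using the base checkpoint it was fine-tuned from; the fine-tuned IMDB-domain weights would be substituted in the same call.

```python
from transformers import pipeline

# Predict the masked token with the DistilBERT base checkpoint named in the card.
fill_mask = pipeline("fill-mask", model="distilbert-base-uncased")
print(fill_mask("This movie was absolutely [MASK]."))
```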
Devrim/prism-default
[ "license:mit" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-10-27T11:31:33Z
--- language: - fi tags: - finnish - gpt2 widget: - text: "Jotta voidaan luoda tekstiä" library: - transformers license: apache-2.0 --- A smaller version of the larger model [gpt-fi](https://huggingface.co/hatanp/gpt-fi). This model has approximately 125M parameters, compared to the 1.2B parameters of the larger model. For scripts and more complete model information, refer to the larger model's page.