Dataset schema:
- modelId: string (length 4–112)
- lastModified: string (length 24)
- tags: list
- pipeline_tag: string (21 classes)
- files: list
- publishedBy: string (length 2–37)
- downloads_last_month: int32 (0–9.44M)
- library: string (15 classes)
- modelCard: large string (length 0–100k)
omkar1309/RickBot
2021-06-07T13:09:24.000Z
[ "pytorch", "gpt2", "lm-head", "causal-lm", "transformers", "conversational", "text-generation" ]
conversational
[ ".gitattributes", "README.md", "config.json", "pytorch_model.bin", "special_tokens_map.json" ]
omkar1309
1
transformers
--- tags: - conversational --- # My Awesome Model
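The card above gives no usage snippet; a minimal, hedged sketch of chat-style generation with this conversational GPT-2 checkpoint could look like the following (the single-turn DialoGPT-style prompt format and the sampling settings are assumptions, not part of the original card).

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the conversational GPT-2 checkpoint (assumed to follow a DialoGPT-style format)
tokenizer = AutoTokenizer.from_pretrained("omkar1309/RickBot")
model = AutoModelForCausalLM.from_pretrained("omkar1309/RickBot")

# Encode a single user turn, terminated by the end-of-sequence token
input_ids = tokenizer.encode("Hi Rick, how are you?" + tokenizer.eos_token, return_tensors="pt")

# Sample a reply and decode only the newly generated tokens
with torch.no_grad():
    output_ids = model.generate(
        input_ids,
        max_length=100,
        do_sample=True,
        top_p=0.9,
        pad_token_id=tokenizer.eos_token_id,
    )
print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))
```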
onlplab/alephbert-base
2021-05-20T02:14:33.000Z
[ "pytorch", "jax", "bert", "masked-lm", "he", "dataset:oscar", "dataset:wikipedia", "dataset:twitter", "arxiv:1810.04805", "transformers", "language model", "license:apache-2.0", "fill-mask" ]
fill-mask
[ ".gitattributes", "README.md", "config.json", "flax_model.msgpack", "pytorch_model.bin", "special_tokens_map.json", "tokenizer_config.json", "training_args.bin", "vocab.txt" ]
onlplab
5,286
transformers
--- language: - he tags: - language model license: apache-2.0 datasets: - oscar - wikipedia - twitter --- # AlephBERT ## Hebrew Language Model State-of-the-art language model for Hebrew. Based on Google's BERT architecture [(Devlin et al. 2018)](https://arxiv.org/abs/1810.04805). #### How to use ```python from transformers import BertModel, BertTokenizerFast alephbert_tokenizer = BertTokenizerFast.from_pretrained('onlplab/alephbert-base') alephbert = BertModel.from_pretrained('onlplab/alephbert-base') # if not finetuning - disable dropout alephbert.eval() ``` ## Training data 1. OSCAR [(Ortiz, 2019)](https://oscar-corpus.com/) Hebrew section (10 GB text, 20 million sentences). 2. Hebrew dump of [Wikipedia](https://dumps.wikimedia.org/hewiki/latest/) (650 MB text, 3 million sentences). 3. Hebrew Tweets collected from the Twitter sample stream (7 GB text, 70 million sentences). ## Training procedure Trained on a DGX machine (8 V100 GPUs) using the standard huggingface training procedure. Since the larger part of our training data is based on tweets we decided to start by optimizing using Masked Language Model loss only. To optimize training time we split the data into 4 sections based on max number of tokens: 1. num tokens < 32 (70M sentences) 2. 32 <= num tokens < 64 (12M sentences) 3. 64 <= num tokens < 128 (10M sentences) 4. 128 <= num tokens < 512 (1.5M sentences) Each section was first trained for 5 epochs with an initial learning rate set to 1e-4. Then each section was trained for another 5 epochs with an initial learning rate set to 1e-5, for a total of 10 epochs. Total training time was 8 days.
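Since the model is tagged fill-mask, a short pipeline sketch may also be useful (not part of the original card; the Hebrew example sentence is illustrative only).

```python
from transformers import pipeline

# Masked-token prediction with AlephBERT; BERT-style models use the [MASK] token
fill_mask = pipeline("fill-mask", model="onlplab/alephbert-base")

# "He went to [MASK] yesterday." -- an illustrative Hebrew sentence
for prediction in fill_mask("הוא הלך ל[MASK] אתמול."):
    print(prediction["token_str"], round(prediction["score"], 3))
```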
ontocord/fastspeech2-en
2021-04-08T06:57:54.000Z
[ "pytorch", "fastspeech2", "en", "dataset:LJSpeech", "dataset:LibriTTS", "arxiv:2006.04558", "transformers", "audio", "TTS", "license:apache-2.0" ]
[ ".gitattributes", "README.md", "config.json", "pytorch_model.bin", "special_tokens_map.json", "tokenizer_config.json", "vocab.json" ]
ontocord
7
transformers
--- language: en datasets: - LJSpeech - LibriTTS tags: - audio - TTS license: apache-2.0 --- # ontocord/fastspeech2-en Modified version of the text-to-speech system [FastSpeech 2: Fast and High-Quality End-to-End Text to Speech] (https://arxiv.org/abs/2006.04558v1). ## Installation ``` git clone https://github.com/ontocord/fastspeech2_hf pip install transformers torchaudio ``` ## Usage The model can be used directly as follows: ``` # load the model and tokenizer from fastspeech2_hf.modeling_fastspeech2 import FastSpeech2ForPretraining, FastSpeech2Tokenizer model = FastSpeech2ForPretraining.from_pretrained("ontocord/fastspeech2-en") tokenizer = FastSpeech2Tokenizer.from_pretrained("ontocord/fastspeech2-en") # some helper routines from IPython.display import Audio as IPAudio, display as IPdisplay import torch import torchaudio def play_audio(waveform, sample_rate): waveform = waveform.numpy() if len(waveform.shape)==1: IPdisplay(IPAudio(waveform, rate=sample_rate)) return num_channels, num_frames = waveform.shape if num_channels <= 1: IPdisplay(IPAudio(waveform[0], rate=sample_rate)) elif num_channels == 2: IPdisplay(IPAudio((waveform[0], waveform[1]), rate=sample_rate)) else: raise ValueError("Waveform with more than 2 channels are not supported.") # set the g2p module for the tokenizer tokenizer.set_g2p(model.fastspeech2.g2p) # you can run in half mode on gpu. model = model.cuda().half() sentences = [ "Advanced text to speech models such as Fast Speech can synthesize speech significantly faster than previous auto regressive models with comparable quality. The training of Fast Speech model relies on an auto regressive teacher model for duration prediction and knowledge distillation, which can ease the one to many mapping problem in T T S. However, Fast Speech has several disadvantages, 1, the teacher student distillation pipeline is complicated, 2, the duration extracted from the teacher model is not accurate enough, and the target mel spectrograms distilled from teacher model suffer from information loss due to data simplification, both of which limit the voice quality. ", "Printing, in the only sense with which we are at present concerned, differs from most if not from all the arts and crafts represented in the Exhibition " "in being comparatively modern. ", "For although the Chinese took impressions from wood blocks engraved in relief for centuries before the woodcutters of the Netherlands, by a similar process " "produced the block books, which were the immediate predecessors of the true printed book, " "the invention of movable metal letters in the middle of the fifteenth century may justly be considered as the invention of the art of printing. ", "And it is worth mention in passing that, as an example of fine typography, " "the earliest book printed with movable types, the Gutenberg, or \"forty-two line Bible\" of about 1455, " "has never been surpassed. ", "Printing, then, for our purpose, may be considered as the art of making books by means of movable types. 
" "Now, as all books not primarily intended as picture-books consist principally of types composed to form letterpress,", ] batch = tokenizer(sentences, return_tensors="pt", padding=True) model.eval() with torch.no_grad(): out = model(use_postnet=False, **batch) wav =out[-2] for line, phone, w in zip(sentences, tokenizer.batch_decode(batch['input_ids']), wav): print ("txt:", line) print ("phoneme:", phone) play_audio(w.type(torch.FloatTensor), model.config.sampling_rate) ``` ##Github Code Repo Current code for this model can be found [here](https://github.com/ontocord/fastspeech2_hf) This is a work in progress (WIP) port of the model and code from [this repo] (https://github.com/ming024/FastSpeech2). The datasets on which this model was trained: - LJSpeech: a single-speaker English dataset consists of 13100 short audio clips of a female speaker reading passages from 7 non-fiction books, approximately 24 hours in total. - LibriTTS: a multi-speaker English dataset containing 585 hours of speech by 2456 speakers.
ontocord/mt5-fix-asr-vietnamese
2021-04-12T15:38:04.000Z
[ "pytorch", "mt5", "vi", "transformers", "language-modeling", "audio", "automatic-speech-recognition", "speech", "xlsr-fine-tuning-week", "license:apache-2.0" ]
automatic-speech-recognition
[ ".gitattributes", "README.md", "README.txt", "config.json", "pytorch_model.bin", "special_tokens_map.json", "spiece.model", "tokenizer_config.json" ]
ontocord
30
transformers
--- language: vi datasets: - common_voice - FOSD: https://data.mendeley.com/datasets/k9sxg2twv4/4 metrics: - wer tags: - language-modeling - audio - automatic-speech-recognition - speech - xlsr-fine-tuning-week license: apache-2.0 model-index: - name: MT5 Fix Asr Vietnamese by Ontocord results: - task: name: Speech Recognition type: automatic-speech-recognition dataset: name: Common Voice vi type: common_voice args: vi metrics: - name: Test WER type: wer value: 25.207182 --- # Ontocord/mt5-fix-asr-vietnamese Fine-tuned mt5 to correct output of an ASR model trained on [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) which was trained on Vietnamese using the [Common Voice](https://huggingface.co/datasets/common_voice), and [FOSD](https://data.mendeley.com/datasets/k9sxg2twv4/4). ## Usage The model can be used directly by submitting vietnamese asr text, but is is best to use with the ontocord/wav2vec2-large-xlsr-vietnamese model. ``` import torch import torchaudio from datasets import load_dataset from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor, pipelines device = torch.device("cuda" if torch.cuda.is_available() else "cpu") test_dataset = load_dataset("common_voice", "vi", split="test[:2%]") processor = Wav2Vec2Processor.from_pretrained("ontocord/wav2vec2-large-xlsr-53-vietnamese") model = Wav2Vec2ForCTC.from_pretrained("ontocord/wav2vec2-large-xlsr-53-vietnamese").to(device) mt5 = pipelines.pipeline("text2text-generation","ontocord/mt5-fix-asr-vietnamese", device=0 if device == "cuda" else -1) resampler = torchaudio.transforms.Resample(48_000, 16_000) # Preprocessing the datasets. # We need to read the aduio files as arrays def speech_file_to_array_fn(batch): speech_array, sampling_rate = torchaudio.load(batch["path"]) batch["speech"] = resampler(speech_array).squeeze().numpy() return batch test_dataset = test_dataset.map(speech_file_to_array_fn) inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True) with torch.no_grad(): logits = model(inputs.input_values.to(device), attention_mask=inputs.attention_mask.to(device)).logits predicted_ids = torch.argmax(logits, dim=-1) print("Prediction:", [aHash['generated_text'] for aHash in mt5(processor.batch_decode(predicted_ids), max_length=100)]) print("Reference:", test_dataset["sentence"][:2]) ``` ## Evaluation The model can be evaluated as follows on the Vietnamese test data of Common Voice. ``` import torch import torchaudio from datasets import load_dataset, load_metric from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor, pipelines import re test_dataset = load_dataset("common_voice", "vi", split="test") wer = load_metric("wer") device = torch.device("cuda" if torch.cuda.is_available() else "cpu") processor = Wav2Vec2Processor.from_pretrained("ontocord/wav2vec2-large-xlsr-vietnamese") model = Wav2Vec2ForCTC.from_pretrained("ontocord/wav2vec2-large-xlsr-vietnamese").to(device) mt5 = pipelines.pipeline("text2text-generation","ontocord/mt5-fix-asr-vietnamese", device=0 if device == "cuda" else -1) chars_to_ignore_regex = '[\\\+\@\ǀ\,\?\.\!\-\;\:\"\“\%\‘\”\�]' resampler = torchaudio.transforms.Resample(48_000, 16_000) # Preprocessing the datasets. 
# We need to read the aduio files as arrays def speech_file_to_array_fn(batch): batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower() speech_array, sampling_rate = torchaudio.load(batch["path"]) batch["speech"] = resampler(speech_array).squeeze().numpy() return batch test_dataset = test_dataset.map(speech_file_to_array_fn) # you may also want to use the decode_string from https://huggingface.co/Nhut/wav2vec2-large-xlsr-vietnamese def evaluate(batch): inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True) with torch.no_grad(): logits = model(inputs.input_values.to(device), attention_mask=inputs.attention_mask.to(device)).logits pred_ids = torch.argmax(logits, dim=-1) max_length = int(pred_ids.size()[1]) txt = [aHash['generated_text'].strip() for aHash in mt5(processor.batch_decode(pred_ids), max_length=max_length)] batch["pred_strings"] = txt return batch result = test_dataset.map(evaluate, batched=True, batch_size=8) print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"]))) ``` **Test Result**: 25.207182 ## Training The Common Voice train, validation, and FPT datasets were used for training. The script used for training can be found here # TODO
ontocord/wav2vec2-large-xlsr-vietnamese
2021-03-28T23:57:07.000Z
[ "pytorch", "wav2vec2", "vi", "transformers", "audio", "automatic-speech-recognition", "speech", "xlsr-fine-tuning-week", "license:apache-2.0" ]
automatic-speech-recognition
[ ".gitattributes", "README.md", "config.json", "preprocessor_config.json", "pytorch_model.bin", "special_tokens_map.json", "tokenizer_config.json", "vocab.json" ]
ontocord
22
transformers
--- language: vi datasets: - common_voice - FOSD: https://data.mendeley.com/datasets/k9sxg2twv4/4 metrics: - wer tags: - audio - automatic-speech-recognition - speech - xlsr-fine-tuning-week license: apache-2.0 model-index: - name: XLSR Wav2Vec2 Vietnamese by Ontocord results: - task: name: Speech Recognition type: automatic-speech-recognition dataset: name: Common Voice vi type: common_voice args: vi metrics: - name: Test WER type: wer value: 42.403315 --- # Ontocord/Wav2Vec2-Large-XLSR-53-Vietnamese Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Vietnamese using the [Common Voice](https://huggingface.co/datasets/common_voice), [FOSD](https://data.mendeley.com/datasets/k9sxg2twv4/4). When using this model, make sure that your speech input is sampled at 16kHz. ## Usage The model can be used directly (without a language model) as follows: ``` import torch import torchaudio from datasets import load_dataset from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor test_dataset = load_dataset("common_voice", "vi", split="test[:2%]") processor = Wav2Vec2Processor.from_pretrained("ontocord/wav2vec2-large-xlsr-53-vietnamese") model = Wav2Vec2ForCTC.from_pretrained("ontocord/wav2vec2-large-xlsr-53-vietnamese") resampler = torchaudio.transforms.Resample(48_000, 16_000) # Preprocessing the datasets. # We need to read the aduio files as arrays def speech_file_to_array_fn(batch): speech_array, sampling_rate = torchaudio.load(batch["path"]) batch["speech"] = resampler(speech_array).squeeze().numpy() return batch test_dataset = test_dataset.map(speech_file_to_array_fn) inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True) with torch.no_grad(): logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits predicted_ids = torch.argmax(logits, dim=-1) print("Prediction:", processor.batch_decode(predicted_ids)) print("Reference:", test_dataset["sentence"][:2]) ``` ## Evaluation The model can be evaluated as follows on the Vietnamese test data of Common Voice. ``` import torch import torchaudio from datasets import load_dataset, load_metric from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor import re test_dataset = load_dataset("common_voice", "vi", split="test") wer = load_metric("wer") processor = Wav2Vec2Processor.from_pretrained("ontocord/wav2vec2-large-xlsr-vietnamese") model = Wav2Vec2ForCTC.from_pretrained("ontocord/wav2vec2-large-xlsr-vietnamese") model.to("cuda") chars_to_ignore_regex = '[\\\+\@\ǀ\,\?\.\!\-\;\:\"\“\%\‘\”\�]' resampler = torchaudio.transforms.Resample(48_000, 16_000) # Preprocessing the datasets. 
# We need to read the aduio files as arrays def speech_file_to_array_fn(batch): batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower() speech_array, sampling_rate = torchaudio.load(batch["path"]) batch["speech"] = resampler(speech_array).squeeze().numpy() return batch test_dataset = test_dataset.map(speech_file_to_array_fn) # you may also want to use the decode_string from https://huggingface.co/Nhut/wav2vec2-large-xlsr-vietnamese def evaluate(batch): inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True) with torch.no_grad(): logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits pred_ids = torch.argmax(logits, dim=-1) batch["pred_strings"] = processor.batch_decode(pred_ids) return batch result = test_dataset.map(evaluate, batched=True, batch_size=8) print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"]))) ``` **Test Result**: 42.403315 ## Training The Common Voice train, validation, and FPT datasets were used for training. The script used for training can be found here # TODO
ooalancoo/test_model1
2021-05-24T21:09:45.000Z
[]
[ ".gitattributes" ]
ooalancoo
0
ooalancoo/test_model3
2021-05-24T21:20:10.000Z
[]
[ ".gitattributes" ]
ooalancoo
0
ooalancoo/test_model4
2021-05-26T21:29:25.000Z
[]
[ ".gitattributes" ]
ooalancoo
0
ooalancoo/test_model5
2021-05-26T21:30:46.000Z
[]
[ ".gitattributes" ]
ooalancoo
0
openai/clip-vit-base-patch32
2021-06-07T12:27:48.000Z
[ "pytorch", "jax", "clip", "arxiv:2103.00020", "arxiv:1908.04913", "transformers", "vision" ]
[ ".gitattributes", "README.md", "config.json", "flax_model.msgpack", "merges.txt", "preprocessor_config.json", "pytorch_model.bin", "special_tokens_map.json", "tokenizer.json", "tokenizer_config.json", "vocab.json" ]
openai
3,479
transformers
--- tags: - vision --- # Model Card: CLIP Disclaimer: The model card is taken and modified from the official CLIP repository, it can be found [here](https://github.com/openai/CLIP/blob/main/model-card.md). ## Model Details The CLIP model was developed by researchers at OpenAI to learn about what contributes to robustness in computer vision tasks. The model was also developed to test the ability of models to generalize to arbitrary image classification tasks in a zero-shot manner. It was not developed for general model deployment - to deploy models like CLIP, researchers will first need to carefully study their capabilities in relation to the specific context they’re being deployed within. ### Model Date January 2021 ### Model Type The base model uses a ViT-B/32 Transformer architecture as an image encoder and uses a masked self-attention Transformer as a text encoder. These encoders are trained to maximize the similarity of (image, text) pairs via a contrastive loss. There is also a variant of the model where the ResNet image encoder is replaced with a Vision Transformer. ### Model Version Initially, we’ve released one CLIP model based on the Vision Transformer architecture equivalent to ViT-B/32, along with the RN50 model, using the architecture equivalent to ResNet-50. *This port does not include the ResNet model.* Please see the paper linked below for further details about their specification. ### Documents - [Blog Post](https://openai.com/blog/clip/) - [CLIP Paper](https://arxiv.org/abs/2103.00020) ### Use with Transformers ```python3 from PIL import Image import requests from transformers import CLIPProcessor, CLIPModel model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32") url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True) outputs = model(**inputs) logits_per_image = outputs.logits_per_image # this is the image-text similarity score probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities ``` ## Model Use ### Intended Use The model is intended as a research output for research communities. We hope that this model will enable researchers to better understand and explore zero-shot, arbitrary image classification. We also hope it can be used for interdisciplinary studies of the potential impact of such models - the CLIP paper includes a discussion of potential downstream impacts to provide an example for this sort of analysis. #### Primary intended uses The primary intended users of these models are AI researchers. We primarily imagine the model will be used by researchers to better understand robustness, generalization, and other capabilities, biases, and constraints of computer vision models. ### Out-of-Scope Use Cases **Any** deployed use case of the model - whether commercial or not - is currently out of scope. Non-deployed use cases such as image search in a constrained environment, are also not recommended unless there is thorough in-domain testing of the model with a specific, fixed class taxonomy. This is because our safety assessment demonstrated a high need for task specific testing especially given the variability of CLIP’s performance with different class taxonomies. This makes untested and unconstrained deployment of the model in any use case currently potentially harmful. 
Certain use cases which would fall under the domain of surveillance and facial recognition are always out-of-scope regardless of performance of the model. This is because the use of artificial intelligence for tasks such as these can be premature currently given the lack of testing norms and checks to ensure its fair use. Since the model has not been purposefully trained in or evaluated on any languages other than English, its use should be limited to English language use cases. ## Data The model was trained on publicly available image-caption data. This was done through a combination of crawling a handful of websites and using commonly-used pre-existing image datasets such as [YFCC100M](http://projects.dfki.uni-kl.de/yfcc100m/). A large portion of the data comes from our crawling of the internet. This means that the data is more representative of people and societies most connected to the internet which tend to skew towards more developed nations, and younger, male users. ### Data Mission Statement Our goal with building this dataset was to test out robustness and generalizability in computer vision tasks. As a result, the focus was on gathering large quantities of data from different publicly-available internet data sources. The data was gathered in a mostly non-interventionist manner. However, we only crawled websites that had policies against excessively violent and adult images and allowed us to filter out such content. We do not intend for this dataset to be used as the basis for any commercial or deployed model and will not be releasing the dataset. ## Performance and Limitations ### Performance We have evaluated the performance of CLIP on a wide range of benchmarks across a variety of computer vision datasets such as OCR to texture recognition to fine-grained classification. The paper describes model performance on the following datasets: - Food101 - CIFAR10 - CIFAR100 - Birdsnap - SUN397 - Stanford Cars - FGVC Aircraft - VOC2007 - DTD - Oxford-IIIT Pet dataset - Caltech101 - Flowers102 - MNIST - SVHN - IIIT5K - Hateful Memes - SST-2 - UCF101 - Kinetics700 - Country211 - CLEVR Counting - KITTI Distance - STL-10 - RareAct - Flickr30 - MSCOCO - ImageNet - ImageNet-A - ImageNet-R - ImageNet Sketch - ObjectNet (ImageNet Overlap) - Youtube-BB - ImageNet-Vid ## Limitations CLIP and our analysis of it have a number of limitations. CLIP currently struggles with respect to certain tasks such as fine grained classification and counting objects. CLIP also poses issues with regards to fairness and bias which we discuss in the paper and briefly in the next section. Additionally, our approach to testing CLIP also has an important limitation- in many cases we have used linear probes to evaluate the performance of CLIP and there is evidence suggesting that linear probes can underestimate model performance. ### Bias and Fairness We find that the performance of CLIP - and the specific biases it exhibits - can depend significantly on class design and the choices one makes for categories to include and exclude. We tested the risk of certain kinds of denigration with CLIP by classifying images of people from [Fairface](https://arxiv.org/abs/1908.04913) into crime-related and non-human animal categories. We found significant disparities with respect to race and gender. Additionally, we found that these disparities could shift based on how the classes were constructed. (Details captured in the Broader Impacts Section in the paper). 
We also tested the performance of CLIP on gender, race and age classification using the Fairface dataset (We default to using race categories as they are constructed in the Fairface dataset.) in order to assess quality of performance across different demographics. We found accuracy >96% across all races for gender classification with ‘Middle Eastern’ having the highest accuracy (98.4%) and ‘White’ having the lowest (96.5%). Additionally, CLIP averaged ~93% for racial classification and ~63% for age classification. Our use of evaluations to test for gender, race and age classification as well as denigration harms is simply to evaluate performance of the model across people and surface potential risks and not to demonstrate an endorsement/enthusiasm for such tasks. ## Feedback ### Where to send questions or comments about the model Please use [this Google Form](https://forms.gle/Uv7afRH5dvY34ZEs9)
opensource/extract_names
2021-01-19T04:59:04.000Z
[ "tf", "xlm-roberta", "token-classification", "multilingual", "transformers", "Extract Names", "license:apache-2.0" ]
token-classification
[ ".gitattributes", "README.md", "config.json", "sentencepiece.bpe.model", "special_tokens_map.json", "tf_model.h5", "tokenizer_config.json" ]
opensource
181
transformers
--- language: multilingual tags: - Extract Names license: apache-2.0 --- ## Extract names in any language.
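The card ships no example; a minimal sketch follows (the example sentence is illustrative, and framework="tf" is passed on the assumption that only the TensorFlow weights listed in the repo are available).

```python
from transformers import pipeline

# Token-classification pipeline over the multilingual XLM-R checkpoint;
# the repo only lists tf_model.h5, hence framework="tf"
ner = pipeline("token-classification", model="opensource/extract_names", framework="tf")

for entity in ner("Angela Merkel met Emmanuel Macron in Paris."):
    print(entity["word"], entity["entity"], round(float(entity["score"]), 3))
```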
ordinarykids/borges01
2020-12-16T23:29:13.000Z
[]
[ ".gitattributes" ]
ordinarykids
0
ordinarykids/borges02
2021-01-29T12:54:07.000Z
[]
[ ".gitattributes", "README.md" ]
ordinarykids
0
# MyModelName Borges02 ## Model description You can generate new short stories from Jorge Luis Borges. ## Intended uses & limitations #### How to use ```python # You can include sample code which will be formatted ``` #### Limitations and bias Provide examples of latent issues and potential remediations. ## Training data Describe the data you used to train the model. If you initialized it with pre-trained weights, add a link to the pre-trained model card or repository with description of the pre-training data. ## Training procedure Preprocessing, hardware used, hyperparameters... ## Eval results ### BibTeX entry and citation info ```bibtex @inproceedings{..., year={2020} } ```
ordinarykids/borges03
2021-05-23T10:52:08.000Z
[]
[ ".gitattributes" ]
ordinarykids
1
ordinarykids/first-model
2021-01-23T19:37:23.000Z
[]
[ ".gitattributes" ]
ordinarykids
0
orendar/distilbert-base-cased-finetuned-conll03-english
2021-01-05T11:26:25.000Z
[ "pytorch", "distilbert", "token-classification", "transformers" ]
token-classification
[ ".gitattributes", "config.json", "pytorch_model.bin" ]
orendar
144
transformers
orendar/language_model
2021-06-09T06:42:58.000Z
[ "pytorch", "gpt_neo", "causal-lm", "transformers", "text-generation" ]
text-generation
[ ".gitattributes", "all_results.json", "config.json", "merges.txt", "pytorch_model.bin", "special_tokens_map.json", "tokenizer.json", "tokenizer_config.json", "train_results.json", "trainer_state.json", "training_args.bin", "vocab.json" ]
orendar
57
transformers
orko/milkyway_models
2021-04-12T10:06:36.000Z
[]
[ ".gitattributes", "bert_finetuned/config.json", "bert_finetuned/pytorch_model.bin", "longformer_finetuned/config.json", "longformer_finetuned/pytorch_model.bin" ]
orko
0
orzhan/rugpt3-simplify-large
2021-05-31T14:31:36.000Z
[ "pytorch", "gpt2", "lm-head", "causal-lm", "transformers", "text-generation" ]
text-generation
[ ".gitattributes", "README.md", "config.json", "merges.txt", "pytorch_model.bin", "special_tokens_map.json", "tokenizer_config.json", "vocab.json" ]
orzhan
16
transformers
--- language: ru --- Text simplification model for Russian. Fine-tuned ruGPT3-large. https://github.com/orzhan/rusimscore
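A minimal usage sketch, not from the original card: the prompt format expected by the fine-tuned model is an assumption, and the linked rusimscore repository should be consulted for the exact preprocessing.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the fine-tuned ruGPT3-large checkpoint
tokenizer = AutoTokenizer.from_pretrained("orzhan/rugpt3-simplify-large")
model = AutoModelForCausalLM.from_pretrained("orzhan/rugpt3-simplify-large")

# Feed a Russian sentence and let the model continue with a simplified version
inputs = tokenizer("Сложное предложение для упрощения.", return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=48, num_beams=4, early_stopping=True)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```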
osanseviero/adapter-example
2021-06-11T09:22:38.000Z
[ "adapter-transformers" ]
[ ".gitattributes", "README.md", "config.json", "head_config.json", "pytorch_adapter.bin", "pytorch_model_head.bin" ]
osanseviero
6
adapter-transformers
--- tags: - adapter-transformers --- # BERT-base Adapter for SQuAD 1.1 Imported from https://adapterhub.ml/adapters/ukp/bert-base-uncased_qa_squad1_houlsby/.
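A hedged sketch of loading this adapter, assuming the adapter-transformers fork's AutoModelWithHeads/load_adapter API; the source="hf" argument and the bert-base-uncased base model are assumptions, not stated in the card.

```python
# Requires the adapter-transformers fork of transformers
from transformers import AutoModelWithHeads, AutoTokenizer

# Load the assumed BERT base model, then pull the adapter from the Hub and activate it
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModelWithHeads.from_pretrained("bert-base-uncased")
adapter_name = model.load_adapter("osanseviero/adapter-example", source="hf")
model.active_adapters = adapter_name
```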
osanseviero/adapter-test
2021-06-07T07:19:42.000Z
[ "adapter-transformers" ]
[ ".gitattributes", "README.md" ]
osanseviero
0
adapter-transformers
--- tags: - adapter-transformers --- # Adapter transformers
osanseviero/clip-st
2021-05-17T08:59:53.000Z
[ "pytorch", "distilbert", "sentence-transformers", "feature-extraction" ]
feature-extraction
[ ".gitattributes", "README.md", "config.json", "modules.json", "pytorch_model.bin", "sentence_bert_config.json", "special_tokens_map.json", "tokenizer_config.json", "vocab.txt", "1_Pooling/config.json", "2_Dense/config.json", "2_Dense/pytorch_model.bin" ]
osanseviero
13
sentence-transformers
--- tags: - sentence-transformers - feature-extraction --- # TODO: Name of Model TODO: Description ## Model Description TODO: Add relevant content (0) Base Transformer Type: DistilBertModel (1) Pooling mean (2) Dense 768x512 ## Usage (Sentence-Transformers) Using this model becomes more convenient when you have [sentence-transformers](https://github.com/UKPLab/sentence-transformers) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence"] model = SentenceTransformer(TODO) embeddings = model.encode(sentences) print(embeddings) ``` ## TODO: Training Procedure ## TODO: Evaluation Results ## TODO: Citing & Authors
osanseviero/da_core_news_sm
2021-05-21T13:34:49.000Z
[]
[ ".gitattributes" ]
osanseviero
0
osanseviero/distilbert-base-nli-wkpooling
2021-05-04T12:35:09.000Z
[ "pytorch", "distilbert", "transformers" ]
[ ".DS_Store", ".gitattributes", "config.json", "modules.json", "pytorch_model.bin", "sentence_bert_config.json", "special_tokens_map.json", "tokenizer_config.json", "vocab.txt", "1_WKPooling/config.json" ]
osanseviero
14
transformers
osanseviero/fastspeech
2021-06-08T12:32:13.000Z
[ "translation", "license:apache-2.0" ]
translation
[ ".gitattributes", "README.md", "config.yml", "model.h5", "processor.json" ]
osanseviero
0
--- tags: - translation widget: - text: "I have a problem with my iphone that needs to be resolved asap!!" - max_length: 1 license: apache-2.0 --- Test
osanseviero/flair-ner-english
2021-05-19T14:44:12.000Z
[ "pytorch", "en", "dataset:conll2003", "flair", "token-classification", "sequence-tagger-model" ]
token-classification
[ ".gitattributes", "README.md", "pytorch_model.bin" ]
osanseviero
0
flair
--- tags: - flair - token-classification - sequence-tagger-model language: en datasets: - conll2003 widget: - text: "George Washington went to Washington" --- ## English NER in Flair (default model)
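A short sketch of tagging the widget sentence with Flair (not part of the original card; it assumes the tagger resolves directly from the Hub identifier).

```python
from flair.data import Sentence
from flair.models import SequenceTagger

# Load the tagger from the Hugging Face Hub
tagger = SequenceTagger.load("osanseviero/flair-ner-english")

# Tag the widget sentence and print the detected entities
sentence = Sentence("George Washington went to Washington")
tagger.predict(sentence)
for span in sentence.get_spans("ner"):
    print(span)
```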
osanseviero/flair-ner-english3
2021-06-10T10:46:45.000Z
[ "pytorch" ]
[ ".gitattributes", "pytorch_model.bin" ]
osanseviero
0
osanseviero/full-sentence-distillroberta2
2021-05-21T08:46:10.000Z
[ "pytorch", "jax", "roberta", "sentence-transformers", "sentence-similarity" ]
sentence-similarity
[ ".DS_Store", ".gitattributes", "README.md", "config.json", "flax_model.msgpack", "merges.txt", "modules.json", "pytorch_model.bin", "sentence_bert_config.json", "special_tokens_map.json", "tokenizer_config.json", "vocab.json", "1_Pooling/config.json" ]
osanseviero
186
sentence-transformers
--- tags: - sentence-transformers - sentence-similarity --- ## Testing Sentence Transformer
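A minimal sketch of encoding sentences with this checkpoint via sentence-transformers (not part of the original card; the example sentences are illustrative).

```python
from sentence_transformers import SentenceTransformer, util

# Load the model and embed two sentences
model = SentenceTransformer("osanseviero/full-sentence-distillroberta2")
embeddings = model.encode(["A test sentence", "Another test sentence"])

# Compare the embeddings with cosine similarity
print(util.pytorch_cos_sim(embeddings[0], embeddings[1]))
```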
osanseviero/full-sentence-distillroberta3
2021-06-07T14:38:50.000Z
[ "pytorch", "jax", "roberta", "sentence-transformers", "causal-lm", "license:cc-by-sa-4.0", "feature-extraction", "pipeline_tag:feature-extraction", "text-generation" ]
feature-extraction
[ ".gitattributes", "README.md", "config.json", "flax_model.msgpack", "merges.txt", "modules.json", "pytorch_model.bin", "sentence_bert_config.json", "special_tokens_map.json", "tokenizer.json", "tokenizer_config.json", "vocab.json", "1_Pooling/config.json" ]
osanseviero
120
sentence-transformers
--- pipeline_tag: feature-extraction tags: - sentence-transformers - causal-lm license: - CC-BY-SA-4.0 --- # TODO: Name of Model TODO: Description ## Model Description TODO: Add relevant content (0) Base Transformer Type: RobertaModel (1) Pooling mean ## Usage (Sentence-Transformers) Using this model becomes more convenient when you have [sentence-transformers](https://github.com/UKPLab/sentence-transformers) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence"] model = SentenceTransformer(TODO) embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) ```python from transformers import AutoTokenizer, AutoModel import torch # The next step is only needed if you want to define your own pooling function. # Max Pooling - Take the max value over time for every dimension. def max_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() token_embeddings[input_mask_expanded == 0] = -1e9 # Set padding tokens to large negative value max_over_time = torch.max(token_embeddings, 1)[0] return max_over_time # Sentences we want sentence embeddings for sentences = ['This is an example sentence'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained(TODO) model = AutoModel.from_pretrained(TODO) # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, max_length=128, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, max pooling. sentence_embeddings = max_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## TODO: Training Procedure ## TODO: Evaluation Results ## TODO: Citing & Authors
osanseviero/full-sentence-upload-to-hub2
2021-05-20T19:12:25.000Z
[ "pytorch", "jax", "roberta", "transformers" ]
[ ".gitattributes", "config.json", "flax_model.msgpack", "merges.txt", "modules.json", "pytorch_model.bin", "sentence_bert_config.json", "special_tokens_map.json", "tokenizer_config.json", "vocab.json", "1_Pooling/config.json" ]
osanseviero
7
transformers
osanseviero/keras-conv-mnist
2021-06-17T13:23:04.000Z
[ "tf", "transformers", "license:apache-2.0", "image-classification", "keras" ]
image-classification
[ ".gitattributes", "README.md", "config.json", "tf_model.h5" ]
osanseviero
15
transformers
osanseviero/keras-dog-or-cat
2021-06-17T12:14:33.000Z
[ "tf", "transformers", "license:apache-2.0", "image-classification", "keras" ]
image-classification
[ ".gitattributes", "README.md", "config.json", "tf_model.h5" ]
osanseviero
17
transformers
osanseviero/keras
2021-06-17T07:53:53.000Z
[ "tf" ]
[ ".gitattributes", "tf_model.h5", "saved_model/1/keras_metadata.pb", "saved_model/1/saved_model.pb", "saved_model/1/variables/variables.data-00000-of-00001", "saved_model/1/variables/variables.index" ]
osanseviero
0
osanseviero/melgan
2021-05-17T16:25:01.000Z
[]
[ ".gitattributes", "config.yml", "model.h5" ]
osanseviero
0
osanseviero/my_new_model
2021-06-07T14:27:42.000Z
[ "pytorch", "roberta", "sentence-transformers", "feature-extraction" ]
feature-extraction
[ ".gitattributes", "README.md", "config.json", "merges.txt", "modules.json", "pytorch_model.bin", "sentence_bert_config.json", "special_tokens_map.json", "tokenizer.json", "tokenizer_config.json", "vocab.json", "1_Pooling/config.json" ]
osanseviero
3
sentence-transformers
--- tags: - sentence-transformers - feature-extraction --- # Name of Model <!--- Describe your model here --> ## Model Description The model consists of the following layers: (0) Base Transformer Type: RobertaModel (1) mean Pooling ## Usage (Sentence-Transformers) Using this model becomes more convenient when you have [sentence-transformers](https://github.com/UKPLab/sentence-transformers) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence"] model = SentenceTransformer('model_name') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() sum_embeddings = torch.sum(token_embeddings * input_mask_expanded, 1) sum_mask = torch.clamp(input_mask_expanded.sum(1), min=1e-9) return sum_embeddings / sum_mask # Sentences we want sentence embeddings for sentences = ['This is an example sentence'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('model_name') model = AutoModel.from_pretrained('model_name') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, max_length=128, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, max pooling. sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Training Procedure <!--- Describe how your model was trained --> ## Evaluation Results <!--- Describe how your model was evaluated --> ## Citing & Authors <!--- Describe where people can find more information -->
osanseviero/pickle_example
2021-06-11T12:47:34.000Z
[]
[ ".gitattributes", "sklearn_model.pickle" ]
osanseviero
0
osanseviero/t5-finetuned-test
2021-06-08T12:32:33.000Z
[ "pytorch", "t5", "seq2seq", "eng", "dataset:Wikihow", "transformers", "wikihow", "t5-small", "lm-head", "pipeline:summarization", "summarization", "text2text-generation" ]
summarization
[ ".gitattributes", "README.md", "config.json", "pytorch_model.bin", "special_tokens_map.json", "spiece.model", "tokenizer_config.json" ]
osanseviero
35
transformers
--- language: "eng" tags: - wikihow - t5-small - pytorch - lm-head - seq2seq - t5 - pipeline:summarization - summarization datasets: - Wikihow widget: - max_length: 1 - text: "Lack of fluids can lead to dry mouth, which is a leading cause of bad breath. Water can also dilute any chemicals in your mouth or gut that are causing bad breath., Studies show that eating 6 ounces of yogurt a day reduces the level of odor-causing compounds in the mouth. In particular, look for yogurt containing the active bacteria Streptococcus thermophilus or Lactobacillus bulgaricus., The abrasive nature of fibrous fruits and vegetables helps to clean teeth, while the vitamins, antioxidants, and acids they contain improve dental health.Foods that can be particularly helpful include:Apples — Apples contain vitamin C, which is necessary for health gums, as well as malic acid, which helps to whiten teeth.Carrots — Carrots are rich in vitamin A, which strengthens tooth enamel.Celery — Chewing celery produces a lot of saliva, which helps to neutralize bacteria that cause bad breath.Pineapples — Pineapples contain bromelain, an enzyme that cleans the mouth., These teas have been shown to kill the bacteria that cause bad breath and plaque., An upset stomach can lead to burping, which contributes to bad breath. Don’t eat foods that upset your stomach, or if you do, use antacids. If you are lactose intolerant, try lactase tablets., They can all cause bad breath. If you do eat them, bring sugar-free gum or a toothbrush and toothpaste to freshen your mouth afterwards., Diets low in carbohydrates lead to ketosis — a state in which the body burns primarily fat instead of carbohydrates for energy. This may be good for your waistline, but it also produces chemicals called ketones, which contribute to bad breath.To stop the problem, you must change your diet. Or, you can combat the smell in one of these ways:Drink lots of water to dilute the ketones.Chew sugarless gum or suck on sugarless mints.Chew mint leaves." - text: " Bring 1/2 cup water to the boil.Add the fresh or dried rosemary to the water.Remove from the heat. Set aside for 1/2 an hour to infuse. Added flavour can be released by pressing down on the rosemary leaves with a spoon. Add the pieces to the blender or food processor with the elderflower cordial. Blend or process to a purée.,, Add the lemon or lime juice and stir to combine., Add a cover and place in the freezer.After 2 hours, remove from the freezer and break up with a fork. This helps the ice crystals to form properly.Continue doing this every hour until the granita freezes properly. Scoop the granita into dessert bowls and serve. Garnish with a cucumber curl or a small sprig of rosemary." metrics: - Rouge1: 31.2 - RougeL: 24.5 --- # Model name Wikihow T5-small ## Model description This is a T5-small model trained on Wikihow All data set. The model was trained for 3 epochs using a batch size of 16 and learning rate of 3e-4. Max_input_lngth is set as 512 and max_output_length is 150. Model attained a Rouge1 score of 31.2 and RougeL score of 24.5. We have written a blog post that covers the training procedure. Please find it [here](https://medium.com/@priya.dwivedi/fine-tuning-a-t5-transformer-for-any-summarization-task-82334c64c81). 
## Usage ``` from transformers import AutoTokenizer, AutoModelWithLMHead import torch tokenizer = AutoTokenizer.from_pretrained("deep-learning-analytics/wikihow-t5-small") model = AutoModelWithLMHead.from_pretrained("deep-learning-analytics/wikihow-t5-small") device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") model = model.to(device) text = """ Lack of fluids can lead to dry mouth, which is a leading cause of bad breath. Water can also dilute any chemicals in your mouth or gut that are causing bad breath., Studies show that eating 6 ounces of yogurt a day reduces the level of odor-causing compounds in the mouth. In particular, look for yogurt containing the active bacteria Streptococcus thermophilus or Lactobacillus bulgaricus., The abrasive nature of fibrous fruits and vegetables helps to clean teeth, while the vitamins, antioxidants, and acids they contain improve dental health.Foods that can be particularly helpful include:Apples — Apples contain vitamin C, which is necessary for health gums, as well as malic acid, which helps to whiten teeth.Carrots — Carrots are rich in vitamin A, which strengthens tooth enamel.Celery — Chewing celery produces a lot of saliva, which helps to neutralize bacteria that cause bad breath.Pineapples — Pineapples contain bromelain, an enzyme that cleans the mouth., These teas have been shown to kill the bacteria that cause bad breath and plaque., An upset stomach can lead to burping, which contributes to bad breath. Don’t eat foods that upset your stomach, or if you do, use antacids. If you are lactose intolerant, try lactase tablets., They can all cause bad breath. If you do eat them, bring sugar-free gum or a toothbrush and toothpaste to freshen your mouth afterwards., Diets low in carbohydrates lead to ketosis — a state in which the body burns primarily fat instead of carbohydrates for energy. This may be good for your waistline, but it also produces chemicals called ketones, which contribute to bad breath.To stop the problem, you must change your diet. Or, you can combat the smell in one of these ways:Drink lots of water to dilute the ketones.Chew sugarless gum or suck on sugarless mints.Chew mint leaves. """ preprocess_text = text.strip().replace("\n", "") tokenized_text = tokenizer.encode(preprocess_text, return_tensors="pt").to(device) summary_ids = model.generate( tokenized_text, max_length=150, num_beams=2, repetition_penalty=2.5, length_penalty=1.0, early_stopping=True ) output = tokenizer.decode(summary_ids[0], skip_special_tokens=True) print("\n\nSummarized text:\n", output) ```
osanseviero/test
2021-06-14T09:35:44.000Z
[]
[ ".gitattributes", "sklearn_model.pickle" ]
osanseviero
0
osanseviero/test2
2021-06-14T12:22:33.000Z
[]
[ ".gitattributes" ]
osanseviero
0
osanseviero/test3
2021-06-14T12:22:47.000Z
[]
[ ".gitattributes", "sklearn_model.pickle" ]
osanseviero
0
osanseviero/test4
2021-06-14T12:25:19.000Z
[]
[ ".gitattributes", "README.md", "sklearn_model.pickle" ]
osanseviero
0
t
osanseviero/test_adapters
2021-06-16T20:03:55.000Z
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
[ ".gitattributes", "config.json", "pytorch_model.bin" ]
osanseviero
0
transformers
osanseviero/testlangtag
2021-06-04T13:04:18.000Z
[]
[ ".gitattributes", "README.md" ]
osanseviero
0
# This is a test
osanseviero/torch
2021-06-15T20:00:08.000Z
[ "pytorch", "transformers" ]
[ "README.md", "config.json", "pytorch_model.bin" ]
osanseviero
0
transformers
example
osanseviero/upload-to-hub
2021-05-20T19:13:12.000Z
[ "pytorch", "jax", "roberta", "transformers" ]
[ ".gitattributes", "README.md", "config.json", "flax_model.msgpack", "merges.txt", "modules.json", "pytorch_model.bin", "sentence_bert_config.json", "special_tokens_map.json", "tokenizer_config.json", "vocab.json", "1_Pooling/config.json" ]
osanseviero
22
transformers
Example card Second modification
othrif/wav2vec2-large-xlsr-arabic
2021-03-29T18:43:31.000Z
[ "pytorch", "wav2vec2", "ar", "dataset:common_voice", "transformers", "audio", "automatic-speech-recognition", "speech", "xlsr-fine-tuning-week", "license:apache-2.0" ]
automatic-speech-recognition
[ ".gitattributes", "README.md", "config.json", "evaluate.py", "finetune.sh", "preprocessor_config.json", "pytorch_model.bin", "run_common_voice.py", "special_tokens_map.json", "tokenizer_config.json", "vocab.json" ]
othrif
21
transformers
--- language: ar datasets: - common_voice metrics: - wer tags: - audio - automatic-speech-recognition - speech - xlsr-fine-tuning-week license: apache-2.0 model-index: - name: XLSR Wav2Vec2 Arabic by Othmane Rifki results: - task: name: Speech Recognition type: automatic-speech-recognition dataset: name: Common Voice ar type: common_voice args: ar metrics: - name: Test WER type: wer value: 46.77 --- # Wav2Vec2-Large-XLSR-53-Arabic Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Arabic using the [Common Voice](https://huggingface.co/datasets/common_voice). When using this model, make sure that your speech input is sampled at 16kHz. ## Usage The model can be used directly (without a language model) as follows: ```python import torch import torchaudio from datasets import load_dataset from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor test_dataset = load_dataset("common_voice", "ar", split="test[:2%]") processor = Wav2Vec2Processor.from_pretrained("othrif/wav2vec2-large-xlsr-arabic") model = Wav2Vec2ForCTC.from_pretrained("othrif/wav2vec2-large-xlsr-arabic") resampler = torchaudio.transforms.Resample(48_000, 16_000) # Preprocessing the datasets. # We need to read the audio files as arrays def speech_file_to_array_fn(batch): speech_array, sampling_rate = torchaudio.load(batch["path"]) batch["speech"] = resampler(speech_array).squeeze().numpy() return batch test_dataset = test_dataset.map(speech_file_to_array_fn) inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True) with torch.no_grad(): logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits predicted_ids = torch.argmax(logits, dim=-1) print("Prediction:", processor.batch_decode(predicted_ids)) print("Reference:", test_dataset["sentence"][:2]) ``` ## Evaluation The model can be evaluated as follows on the Arabic test data of Common Voice. ```python import torch import torchaudio from datasets import load_dataset, load_metric from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor import re test_dataset = load_dataset("common_voice", "ar", split="test") wer = load_metric("wer") processor = Wav2Vec2Processor.from_pretrained("othrif/wav2vec2-large-xlsr-arabic") model = Wav2Vec2ForCTC.from_pretrained("othrif/wav2vec2-large-xlsr-arabic") model.to("cuda") chars_to_ignore_regex = '[\\\\\\\\\\\\\\\\؛\\\\\\\\\\\\\\\\—\\\\\\\\\\\\\\\\_get\\\\\\\\\\\\\\\\«\\\\\\\\\\\\\\\\»\\\\\\\\\\\\\\\\ـ\\\\\\\\\\\\\\\\ـ\\\\\\\\\\\\\\\\,\\\\\\\\\\\\\\\\?\\\\\\\\\\\\\\\\.\\\\\\\\\\\\\\\\!\\\\\\\\\\\\\\\\-\\\\\\\\\\\\\\\\;\\\\\\\\\\\\\\\\:\\\\\\\\\\\\\\\\"\\\\\\\\\\\\\\\\“\\\\\\\\\\\\\\\\%\\\\\\\\\\\\\\\\‘\\\\\\\\\\\\\\\\”\\\\\\\\\\\\\\\\�\\\\\\\\\\\\\\\\#\\\\\\\\\\\\\\\\،\\\\\\\\\\\\\\\\☭,\\\\\\\\\\\\\\\\؟]' resampler = torchaudio.transforms.Resample(48_000, 16_000) # Preprocessing the datasets. # We need to read the audio files as arrays def speech_file_to_array_fn(batch): batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower() speech_array, sampling_rate = torchaudio.load(batch["path"]) batch["speech"] = resampler(speech_array).squeeze().numpy() return batch test_dataset = test_dataset.map(speech_file_to_array_fn) # Preprocessing the datasets. 
# We need to read the audio files as arrays def evaluate(batch): inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True) with torch.no_grad(): logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits pred_ids = torch.argmax(logits, dim=-1) batch["pred_strings"] = processor.batch_decode(pred_ids) return batch result = test_dataset.map(evaluate, batched=True, batch_size=8) print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"]))) ``` **Test Result**: 46.77 ## Training The Common Voice `train`, `validation` datasets were used for training. The script used for training can be found [here](https://huggingface.co/othrif/wav2vec2-large-xlsr-arabic/tree/main)
othrif/wav2vec2-large-xlsr-egyptian
2021-03-29T02:46:30.000Z
[ "pytorch", "wav2vec2", "arz", "dataset:https://arabicspeech.org/", "transformers", "audio", "automatic-speech-recognition", "speech", "xlsr-fine-tuning-week", "license:apache-2.0" ]
automatic-speech-recognition
[ ".gitattributes", "README.md", "config.json", "optimizer.pt", "preprocessor_config.json", "pytorch_model.bin", "scheduler.pt", "special_tokens_map.json", "tokenizer_config.json", "trainer_state.json", "training_args.bin", "vocab.json" ]
othrif
26
transformers
--- language: arz datasets: - https://arabicspeech.org/ metrics: - wer tags: - audio - automatic-speech-recognition - speech - xlsr-fine-tuning-week license: apache-2.0 model-index: - name: XLSR Wav2Vec2 Egyptian Arabic by Othmane Rifki results: - task: name: Speech Recognition type: automatic-speech-recognition dataset: name: arabicspeech.org MGB-3 type: arabicspeech.org MGB-3 args: ar metrics: - name: Test WER type: wer value: 55.2 --- # Wav2Vec2-Large-XLSR-53-Egyptian-Arabic Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) in Egyptian using the [arabicspeech.org MGB-3](https://arabicspeech.org/mgb3-asr/) When using this model, make sure that your speech input is sampled at 16kHz. ## Usage The model can be used directly (without a language model) as follows: ```python import torch import torchaudio from datasets import load_dataset from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor test_dataset = load_dataset("common_voice", "ar", split="test[:2%]") processor = Wav2Vec2Processor.from_pretrained("othrif/wav2vec2-large-xlsr-egyptian") model = Wav2Vec2ForCTC.from_pretrained("othrif/wav2vec2-large-xlsr-egyptian") resampler = torchaudio.transforms.Resample(48_000, 16_000) # Preprocessing the datasets. # We need to read the audio files as arrays def speech_file_to_array_fn(batch): speech_array, sampling_rate = torchaudio.load(batch["path"]) batch["speech"] = resampler(speech_array).squeeze().numpy() return batch test_dataset = test_dataset.map(speech_file_to_array_fn) inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True) with torch.no_grad(): logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits predicted_ids = torch.argmax(logits, dim=-1) print("Prediction:", processor.batch_decode(predicted_ids)) print("Reference:", test_dataset["sentence"][:2]) ``` ## Evaluation The model can be evaluated as follows on the Arabic test data of Common Voice. ```python import torch import torchaudio from datasets import load_dataset, load_metric from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor import re test_dataset = load_dataset("common_voice", "ar", split="test") wer = load_metric("wer") processor = Wav2Vec2Processor.from_pretrained("othrif/wav2vec2-large-xlsr-egyptian") model = Wav2Vec2ForCTC.from_pretrained("othrif/wav2vec2-large-xlsr-egyptian") model.to("cuda") chars_to_ignore_regex = '[\؛\—\_get\«\»\ـ\ـ\,\?\.\!\-\;\:\"\“\%\‘\”\�\#\،\☭,\؟]' resampler = torchaudio.transforms.Resample(48_000, 16_000) # Preprocessing the datasets. # We need to read the audio files as arrays def speech_file_to_array_fn(batch): batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower() speech_array, sampling_rate = torchaudio.load(batch["path"]) batch["speech"] = resampler(speech_array).squeeze().numpy() return batch test_dataset = test_dataset.map(speech_file_to_array_fn) # Preprocessing the datasets. 
# We need to read the audio files as arrays def evaluate(batch): inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True) with torch.no_grad(): logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits pred_ids = torch.argmax(logits, dim=-1) batch["pred_strings"] = processor.batch_decode(pred_ids) return batch result = test_dataset.map(evaluate, batched=True, batch_size=8) print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"]))) ``` **Test Result**: 55.2 ## Training The Common Voice `train`, `validation` datasets were used for training. The script used for training can be found [here](https://github.com/othrif/xlsr-wav2vec2)
othrif/wav2vec2-large-xlsr-moroccan
2021-04-15T03:16:32.000Z
[ "pytorch", "wav2vec2", "ary", "dataset:mgb5", "transformers", "audio", "automatic-speech-recognition", "speech", "xlsr-fine-tuning-week", "license:apache-2.0" ]
automatic-speech-recognition
[ ".gitattributes", "README.md", "config.json", "preprocessor_config.json", "pytorch_model.bin", "special_tokens_map.json", "tokenizer_config.json", "vocab.json" ]
othrif
92
transformers
--- language: ary datasets: - mgb5 metrics: - wer tags: - audio - automatic-speech-recognition - speech - xlsr-fine-tuning-week license: apache-2.0 model-index: - name: XLSR Wav2Vec2 Moroccan Arabic dialect by Othmane Rifki results: - task: name: Speech Recognition type: automatic-speech-recognition dataset: name: MGB5 from ELDA and https://arabicspeech.org/ type: ELDA and https://arabicspeech.org/ args: ary metrics: - name: Test WER type: wer value: 66.45 --- # Wav2Vec2-Large-XLSR-53-Moroccan Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on [MGB5 Moroccan Arabic](http://www.islrn.org/resources/938-639-614-524-5/) kindly provided by [ELDA](http://www.elra.info/en/about/elda/) and [ArabicSpeech](https://arabicspeech.org/mgb5/). In order to have access to MGB5, please request it from ELDA. When using this model, make sure that your speech input is sampled at 16kHz. ## Usage The model can be used directly (without a language model) as follows: ```python import re import torch import librosa import torchaudio from datasets import load_dataset, load_metric from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor import soundfile as sf dataset = load_dataset("ma_speech_corpus", split="test") processor = Wav2Vec2Processor.from_pretrained("othrif/wav2vec2-large-xlsr-moroccan") model = Wav2Vec2ForCTC.from_pretrained("othrif/wav2vec2-large-xlsr-moroccan") model.to("cuda") chars_to_ignore_regex = '[\\,\\?\\.\\!\\-\\;\\:\\"\\“\\'\\�]' def remove_special_characters(batch): batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " " return batch dataset = dataset.map(remove_special_characters) dataset = dataset.select(range(10)) def speech_file_to_array_fn(batch): start, stop = batch['segment'].split('_') speech_array, sampling_rate = torchaudio.load(batch["path"]) speech_array, sampling_rate = sf.read(batch["path"], start=int(float(start) * sampling_rate), stop=int(float(stop) * sampling_rate)) batch["speech"] = librosa.resample(speech_array, sampling_rate, 16_000) batch["sampling_rate"] = 16_000 batch["target_text"] = batch["text"] return batch dataset = dataset.map( speech_file_to_array_fn ) def predict(batch): inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True) with torch.no_grad(): logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits pred_ids = torch.argmax(logits, dim=-1) batch["predicted"] = processor.batch_decode(pred_ids) return batch dataset = dataset.map(predict, batched=True, batch_size=32) for reference, predicted in zip(dataset["sentence"], dataset["predicted"]): print("reference:", reference) print("predicted:", predicted) print("--") ``` Here's the output: ``` reference: عشرين ألفريال الوحده وشي خمسميه دريال predicted: عشرين علف ريا لوحده وشي خمسميات ريال -- reference: واحد جوج تلاتة ربعه خمسة ستة predicted: غيحك تويش تتبة نتاست -- reference: هي هاديك غتجينا تقريبا ميه وسته وعشرين ألف ريال predicted: ياض كتجينا تقريبه ميه أو ستي و عشيناأفرين -- reference: ###والصرف ليبقا نجيب بيه الصالون فلهوندا... أهاه نديروها علاش لا؟... predicted: أواصرف ليبقا نجيب يه اصالون فالهندا أه نديروها علاش لا -- reference: ###صافي مشات... أنا أختي معندي مندير بهاد صداع الراس... 
predicted: صافي مشات أنا خصي معندي مندير بهاد داع راسك ف -- reference: خلصو ليا غير لكريدي ديالي وديرو ليعجبكوم predicted: خلصو ليا غير لكريدي ديالي أوديرو لي عجبكوم -- reference: أنا نتكلف يلاه لقى شي حاجه نشغل بيها راسي predicted: أنا نتكلف يالله لقا شي حاجه نشغل بيها راسي ``` ## Evaluation The model can be evaluated as follows on the Arabic test data of Common Voice. ```python import re import torch import librosa import torchaudio from datasets import load_dataset, load_metric from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor import soundfile as sf eval_dataset = load_dataset("ma_speech_corpus", split="test") wer = load_metric("wer") processor = Wav2Vec2Processor.from_pretrained("othrif/wav2vec2-large-xlsr-moroccan") model = Wav2Vec2ForCTC.from_pretrained("othrif/wav2vec2-large-xlsr-moroccan") model.to("cuda") chars_to_ignore_regex = '[\\,\\?\\.\\!\\-\\;\\:\\"\\“\\'\\�]' def remove_special_characters(batch): batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " " return batch eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"]) #eval_dataset = eval_dataset.select(range(100)) def speech_file_to_array_fn(batch): start, stop = batch['segment'].split('_') speech_array, sampling_rate = torchaudio.load(batch["path"]) speech_array, sampling_rate = sf.read(batch["path"], start=int(float(start) * sampling_rate), stop=int(float(stop) * sampling_rate)) batch["speech"] = librosa.resample(speech_array, sampling_rate, 16_000) batch["sampling_rate"] = 16_000 batch["target_text"] = batch["text"] return batch eval_dataset = eval_dataset.map( speech_file_to_array_fn, remove_columns=eval_dataset.column_names ) def evaluate(batch): inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True) with torch.no_grad(): logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits pred_ids = torch.argmax(logits, dim=-1) batch["pred_strings"] = processor.batch_decode(pred_ids) return batch result = eval_dataset.map(evaluate, batched=True, batch_size=32) print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["target_text"]))) ``` **Test Result**: 66.45 ## Training The [MGB5](http://www.islrn.org/resources/938-639-614-524-5/) `train`, `validation` datasets were used for training. The script used for training can be found [here](https://github.com/othrif/xlsr-wav2vec2)
othrif/wav2vec_test
2021-03-29T02:48:07.000Z
[ "pytorch", "wav2vec2", "ar", "dataset:https://arabicspeech.org/", "transformers", "audio", "automatic-speech-recognition", "speech", "license:apache-2.0" ]
automatic-speech-recognition
[ ".gitattributes", "README.md", "config.json", "optimizer.pt", "preprocessor_config.json", "pytorch_model.bin", "scheduler.pt", "special_tokens_map.json", "tokenizer_config.json", "trainer_state.json", "training_args.bin", "vocab.json" ]
othrif
8
transformers
--- language: ar datasets: - https://arabicspeech.org/ tags: - audio - automatic-speech-recognition - speech license: apache-2.0 model-index: - name: XLSR Wav2Vec2 Egyptian by Zaid Alyafeai and Othmane Rifki results: - task: name: Speech Recognition type: automatic-speech-recognition dataset: name: arabicspeech.org MGB-3 type: arabicspeech.org MGB-3 args: ar metrics: - name: Test WER type: wer value: 55.2 --- # Test Wav2Vec2 with Egyptian Arabic Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Egyptian Arabic using the [arabicspeech.org MGB-3](https://arabicspeech.org/mgb3-asr/) dataset. When using this model, make sure that your speech input is sampled at 16kHz. ## Usage The model can be used directly (without a language model) as follows: ```python import torch import torchaudio from datasets import load_dataset from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor test_dataset = load_dataset("arabic_speech_corpus", split="test") processor = Wav2Vec2Processor.from_pretrained("othrif/wav2vec_test") model = Wav2Vec2ForCTC.from_pretrained("othrif/wav2vec_test") resampler = torchaudio.transforms.Resample(48_000, 16_000) # Preprocessing the datasets. # We need to read the audio files as arrays def speech_file_to_array_fn(batch): speech_array, sampling_rate = torchaudio.load(batch["path"]) batch["speech"] = resampler(speech_array).squeeze().numpy() return batch test_dataset = test_dataset.map(speech_file_to_array_fn) inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True) with torch.no_grad(): logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits predicted_ids = torch.argmax(logits, dim=-1) print("Prediction:", processor.batch_decode(predicted_ids)) print("Reference:", test_dataset["sentence"][:2]) ```
overfit/peyma-ner-bert-base
2021-05-20T02:15:21.000Z
[ "tf", "bert", "token-classification", "transformers" ]
token-classification
[ ".gitattributes", "config.json", "special_tokens_map.json", "tf_model.h5", "tokenizer_config.json", "vocab.txt" ]
overfit
7
transformers
overfit/twiner-bert-base-mtl
2021-05-20T02:15:58.000Z
[ "tf", "bert", "token-classification", "transformers" ]
token-classification
[ ".gitattributes", "config.json", "special_tokens_map.json", "tf_model.h5", "tokenizer_config.json", "vocab.txt" ]
overfit
16
transformers
overfit/twiner-bert-base
2021-05-20T02:16:41.000Z
[ "tf", "bert", "token-classification", "transformers" ]
token-classification
[ ".gitattributes", ".gitignore", "README.md", "config.json", "special_tokens_map.json", "tf_model.h5", "tokenizer_config.json", "vocab.txt" ]
overfit
25
transformers
owaiskha9654/PICO_CLASSIFIER
2021-02-03T21:46:44.000Z
[]
[ ".gitattributes" ]
owaiskha9654
0
owaiskha9654/PicoClassifier
2021-02-03T21:58:49.000Z
[]
[ ".gitattributes" ]
owaiskha9654
0
oya163/NepBERT
2021-05-20T19:14:16.000Z
[ "pytorch", "jax", "roberta", "transformers" ]
[ ".gitattributes", "config.json", "flax_model.msgpack", "merges.txt", "pytorch_model.bin", "special_tokens_map.json", "tokenizer_config.json", "training_args.bin", "vocab.json" ]
oya163
10
transformers
ozcangundes/T5-base-for-BioQA
2021-03-02T18:50:32.000Z
[ "pytorch", "t5", "seq2seq", "english", "dataset:bioASQ", "arxiv:1910.10683", "transformers", "license:mit", "question-answering", "pipeline_tag:question-answering", "text2text-generation" ]
question-answering
[ ".gitattributes", "README.md", "config.json", "pytorch_model.bin", "special_tokens_map.json", "spiece.model", "tokenizer_config.json" ]
ozcangundes
246
transformers
--- language: english datasets: - bioASQ pipeline_tag: question-answering license: MIT --- # T5-base model fine-tuned on BioASQ for Biological Question Answering 👩‍⚕️👨‍⚕️ [Google's T5-base](https://huggingface.co/t5-base) fine-tuned on [BioASQ](https://github.com/dmis-lab/biobert) (secondary task) for **Q&A** downstream task. ## Details of T5 [Google's T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) Pretraining Dataset: [C4](https://huggingface.co/datasets/c4) Paper: [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/pdf/1910.10683.pdf) Authors: *Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu* ## Dependencies transformers == 4.3.3 sentencepiece >= 0.1.94 ## Usage 🚀 ```python import torch from transformers import T5ForConditionalGeneration, T5Tokenizer tokenizer = T5Tokenizer.from_pretrained("ozcangundes/T5-base-for-BioQA") model = T5ForConditionalGeneration.from_pretrained("ozcangundes/T5-base-for-BioQA") def get_answer(question,context): source_encoding=tokenizer( question, context, max_length=512, padding="max_length", truncation="only_second", return_attention_mask=True, add_special_tokens=True, return_tensors="pt") generated_ids=model.generate( input_ids=source_encoding["input_ids"], attention_mask=source_encoding["attention_mask"]) preds=[tokenizer.decode(gen_id, skip_special_tokens=True, clean_up_tokenization_spaces=True) for gen_id in generated_ids] return "".join(preds) ``` ### Example 1 ```python question={ "context":"Effect of food on the pharmacokinetics of empagliflozin, a sodium glucose cotransporter 2 (SGLT2) inhibitor, and assessment of dose proportionality in healthy volunteers. OBJECTIVES: Empagliflozin is an orally available, potent and highly selective inhibitor of the sodium glucose cotransporter 2 (SGLT2). This study was undertaken to investigate the effect of food on the pharmacokinetics of 25 mg empagliflozin and to assess dose proportionality between 10 mg and 25 mg empagliflozin under fasted conditions. MATERIALS AND METHODS: In this open-label, 3-way, cross-over study, 18 healthy volunteers received 3 single doses of empagliflozin in a randomized sequence (25 mg empagliflozin under fasted conditions, 25 mg empagliflozin after a high-fat, high-calorie breakfast and 10 mg empagliflozin under fasted conditions), each separated by a washout period of at least 7 days. Serial plasma samples were collected at selected time points over a period of 72 hours. RESULTS: Administration with food had no clinically relevant effect on the area under the plasma concentration-time curve (AUC0-∞) of empagliflozin (geometric mean ratio (GMR): 84.04, 90% confidence interval (CI): 80.86 - 87.34). The decrease observed in the maximum plasma concentrations (Cmax) of empagliflozin (GMR: 63.22, 90% CI: 56.74 - 70.44) when administered with food was not considered clinically meaningful. The increases in AUC0-∞ and Cmax for 10 mg vs. 25 mg empagliflozin administered under fasting conditions were roughly dose-proportional, as demonstrated by the slope β of the regression lines being slightly less than 1 (slope β for AUC0-∞: 0.94, 95% CI: 0.90 - 0.97; slope β for Cmax: 0.91, 95% CI: 0.80 - 1.01). Empagliflozin was well tolerated under fed and fasting conditions. CONCLUSIONS: The results support administration of empagliflozin tablets independently of food. 
Increases in empagliflozin exposure under fasting conditions were roughly dose-proportional between 10 mg and 25 mg empagliflozin.", "question":"Which protein does empagliflozin inhibit?" } get_answer(question["question"],question["context"]) ``` > SGLT2 ### Example 2 ```python question2={ "context":"Dermatitis herpetiformis: jejunal findings and skin response to gluten free diet. Fifty seven children with dermatitis herpetiformis, 18 from Finland and 39 from Hungary, were studied. Diagnostic criteria included the finding of granular IgA deposits in the skin of all patients. The mean age at onset of the rash was 7 X 2 years and favoured sites were the elbows, knees, and buttocks. Symptoms suggesting small intestinal disease were rare but in 35 (61%) of the children subtotal villous atrophy and in 16 (28%) partial villous atrophy were found on jejunal biopsy. Eighteen children underwent a second biopsy after a mean of 21 months on a gluten free diet; villous height was found to be increased and the intraepithelial lymphocyte count decreased in all these patients. Gluten challenge caused a reversal in the two children who underwent a third biopsy. The effect of the gluten free diet on the rash was examined in Finnish children by observing the daily requirements of dapsone, a drug used to control the rash at the beginning of the diet. Eight (67%) of the 12 children were able to stop taking dapsone after a mean of 11 months on the diet and all three patients treated with diet alone became asymptomatic after three to 6 months on the diet. These results confirm that most children with dermatitis herpetiformis have jejunal villous atrophy, though they rarely have gastrointestinal symptoms. The central role of gluten in childhood dermatitis herpetiformis is evidenced by the fact that a gluten free diet helps the damaged jejunal mucosa to recover and controls the rash even in those children who do not have an abnormal jejunal biopsy.", "question":"What is the typical rash associated with gluten?" } get_answer(question2["question"],question2["context"]) ``` > dermatitis herpetiformis Created by Özcan Gündeş ✌️ --- Twitter: <a href="https://twitter.com/ozcangundes" target="blank"><img align="center" src="https://cdn.jsdelivr.net/npm/[email protected]/icons/twitter.svg" alt="ozcangundes" height="30" width="30" /></a> Linkedin: <a href="https://www.linkedin.com/in/%C3%B6zcan-g%C3%BCnde%C5%9F-7693055b/" target="blank"><img align="center" src="https://cdn.jsdelivr.net/npm/[email protected]/icons/linkedin.svg" alt="13198517" height="30" width="30" /></a> Medium: <a href="https://medium.com/@ozcangundes" target="blank"><img align="center" src="https://cdn.jsdelivr.net/npm/[email protected]/icons/medium.svg" alt="@ozcangundes" height="30" width="30" /></a> Github: <a href="https://github.com/ozcangundes" target="blank"><img align="center" src="https://cdn.jsdelivr.net/npm/[email protected]/icons/github.svg" alt="@ozcangundes" height="30" width="30" /></a>
ozcangundes/mt5-multitask-qa-qg-turkish
2021-05-07T13:54:47.000Z
[ "pytorch", "mt5", "seq2seq", "tr", "dataset:TQUAD", "transformers", "question-answering", "question-generation", "multitask-model", "license:apache-2.0", "text2text-generation" ]
question-answering
[ ".gitattributes", "README.md", "added_tokens.json", "config.json", "pytorch_model.bin", "special_tokens_map.json", "spiece.model", "tokenizer_config.json", "training_args.bin" ]
ozcangundes
36
transformers
--- language: tr datasets: - TQUAD tags: - question-answering - question-generation - multitask-model license: apache-2.0 --- # mT5-small based Turkish Multitask (Answer Extraction, Question Generation and Question Answering) System [Google's Multilingual T5-small](https://github.com/google-research/multilingual-t5) is fine-tuned on the [Turkish Question Answering dataset](https://github.com/okanvk/Turkish-Reading-Comprehension-Question-Answering-Dataset) for three downstream tasks, **Answer extraction, Question Generation and Question Answering**, all served by this single model. The mT5 model was also trained for multiple text2text NLP tasks. All data processing, training and pipeline code can be found on my [Github](https://github.com/ozcangundes/multitask-question-generation). I will share the training details in the repo as soon as possible. The mT5 small model has 300 million parameters and a model size of about 1.2GB, so fine-tuning it takes a significant amount of time. Training used 8 epochs and a 1e-4 learning rate with 0 warmup steps. These and the other hyperparameters can be tuned for much better results. ## Requirements ❗❗❗ ``` !pip install transformers==4.4.2 !pip install sentencepiece==0.1.95 !git clone https://github.com/ozcangundes/multitask-question-generation.git %cd multitask-question-generation/ ``` ## Usage 🚀🚀 ``` from transformers import AutoTokenizer, AutoModelForSeq2SeqLM tokenizer = AutoTokenizer.from_pretrained("ozcangundes/mt5-multitask-qa-qg-turkish") model = AutoModelForSeq2SeqLM.from_pretrained("ozcangundes/mt5-multitask-qa-qg-turkish") from pipelines import pipeline #pipelines.py script in the cloned repo multimodel = pipeline("multitask-qa-qg",tokenizer=tokenizer,model=model) #sample text text="Özcan Gündeş, 1993 yılı Tarsus doğumludur. Orta Doğu Teknik Üniversitesi Endüstri Mühendisliği bölümünde 2011 2016 yılları arasında lisans eğitimi görmüştür. Yüksek lisansını ise 2020 Aralık ayında, 4.00 genel not ortalaması ile Boğaziçi Üniversitesi, Yönetim Bilişim Sistemleri bölümünde tamamlamıştır. Futbolla yakından ilgilenmekle birlikte, Galatasaray kulübü taraftarıdır." ``` ## Example - Both Question Generation and Question Answering 💬💬 ``` multimodel(text) #output => [{'answer': 'Tarsus', 'question': 'Özcan Gündeş nerede doğmuştur?'}, {'answer': '1993', 'question': 'Özcan Gündeş kaç yılında doğmuştur?'}, {'answer': '2011 2016', 'question': 'Özcan Gündeş lisans eğitimini hangi yıllar arasında tamamlamıştır?'}, {'answer': 'Boğaziçi Üniversitesi, Yönetim Bilişim Sistemleri', 'question': 'Özcan Gündeş yüksek lisansını hangi bölümde tamamlamıştır?'}, {'answer': 'Galatasaray kulübü', 'question': 'Özcan Gündeş futbolla yakından ilgilenmekle birlikte hangi kulübü taraftarıdır?'}] ``` From this text, 5 questions are generated and answered by the model. ## Example - Question Answering 💭💭 Both the text and the related question should be passed into the pipeline. ``` multimodel({"context":text,"question":"Özcan hangi takımı tutmaktadır?"}) #output => Galatasaray multimodel({"context":text,"question":"Özcan, yüksek lisanstan ne zaman mezun oldu?"}) #output => 2020 Aralık ayında multimodel({"context":text,"question":"Özcan'ın yüksek lisans bitirme notu kaçtır?"}) #output => 4.00 #Sorry for being cocky 😝😝 ``` ## ACKNOWLEDGEMENT This work is inspired by [Suraj Patil's great repo](https://github.com/patil-suraj/question_generation). 
I would like to thank him for the clean code and also [Okan Çiftçi](https://github.com/okanvk) for the Turkish dataset 🙏
ozcangundes/mt5-small-turkish-squad
2021-03-22T08:11:15.000Z
[ "pytorch", "mt5", "seq2seq", "tr", "dataset:TQUAD", "transformers", "license:mit", "question-answering", "pipeline_tag:question-answering", "text2text-generation" ]
question-answering
[ ".gitattributes", "README.md", "config.json", "pytorch_model.bin", "special_tokens_map.json", "spiece.model", "tokenizer_config.json" ]
ozcangundes
21
transformers
--- language: tr datasets: - TQUAD pipeline_tag: question-answering license: MIT --- # mT5-small based Turkish Question Answering System [Google's Multilingual T5-small](https://github.com/google-research/multilingual-t5) is fine-tuned on [Turkish Question Answering dataset](https://github.com/TQuad/turkish-nlp-qa-dataset) for **Q&A** downstream task by using Pytorch Lightning.⚡ The notebook that includes all fine tuning process will be shared on my Github page later. mT5 small model has 300 million parameters and model size is about 1.2GB. Therefore, it takes significant amount of time to fine tune it. **Important Note**: mT5 was only pre-trained on [mC4](https://www.tensorflow.org/datasets/catalog/c4#c4multilingual) excluding any supervised training. Therefore, the mT5 model has to be fine-tuned before it is useable on a downstream task. ## Usage 🚀 ```python from transformers import AutoTokenizer, AutoModelForSeq2SeqLM tokenizer = AutoTokenizer.from_pretrained("ozcangundes/mt5-small-turkish-squad") model = AutoModelForSeq2SeqLM.from_pretrained("ozcangundes/mt5-small-turkish-squad") def get_answer(question,context): source_encoding=tokenizer( question, context, max_length=512, padding="max_length", truncation="only_second", return_attention_mask=True, add_special_tokens=True, return_tensors="pt") generated_ids=model.generate( input_ids=source_encoding["input_ids"], attention_mask=source_encoding["attention_mask"], max_length=120) preds=[tokenizer.decode(gen_id, skip_special_tokens=True, clean_up_tokenization_spaces=True) for gen_id in generated_ids] return "".join(preds) ``` ### Example 1 ```python question={ "context":"Pardus, Google'ın öğrencilerle staj ve kendini geliştirme imkânı ile \ tasarılara geliştirici ve katkı sağlamayı amaçladığı açık kaynak tasarısı \ Google Summer of Code'a 2008 ve 2009 olmak üzere iki kere katılmıştır. Bu organizasyona \ ilk katılan Türk tasarısı Pardus olmuştur. Bazı dönemlerde Pardus hakkındaki gelişmeleri \ halka duyurmak ve tasarıya olan ilgiyi arttırmak amacıyla CeBIT Eurasia Bilişim Fuarı'na \ katılım sağlanmaktadır. 2006, 2008, 2009, 2010, 2011,2013 ve 2014 bu fuarlarda Pardus \ standı kurulmuştur.2014 yılında ICT SummitT Now Bilişim Zirvesi'nde yer alınmıştır. \ BİLİŞİM’2014 TBD 31. Ulusal Bilişim Kurultayı ve CITEX’2014 Ankara Bilişim Fuarı’na \ Gümüş sponsorluk ile katkıda bulunulmuş ve Pardus standı kurulmuştur.", "question":"Pardus’un Google Summer of Code'a katıldığı yıllar nelerdir?" } get_answer(question["question"],question["context"]) ``` > 2008 ve 2009 ### Example 2 ```python question2={ "context":"II. Bayezid ve I. Selim devrinde yaşadı ve iki defa hekimbaşılık yaptı. \ Böbrek ve idrar kesesindeki taş oluşumunun nedenlerini ve tedavisini incelediği \ eseriyle tanınır. Adı kaynaklarda Ahmed ve Mahmud olarak da geçer. Ahi Çelebi \ olarak ün yapmıştır. Babası Tabib Mevlana Kemal ile birlikte 1463’te İstanbul’a yerleşti. \ Mevlana Kemal, devrin ünlü hekimlerindendir. Tebriz ya da Şirvan asıllı olduğu çeşitli \ kaynaklarda belirtilir. Ahi Mehmet Çelebi, hekimliği daha çok babasından öğrendi. Onun \ ölümünden sonra devrin önemli hekimleri Kutbüddin ile Altunîzâde’den ders alıp kısa zamanda \ mesleğini ilerletti. Hekimlik becerisinin yanı sıra kuramsal bilgisiyle de kendisini \ kabul ettirerek önce Fâtih Darüşşifasına hekim, sonra da başhekim oldu. II. Bayezid’in \ güvenini kazanarak mutfak eminliğine, ardından da Hekimbaşılığa getirildi. Dört buçuk \ yıl bu görevde kalan Ahî Çelebi, II. Bayezid’in ölümü üzerine geleneğe uyularak azledildi. 
\ Bir müddet sonra Yavuz onu tekrar Hekimbaşılığa getirdi ve Mısır seferine beraberinde \ götürdü. I. Selim'in ölümünden sonra Hekimbaşılık tan tekrar azledildi. Kaynakların \ belirttiğine göre, yaşı doksanı geçmiş olduğu halde, hacdan dönerken Kahire’de \ ölmüş ve İmam Şafi'nin kabri civarına defnedilmiştir.", "question":"Ahi Mehmet Çelebi hangi eseri ile tanınır?" } get_answer(question2["question"],question2["context"]) ``` > Böbrek ve idrar kesesindeki taş oluşumunun nedenlerini ve tedavisini incelediği eseriyle Created by Özcan Gündeş ✌️ --- Twitter: <a href="https://twitter.com/ozcangundes" target="blank"><img align="center" src="https://cdn.jsdelivr.net/npm/[email protected]/icons/twitter.svg" alt="ozcangundes" height="30" width="30" /></a> Linkedin: <a href="https://www.linkedin.com/in/%C3%B6zcan-g%C3%BCnde%C5%9F-7693055b/" target="blank"><img align="center" src="https://cdn.jsdelivr.net/npm/[email protected]/icons/linkedin.svg" alt="13198517" height="30" width="30" /></a> Medium: <a href="https://medium.com/@ozcangundes" target="blank"><img align="center" src="https://cdn.jsdelivr.net/npm/[email protected]/icons/medium.svg" alt="@ozcangundes" height="30" width="30" /></a> Github: <a href="https://github.com/ozcangundes" target="blank"><img align="center" src="https://cdn.jsdelivr.net/npm/[email protected]/icons/github.svg" alt="@ozcangundes" height="30" width="30" /></a>
ozcangundes/mt5-small-turkish-summarization
2021-03-22T08:10:49.000Z
[ "pytorch", "mt5", "seq2seq", "tr", "dataset:MLSUM", "arxiv:2004.14900", "transformers", "license:mit", "summarization", "pipeline_tag:summarization", "text2text-generation" ]
summarization
[ ".gitattributes", "README.md", "config.json", "pytorch_model.bin", "special_tokens_map.json", "spiece.model", "tokenizer_config.json" ]
ozcangundes
393
transformers
--- language: tr datasets: - MLSUM pipeline_tag: summarization license: MIT --- # mT5-small based Turkish Summarization System [Google's Multilingual T5-small](https://github.com/google-research/multilingual-t5) is fine-tuned on [MLSUM Turkish news dataset](https://github.com/recitalAI/MLSUM) for **Summarization** downstream task by using Pytorch Lightning.⚡ mT5 small model has 300 million parameters and model size is about 1.2GB. Therefore, it takes significant amount of time to fine tune it. The model is trained with 10 epochs, 8 batch size and 10e-4 learning rate. It took almost 4 hours. The max news length is kept as 784 and max summary length is determined as 64. **Important Note**: mT5 was only pre-trained on [mC4](https://www.tensorflow.org/datasets/catalog/c4#c4multilingual) excluding any supervised training. Therefore, the mT5 model has to be fine-tuned before it is useable on a downstream task. ## Dataset MLSUM dataset has more than 250K Turkish news with their related summaries. Since the mT5 model size and vocabulary is so large, 20K data is used for training and 4K data is used for validation. For more information about the dataset, please read this [great paper](https://arxiv.org/abs/2004.14900). ## Usage 🚀 ```python from transformers import AutoTokenizer, AutoModelForSeq2SeqLM tokenizer = AutoTokenizer.from_pretrained("ozcangundes/mt5-small-turkish-summarization") model = AutoModelForSeq2SeqLM.from_pretrained("ozcangundes/mt5-small-turkish-summarization") def generate_summary(main_news): source_encoding=tokenizer( main_news, max_length=784, padding="max_length", truncation=True, return_attention_mask=True, add_special_tokens=True, return_tensors="pt") generated_ids=model.generate( input_ids=source_encoding["input_ids"], attention_mask=source_encoding["attention_mask"], num_beams=2, max_length=120, repetition_penalty=2.5, length_penalty=2.0, early_stopping=True, use_cache=True ) preds=[tokenizer.decode(gen_id, skip_special_tokens=True, clean_up_tokenization_spaces=True) for gen_id in generated_ids] return "".join(preds) ``` ### Example 1 ```python main_news= "Final etabının üçüncü karşılaşması 29 Nisan Pazartesi günü saat 18.00 ’ de Burhan Felek Voleybol Salonu ’ nda oynanacak . Sezonu FIVB Kulüpler Dünya Şampiyonluğu ile açan ve CEV Avrupa Şampiyonlar Ligi'ni üçüncü olarak tamamlayan VakıfBank Kadın Voleybol Takımı , Vestel Venus Sultanlar Ligi final serisi ikinci maçında Eczacıbaşı VitrA'yı VakıfBank Spor Sarayı'nda 16-25 , 25-10 , 25-18 ve 25-17'lik setlerle 3-1 mağlup ederek seride durumu 1-1 ' e getirdi . İlk setini 25-16 kaybettiği karşılaşmanın ikinci setinde etkili servisler kullanan sarı-siyahlılar , teknik molasına 12-5 önde girdiği seti 25-10 almayı başardı . Etkili servis performansını üçüncü sette de sürdüren VakıfBank , teknik molasına 12-5 önde girdiği seti 25-18 alarak , karşılaşmada 2-1 öne geçti . Dördüncü sette rakibinin geri dönüşüne izin vermeyen VakıfBank , seti 25-17 , maçı da 3-1 kazanarak seride durumu eşitledi." generate_summary(main_news) #original summary -> "Vestel Venus Sultanlar Ligi final etabı ikinci karşılaşmasında VakıfBank kendi sahasında Eczacıbaşı VitrA'yı 3-1 mağlup etti ve seride durumu 1-1 ' e getirdi ." #output -> "CEV Avrupa Şampiyonlar Ligi'ni üçüncü olarak tamamlayan VakıfBank Kadın Voleybol Takımı, Vestel Venus Sultanlar Ligi final serisi ikinci maçında Eczacıbaşı VitrA'yı 3-1 mağlup ederek seride durumu 1-1'e getirdi." 
``` ### Example 2 ```python main_news="2023'te yerli tank motoru : Bir taraftan da tankın motorunu yerlileştirmeye çalıştıklarını ifade eden Öztürk , şu değerlendirmelerde bulundu : `` Bin 500 beygirlik , şanzımanıyla beraber motoru yerlileştirmeye çalışıyoruz . Bu da bir aksilik çıkmazsa ilk tankımızın üzerine 2023'te koyacağız . Bundan sonra hiçbir ülkeye bağımlılığımız kalmadan bu araçları üretmeye devam edeceğiz . Sorumluluğumuzun ağır olduğunu biliyoruz . Ülkemize hizmet etmeye çalışıyoruz . Bunu daha da ileriye götürmek için elimizden gelen çabayı sarf ediyoruz . Ama bu tek başınıza yapılan bir operasyon değil . Türkiye'deki yerli firmalarla beraber ortaklaşa bu işi yürütmeye çalışıyoruz." generate_summary(main_news) #output -> "TÜRKİYE'de bir taraftan da tankın motorunu yerlileştirmeye çalıştıklarını belirten Öztürk, `` Bin 500 beygirlik, şanzımanıyla beraber motoru yerlileştirmeye çalışıyoruz. Bu da bir aksilik çıkmazsa ilk tankımızın üzerine 2023'te koyacağız.'' dedi." ``` Created by Özcan Gündeş ✌️ --- Twitter: <a href="https://twitter.com/ozcangundes" target="blank"><img align="center" src="https://cdn.jsdelivr.net/npm/[email protected]/icons/twitter.svg" alt="ozcangundes" height="30" width="30" /></a> Linkedin: <a href="https://www.linkedin.com/in/%C3%B6zcan-g%C3%BCnde%C5%9F-7693055b/" target="blank"><img align="center" src="https://cdn.jsdelivr.net/npm/[email protected]/icons/linkedin.svg" alt="13198517" height="30" width="30" /></a> Medium: <a href="https://medium.com/@ozcangundes" target="blank"><img align="center" src="https://cdn.jsdelivr.net/npm/[email protected]/icons/medium.svg" alt="@ozcangundes" height="30" width="30" /></a> Github: <a href="https://github.com/ozcangundes" target="blank"><img align="center" src="https://cdn.jsdelivr.net/npm/[email protected]/icons/github.svg" alt="@ozcangundes" height="30" width="30" /></a>
ozcangundes/wav2vec2-large-xlsr-53-turkish
2021-04-02T14:54:49.000Z
[ "pytorch", "wav2vec2", "tr", "dataset:common_voice", "transformers", "audio", "automatic-speech-recognition", "speech", "xlsr-fine-tuning-week", "license:apache-2.0" ]
automatic-speech-recognition
[ ".gitattributes", "README.md", "config.json", "preprocessor_config.json", "pytorch_model.bin", "special_tokens_map.json", "tokenizer_config.json", "vocab.json" ]
ozcangundes
9
transformers
--- language: - tr datasets: - common_voice metrics: - wer tags: - audio - automatic-speech-recognition - speech - xlsr-fine-tuning-week license: apache-2.0 model-index: - name: Ozcan Gundes XLSR Wav2Vec2 Large Turkish results: - task: name: Speech Recognition type: automatic-speech-recognition dataset: name: Common Voice tr type: common_voice args: tr metrics: - name: Test WER type: wer value: 29.62 --- # Wav2Vec2-Large-XLSR-53-Turkish Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Turkish using the [Common Voice](https://huggingface.co/datasets/common_voice) dataset. When using this model, make sure that your speech input is sampled at 16kHz. ## Usage The model can be used directly (without a language model) as follows: ```python import torch import torchaudio from datasets import load_dataset from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor test_dataset = load_dataset("common_voice", "tr", split="test[:2%]") processor = Wav2Vec2Processor.from_pretrained("ozcangundes/wav2vec2-large-xlsr-53-turkish") model = Wav2Vec2ForCTC.from_pretrained("ozcangundes/wav2vec2-large-xlsr-53-turkish") resampler = torchaudio.transforms.Resample(48_000, 16_000) # Preprocessing the datasets. # We need to read the audio files as arrays def speech_file_to_array_fn(batch): speech_array, sampling_rate = torchaudio.load(batch["path"]) batch["speech"] = resampler(speech_array).squeeze().numpy() return batch test_dataset = test_dataset.map(speech_file_to_array_fn) inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True) with torch.no_grad(): logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits predicted_ids = torch.argmax(logits, dim=-1) print("Prediction:", processor.batch_decode(predicted_ids)) print("Reference:", test_dataset["sentence"][:2]) ``` ## Evaluation The model can be evaluated as follows on the Turkish test data of Common Voice. ```python import torch import torchaudio from datasets import load_dataset, load_metric from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor import re test_dataset = load_dataset("common_voice", "tr", split="test") wer = load_metric("wer") processor = Wav2Vec2Processor.from_pretrained("ozcangundes/wav2vec2-large-xlsr-53-turkish") model = Wav2Vec2ForCTC.from_pretrained("ozcangundes/wav2vec2-large-xlsr-53-turkish") model.to("cuda") chars_to_ignore_regex = '[\\,\\?\\.\\!\\-\\;\\:\\"\\“\\%\\‘\\”\\�\\’\\']' resampler = torchaudio.transforms.Resample(48_000, 16_000) # Preprocessing the datasets. # We need to read the audio files as arrays def speech_file_to_array_fn(batch): batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower() speech_array, sampling_rate = torchaudio.load(batch["path"]) batch["speech"] = resampler(speech_array).squeeze().numpy() return batch test_dataset = test_dataset.map(speech_file_to_array_fn) # Preprocessing the datasets. 
# We need to read the audio files as arrays def evaluate(batch): inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True) with torch.no_grad(): logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits pred_ids = torch.argmax(logits, dim=-1) batch["pred_strings"] = processor.batch_decode(pred_ids) return batch result = test_dataset.map(evaluate, batched=True, batch_size=8) print("WER: {:.2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"]))) ``` **Test Result**: 29.62 % ## Training The Common Voice `train` and `validation` datasets were used for training. The script used for training can be found [here](https://colab.research.google.com/drive/1hesw9z_kFFINT93jBvGuFspOLrHx10AE?usp=sharing).
p208p2002/bart-squad-nqg-hl
2021-05-03T03:17:28.000Z
[ "pytorch", "bart", "seq2seq", "dataset:squad", "arxiv:1606.05250", "arxiv:1705.00106", "transformers", "question-generation", "text2text-generation" ]
text2text-generation
[ ".gitattributes", "README.md", "added_tokens.json", "config.json", "merges.txt", "pytorch_model.bin", "special_tokens_map.json", "tokenizer_config.json", "vocab.json" ]
p208p2002
7
transformers
--- datasets: - squad tags: - question-generation widget: - text: "Harry Potter is a series of seven fantasy novels written by British author, [HL]J. K. Rowling[HL]." --- # Transformer QG on SQuAD HLQG was proposed by [Ying-Hong Chan & Yao-Chung Fan. (2019). A Recurrent BERT-based Model for Question Generation.](https://www.aclweb.org/anthology/D19-5821/) **This is a reproduced version.** More details: [p208p2002/Transformer-QG-on-SQuAD](https://github.com/p208p2002/Transformer-QG-on-SQuAD) ## Usage ### Input Format ``` C' = [c1, c2, ..., [HL], a1, ..., a|A|, [HL], ..., c|C|] ``` ### Input Example ``` Harry Potter is a series of seven fantasy novels written by British author, [HL]J. K. Rowling[HL]. ``` > # Who wrote Harry Potter? ## Data setting We report two dataset settings as follows. ### SQuAD - train: 87599 - validation: 10570 > [SQuAD: 100,000+ Questions for Machine Comprehension of Text](https://arxiv.org/abs/1606.05250) ### SQuAD NQG - train: 75722 - dev: 10570 - test: 11877 > [Learning to Ask: Neural Question Generation for Reading Comprehension](https://arxiv.org/abs/1705.00106) ## Available models - BART - GPT2 - T5 ## Experiments We report scores with the `NQG Scorer` used in SQuAD NQG. Unless otherwise noted, the model size is "base". ### SQuAD Model |Bleu 1|Bleu 2|Bleu 3|Bleu 4|METEOR|ROUGE-L| ---------------------------------|------|------|------|------|------|-------| BART-HLSQG |54.67 |39.26 |30.34 |24.15 |25.43 |52.64 | GPT2-HLSQG |49.31 |33.95 |25.41| 19.69 |22.29 |48.82 | T5-HLSQG |54.29 |39.22 |30.43 |24.26 |25.56 |53.11 | ### SQuAD NQG Model |Bleu 1|Bleu 2|Bleu 3|Bleu 4|METEOR|ROUGE-L| ---------------------------------|------|------|------|------|------|-------| BERT-HLSQG (Chan et al.) |49.73 |34.60 |26.13 |20.33 |23.88 |48.23 | BART-HLSQG |54.12 |38.19 |28.84 |22.35 |24.55 |51.03 | GPT2-HLSQG |49.82 |33.69 |24.71 |18.63 |21.90 |47.60 | T5-HLSQG |53.13 |37.60 |28.62 |22.38 |24.48 |51.20 |
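The card above documents the `[HL]` input format for this BART checkpoint but does not show how to load it or decode a question. Below is a minimal, hedged sketch of one way to do so with the standard `transformers` seq2seq API; the decoding settings (`num_beams`, `max_length`) are illustrative assumptions, not values documented by the author.
```python
# Minimal sketch (not from the original card): generate a question for a highlighted answer span.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_id = "p208p2002/bart-squad-nqg-hl"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

context = ("Harry Potter is a series of seven fantasy novels written by "
           "British author, [HL]J. K. Rowling[HL].")

inputs = tokenizer(context, return_tensors="pt")
# num_beams / max_length are assumed, reasonable defaults for short question generation.
output_ids = model.generate(**inputs, num_beams=4, max_length=64, early_stopping=True)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
# expected to produce something along the lines of: "Who wrote Harry Potter?"
```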
p208p2002/bart-squad-qg-hl
2021-05-03T03:17:22.000Z
[ "pytorch", "bart", "seq2seq", "dataset:squad", "arxiv:1606.05250", "arxiv:1705.00106", "transformers", "question-generation", "text2text-generation" ]
text2text-generation
[ ".gitattributes", "README.md", "added_tokens.json", "config.json", "merges.txt", "pytorch_model.bin", "special_tokens_map.json", "tokenizer_config.json", "vocab.json" ]
p208p2002
497
transformers
--- datasets: - squad tags: - question-generation widget: - text: "Harry Potter is a series of seven fantasy novels written by British author, [HL]J. K. Rowling[HL]." --- # Transformer QG on SQuAD HLQG was proposed by [Ying-Hong Chan & Yao-Chung Fan. (2019). A Recurrent BERT-based Model for Question Generation.](https://www.aclweb.org/anthology/D19-5821/) **This is a reproduced version.** More details: [p208p2002/Transformer-QG-on-SQuAD](https://github.com/p208p2002/Transformer-QG-on-SQuAD) ## Usage ### Input Format ``` C' = [c1, c2, ..., [HL], a1, ..., a|A|, [HL], ..., c|C|] ``` ### Input Example ``` Harry Potter is a series of seven fantasy novels written by British author, [HL]J. K. Rowling[HL]. ``` > # Who wrote Harry Potter? ## Data setting We report two dataset settings as follows. ### SQuAD - train: 87599 - validation: 10570 > [SQuAD: 100,000+ Questions for Machine Comprehension of Text](https://arxiv.org/abs/1606.05250) ### SQuAD NQG - train: 75722 - dev: 10570 - test: 11877 > [Learning to Ask: Neural Question Generation for Reading Comprehension](https://arxiv.org/abs/1705.00106) ## Available models - BART - GPT2 - T5 ## Experiments We report scores with the `NQG Scorer` used in SQuAD NQG. Unless otherwise noted, the model size is "base". ### SQuAD Model |Bleu 1|Bleu 2|Bleu 3|Bleu 4|METEOR|ROUGE-L| ---------------------------------|------|------|------|------|------|-------| BART-HLSQG |54.67 |39.26 |30.34 |24.15 |25.43 |52.64 | GPT2-HLSQG |49.31 |33.95 |25.41| 19.69 |22.29 |48.82 | T5-HLSQG |54.29 |39.22 |30.43 |24.26 |25.56 |53.11 | ### SQuAD NQG Model |Bleu 1|Bleu 2|Bleu 3|Bleu 4|METEOR|ROUGE-L| ---------------------------------|------|------|------|------|------|-------| BERT-HLSQG (Chan et al.) |49.73 |34.60 |26.13 |20.33 |23.88 |48.23 | BART-HLSQG |54.12 |38.19 |28.84 |22.35 |24.55 |51.03 | GPT2-HLSQG |49.82 |33.69 |24.71 |18.63 |21.90 |47.60 | T5-HLSQG |53.13 |37.60 |28.62 |22.38 |24.48 |51.20 |
p208p2002/focus_rqg
2021-06-19T05:14:39.000Z
[ "pytorch", "bart", "seq2seq", "transformers", "text2text-generation" ]
text2text-generation
[ ".gitattributes", "added_tokens.json", "config.json", "merges.txt", "pytorch_model.bin", "special_tokens_map.json", "tokenizer_config.json", "vocab.json" ]
p208p2002
75
transformers
p208p2002/gpt2-drcd-qg-hl
2021-05-23T10:52:50.000Z
[ "pytorch", "jax", "gpt2", "lm-head", "causal-lm", "transformers", "text-generation" ]
text-generation
[ ".gitattributes", "README.md", "added_tokens.json", "config.json", "flax_model.msgpack", "pytorch_model.bin", "special_tokens_map.json", "tokenizer_config.json", "vocab.txt" ]
p208p2002
35
transformers
## Usage Please use BertTokenizerFast as tokenizer instead of AutoTokenizer. 請使用 BertTokenizerFast 而非 AutoTokenizer。 ``` from transformers import ( BertTokenizerFast, AutoModelForCausalLM, ) tokenizer = BertTokenizerFast.from_pretrained('p208p2002/gpt2-drcd-qg-hl') model = AutoModelForCausalLM.from_pretrained('p208p2002/gpt2-drcd-qg-hl') ``` ### Input Format ``` C' = [c1, c2, ..., [HL], a1, ..., a|A|, [HL], ..., c|C|] ``` ### Input Example ``` 哈利·波特是英國作家[HL]羅琳[HL]撰寫的七部幻想小說系列。 ``` > 誰撰寫哈利·波特?
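The snippet in the card stops after loading the tokenizer and model. The sketch below shows one plausible way to run the generation step with the causal-LM API; the decoding parameters, and the assumption that the model simply continues the highlighted context with the question, are guesses not documented in the card.
```python
# Hedged sketch: generation step for p208p2002/gpt2-drcd-qg-hl.
# The separator/stopping convention used at training time is assumed here;
# adjust the prompt handling if the generated text looks off.
import torch
from transformers import BertTokenizerFast, AutoModelForCausalLM

tokenizer = BertTokenizerFast.from_pretrained("p208p2002/gpt2-drcd-qg-hl")
model = AutoModelForCausalLM.from_pretrained("p208p2002/gpt2-drcd-qg-hl")
model.eval()

context = "哈利·波特是英國作家[HL]羅琳[HL]撰寫的七部幻想小說系列。"
input_ids = tokenizer(context, return_tensors="pt").input_ids

with torch.no_grad():
    output_ids = model.generate(
        input_ids,
        max_length=input_ids.shape[-1] + 32,  # leave room for a short question
        num_beams=4,
        early_stopping=True,
        pad_token_id=tokenizer.pad_token_id,
    )

# Keep only the tokens generated after the prompt, i.e. the question.
question = tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True)
print(question)
```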
p208p2002/gpt2-squad-nqg-hl
2021-05-23T10:53:50.000Z
[ "pytorch", "jax", "gpt2", "lm-head", "causal-lm", "dataset:squad", "arxiv:1606.05250", "arxiv:1705.00106", "transformers", "question-generation", "text-generation" ]
text-generation
[ ".gitattributes", "README.md", "added_tokens.json", "config.json", "flax_model.msgpack", "merges.txt", "pytorch_model.bin", "special_tokens_map.json", "tokenizer_config.json", "vocab.json" ]
p208p2002
16
transformers
--- datasets: - squad tags: - question-generation widget: - text: "Harry Potter is a series of seven fantasy novels written by British author, [HL]J. K. Rowling[HL]." --- # Transformer QG on SQuAD HLQG was proposed by [Ying-Hong Chan & Yao-Chung Fan. (2019). A Recurrent BERT-based Model for Question Generation.](https://www.aclweb.org/anthology/D19-5821/) **This is a reproduced version.** More details: [p208p2002/Transformer-QG-on-SQuAD](https://github.com/p208p2002/Transformer-QG-on-SQuAD) ## Usage ### Input Format ``` C' = [c1, c2, ..., [HL], a1, ..., a|A|, [HL], ..., c|C|] ``` ### Input Example ``` Harry Potter is a series of seven fantasy novels written by British author, [HL]J. K. Rowling[HL]. ``` > # Who wrote Harry Potter? ## Data setting We report two dataset settings as follows. ### SQuAD - train: 87599 - validation: 10570 > [SQuAD: 100,000+ Questions for Machine Comprehension of Text](https://arxiv.org/abs/1606.05250) ### SQuAD NQG - train: 75722 - dev: 10570 - test: 11877 > [Learning to Ask: Neural Question Generation for Reading Comprehension](https://arxiv.org/abs/1705.00106) ## Available models - BART - GPT2 - T5 ## Experiments We report scores with the `NQG Scorer` used in SQuAD NQG. Unless otherwise noted, the model size is "base". ### SQuAD Model |Bleu 1|Bleu 2|Bleu 3|Bleu 4|METEOR|ROUGE-L| ---------------------------------|------|------|------|------|------|-------| BART-HLSQG |54.67 |39.26 |30.34 |24.15 |25.43 |52.64 | GPT2-HLSQG |49.31 |33.95 |25.41| 19.69 |22.29 |48.82 | T5-HLSQG |54.29 |39.22 |30.43 |24.26 |25.56 |53.11 | ### SQuAD NQG Model |Bleu 1|Bleu 2|Bleu 3|Bleu 4|METEOR|ROUGE-L| ---------------------------------|------|------|------|------|------|-------| BERT-HLSQG (Chan et al.) |49.73 |34.60 |26.13 |20.33 |23.88 |48.23 | BART-HLSQG |54.12 |38.19 |28.84 |22.35 |24.55 |51.03 | GPT2-HLSQG |49.82 |33.69 |24.71 |18.63 |21.90 |47.60 | T5-HLSQG |53.13 |37.60 |28.62 |22.38 |24.48 |51.20 |
p208p2002/gpt2-squad-qg-hl
2021-05-23T10:54:57.000Z
[ "pytorch", "jax", "gpt2", "lm-head", "causal-lm", "dataset:squad", "arxiv:1606.05250", "arxiv:1705.00106", "transformers", "question-generation", "text-generation" ]
text-generation
[ ".gitattributes", "README.md", "added_tokens.json", "config.json", "flax_model.msgpack", "merges.txt", "pytorch_model.bin", "special_tokens_map.json", "tokenizer_config.json", "vocab.json" ]
p208p2002
58
transformers
--- datasets: - squad tags: - question-generation widget: - text: "Harry Potter is a series of seven fantasy novels written by British author, [HL]J. K. Rowling[HL]." --- # Transformer QG on SQuAD HLQG was proposed by [Ying-Hong Chan & Yao-Chung Fan. (2019). A Recurrent BERT-based Model for Question Generation.](https://www.aclweb.org/anthology/D19-5821/) **This is a reproduced version.** More details: [p208p2002/Transformer-QG-on-SQuAD](https://github.com/p208p2002/Transformer-QG-on-SQuAD) ## Usage ### Input Format ``` C' = [c1, c2, ..., [HL], a1, ..., a|A|, [HL], ..., c|C|] ``` ### Input Example ``` Harry Potter is a series of seven fantasy novels written by British author, [HL]J. K. Rowling[HL]. ``` > # Who wrote Harry Potter? ## Data setting We report two dataset settings as follows. ### SQuAD - train: 87599 - validation: 10570 > [SQuAD: 100,000+ Questions for Machine Comprehension of Text](https://arxiv.org/abs/1606.05250) ### SQuAD NQG - train: 75722 - dev: 10570 - test: 11877 > [Learning to Ask: Neural Question Generation for Reading Comprehension](https://arxiv.org/abs/1705.00106) ## Available models - BART - GPT2 - T5 ## Experiments We report scores with the `NQG Scorer` used in SQuAD NQG. Unless otherwise noted, the model size is "base". ### SQuAD Model |Bleu 1|Bleu 2|Bleu 3|Bleu 4|METEOR|ROUGE-L| ---------------------------------|------|------|------|------|------|-------| BART-HLSQG |54.67 |39.26 |30.34 |24.15 |25.43 |52.64 | GPT2-HLSQG |49.31 |33.95 |25.41| 19.69 |22.29 |48.82 | T5-HLSQG |54.29 |39.22 |30.43 |24.26 |25.56 |53.11 | ### SQuAD NQG Model |Bleu 1|Bleu 2|Bleu 3|Bleu 4|METEOR|ROUGE-L| ---------------------------------|------|------|------|------|------|-------| BERT-HLSQG (Chan et al.) |49.73 |34.60 |26.13 |20.33 |23.88 |48.23 | BART-HLSQG |54.12 |38.19 |28.84 |22.35 |24.55 |51.03 | GPT2-HLSQG |49.82 |33.69 |24.71 |18.63 |21.90 |47.60 | T5-HLSQG |53.13 |37.60 |28.62 |22.38 |24.48 |51.20 |
p208p2002/t5-squad-nqg-hl
2021-05-03T03:16:19.000Z
[ "pytorch", "t5", "seq2seq", "dataset:squad", "arxiv:1606.05250", "arxiv:1705.00106", "transformers", "question-generation", "text2text-generation" ]
text2text-generation
[ ".gitattributes", "README.md", "added_tokens.json", "config.json", "pytorch_model.bin", "special_tokens_map.json", "spiece.model", "tokenizer_config.json" ]
p208p2002
142
transformers
--- datasets: - squad tags: - question-generation widget: - text: "Harry Potter is a series of seven fantasy novels written by British author, [HL]J. K. Rowling[HL]." --- # Transformer QG on SQuAD HLQG was proposed by [Ying-Hong Chan & Yao-Chung Fan. (2019). A Recurrent BERT-based Model for Question Generation.](https://www.aclweb.org/anthology/D19-5821/) **This is a reproduced version.** More details: [p208p2002/Transformer-QG-on-SQuAD](https://github.com/p208p2002/Transformer-QG-on-SQuAD) ## Usage ### Input Format ``` C' = [c1, c2, ..., [HL], a1, ..., a|A|, [HL], ..., c|C|] ``` ### Input Example ``` Harry Potter is a series of seven fantasy novels written by British author, [HL]J. K. Rowling[HL]. ``` > # Who wrote Harry Potter? ## Data setting We report two dataset settings as follows. ### SQuAD - train: 87599 - validation: 10570 > [SQuAD: 100,000+ Questions for Machine Comprehension of Text](https://arxiv.org/abs/1606.05250) ### SQuAD NQG - train: 75722 - dev: 10570 - test: 11877 > [Learning to Ask: Neural Question Generation for Reading Comprehension](https://arxiv.org/abs/1705.00106) ## Available models - BART - GPT2 - T5 ## Experiments We report scores with the `NQG Scorer` used in SQuAD NQG. Unless otherwise noted, the model size is "base". ### SQuAD Model |Bleu 1|Bleu 2|Bleu 3|Bleu 4|METEOR|ROUGE-L| ---------------------------------|------|------|------|------|------|-------| BART-HLSQG |54.67 |39.26 |30.34 |24.15 |25.43 |52.64 | GPT2-HLSQG |49.31 |33.95 |25.41| 19.69 |22.29 |48.82 | T5-HLSQG |54.29 |39.22 |30.43 |24.26 |25.56 |53.11 | ### SQuAD NQG Model |Bleu 1|Bleu 2|Bleu 3|Bleu 4|METEOR|ROUGE-L| ---------------------------------|------|------|------|------|------|-------| BERT-HLSQG (Chan et al.) |49.73 |34.60 |26.13 |20.33 |23.88 |48.23 | BART-HLSQG |54.12 |38.19 |28.84 |22.35 |24.55 |51.03 | GPT2-HLSQG |49.82 |33.69 |24.71 |18.63 |21.90 |47.60 | T5-HLSQG |53.13 |37.60 |28.62 |22.38 |24.48 |51.20 |
p208p2002/t5-squad-qg-hl
2021-05-03T03:17:14.000Z
[ "pytorch", "t5", "seq2seq", "dataset:squad", "arxiv:1606.05250", "arxiv:1705.00106", "transformers", "question-generation", "text2text-generation" ]
text2text-generation
[ ".gitattributes", "README.md", "added_tokens.json", "config.json", "pytorch_model.bin", "special_tokens_map.json", "spiece.model", "tokenizer_config.json" ]
p208p2002
18
transformers
--- datasets: - squad tags: - question-generation widget: - text: "Harry Potter is a series of seven fantasy novels written by British author, [HL]J. K. Rowling[HL]." --- # Transformer QG on SQuAD HLQG was proposed by [Ying-Hong Chan & Yao-Chung Fan. (2019). A Recurrent BERT-based Model for Question Generation.](https://www.aclweb.org/anthology/D19-5821/) **This is a reproduced version.** More details: [p208p2002/Transformer-QG-on-SQuAD](https://github.com/p208p2002/Transformer-QG-on-SQuAD) ## Usage ### Input Format ``` C' = [c1, c2, ..., [HL], a1, ..., a|A|, [HL], ..., c|C|] ``` ### Input Example ``` Harry Potter is a series of seven fantasy novels written by British author, [HL]J. K. Rowling[HL]. ``` > # Who wrote Harry Potter? ## Data setting We report two dataset settings as follows. ### SQuAD - train: 87599 - validation: 10570 > [SQuAD: 100,000+ Questions for Machine Comprehension of Text](https://arxiv.org/abs/1606.05250) ### SQuAD NQG - train: 75722 - dev: 10570 - test: 11877 > [Learning to Ask: Neural Question Generation for Reading Comprehension](https://arxiv.org/abs/1705.00106) ## Available models - BART - GPT2 - T5 ## Experiments We report scores with the `NQG Scorer` used in SQuAD NQG. Unless otherwise noted, the model size is "base". ### SQuAD Model |Bleu 1|Bleu 2|Bleu 3|Bleu 4|METEOR|ROUGE-L| ---------------------------------|------|------|------|------|------|-------| BART-HLSQG |54.67 |39.26 |30.34 |24.15 |25.43 |52.64 | GPT2-HLSQG |49.31 |33.95 |25.41| 19.69 |22.29 |48.82 | T5-HLSQG |54.29 |39.22 |30.43 |24.26 |25.56 |53.11 | ### SQuAD NQG Model |Bleu 1|Bleu 2|Bleu 3|Bleu 4|METEOR|ROUGE-L| ---------------------------------|------|------|------|------|------|-------| BERT-HLSQG (Chan et al.) |49.73 |34.60 |26.13 |20.33 |23.88 |48.23 | BART-HLSQG |54.12 |38.19 |28.84 |22.35 |24.55 |51.03 | GPT2-HLSQG |49.82 |33.69 |24.71 |18.63 |21.90 |47.60 | T5-HLSQG |53.13 |37.60 |28.62 |22.38 |24.48 |51.20 |
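As with the BART checkpoint earlier, this T5 card documents the `[HL]` input format but no loading or decoding code. The sketch below is a hedged, minimal example using the generic seq2seq API; whether the fine-tuned checkpoint expects an additional T5-style task prefix is not stated in the card, and the decoding settings are illustrative assumptions.
```python
# Minimal sketch (not from the original card): question generation with the T5 checkpoint.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_id = "p208p2002/t5-squad-qg-hl"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

context = ("Harry Potter is a series of seven fantasy novels written by "
           "British author, [HL]J. K. Rowling[HL].")

inputs = tokenizer(context, return_tensors="pt")
# Decoding settings are assumed defaults, not values documented by the author.
output_ids = model.generate(**inputs, num_beams=4, max_length=64, early_stopping=True)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```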
pakupoko/bizlin-distil-model
2020-11-29T07:58:15.000Z
[ "pytorch", "distilbert", "question-answering", "transformers" ]
question-answering
[ ".gitattributes", "config.json", "pytorch_model.bin", "special_tokens_map.json", "tokenizer_78b3253a26.model", "tokenizer_config.json", "training_args.bin", "vocab.txt" ]
pakupoko
10
transformers
panggi/t5-base-indonesian-summarization-cased
2020-12-19T17:57:55.000Z
[ "pytorch", "t5", "seq2seq", "id", "dataset:indosum", "transformers", "pipeline:summarization", "summarization", "text2text-generation" ]
summarization
[ ".gitattributes", "README.md", "config.json", "pytorch_model.bin", "special_tokens_map.json", "spiece.model", "tokenizer_config.json" ]
panggi
18
transformers
--- language: id tags: - pipeline:summarization - summarization - t5 datasets: - indosum --- # Indonesian T5 Summarization Base Model Finetuned T5 base summarization model for Indonesian. ## Finetuning Corpus `t5-base-indonesian-summarization-cased` model is based on `t5-base-bahasa-summarization-cased` by [huseinzol05](https://huggingface.co/huseinzol05), finetuned using [indosum](https://github.com/kata-ai/indosum) dataset. ## Load Finetuned Model ```python from transformers import T5Tokenizer, T5Model, T5ForConditionalGeneration tokenizer = T5Tokenizer.from_pretrained("panggi/t5-base-indonesian-summarization-cased") model = T5ForConditionalGeneration.from_pretrained("panggi/t5-base-indonesian-summarization-cased") ``` ## Code Sample ```python from transformers import T5Tokenizer, T5ForConditionalGeneration tokenizer = T5Tokenizer.from_pretrained("panggi/t5-base-indonesian-summarization-cased") model = T5ForConditionalGeneration.from_pretrained("panggi/t5-base-indonesian-summarization-cased") # https://www.sehatq.com/artikel/apa-itu-dispepsia-fungsional-ketahui-gejala-dan-faktor-risikonya ARTICLE_TO_SUMMARIZE = "Secara umum, dispepsia adalah kumpulan gejala pada saluran pencernaan seperti nyeri, sensasi terbakar, dan rasa tidak nyaman pada perut bagian atas. Pada beberapa kasus, dispepsia yang dialami seseorang tidak dapat diketahui penyebabnya. Jenis dispepsia ini disebut dengan dispepsia fungsional. Apa saja gejala dispepsia fungsional? Apa itu dispepsia fungsional? Dispepsia fungsional adalah kumpulan gejala tanpa sebab pada saluran pencernaan bagian atas. Gejala tersebut dapat berupa rasa sakit, nyeri, dan tak nyaman pada perut bagian atas atau ulu hati. Penderita dispepsia fungsional juga akan merasakan kenyang lebih cepat dan sensasi perut penuh berkepanjangan. Gejala-gejala tersebut bisa berlangsung selama sebulan atau lebih. Dispepsia ini memiliki nama “fungsional” karena kumpulan gejalanya tidak memiliki penyebab yang jelas. Dilihat dari fungsi dan struktur saluran pencernaan, dokter tidak menemukan hal yang salah. Namun, gejalanya bisa sangat mengganggu dan menyiksa. Dispepsia fungsional disebut juga dengan dispepsia nonulkus. Diperkirakan bahwa 20% masyarakat dunia menderita dispepsia fungsional. Kondisi ini berisiko tinggi dialami oleh wanita, perokok, dan orang yang mengonsumsi obat anti-peradangan nonsteroid (NSAID). Dispepsia fungsional bisa bersifat kronis dan mengganggu kehidupan penderitanya. Namun beruntung, ada beberapa strategi yang bisa diterapkan untuk mengendalikan gejala dispepsia ini. Strategi tersebut termasuk perubahan gaya hidup, obat-obatan, dan terapi.Ragam gejala dispepsia fungsional Gejala dispepsia fungsional dapat bervariasi antara satu pasien dengan pasien lain. Beberapa tanda yang bisa dirasakan seseorang, yaitu: Sensasi terbakar atau nyeri di saluran pencernaan bagian atas Perut kembung Cepat merasa kenyang walau baru makan sedikit Mual Muntah Bersendawa Rasa asam di mulut Penurunan berat badan Tekanan psikologis terkait dengan kondisi yang dialami Apa sebenarnya penyebab dispepsia fungsional? Sebagai penyakit fungsional, dokter mengkategorikan dispepsia ini sebagai penyakit yang tidak diketahui penyebabnya. Hanya saja, beberapa faktor bisa meningkatkan risiko seseorang terkena dispepsia fungsional. 
Faktor risiko tersebut, termasuk: Alergi terhadap zat tertentu Perubahan mikrobioma usus Infeksi, seperti yang dipicu oleh bakteriHelicobacter pylori Sekresi asam lambung yang tidak normal Peradangan pada saluran pencernaan bagian atas Gangguan pada fungsi lambung untuk mencerna makanan Pola makan tertentu Gaya hidup tidak sehat Stres Kecemasan atau depresi Efek samping pemakaian obat seperti obat antiinflamasi nonsteroid Penanganan untuk dispepsia fungsional Ada banyak pilihan pengobatan untuk dispepsia fungsional. Seperti yang disampaikan di atas, tidak ada penyebab tunggal dispepsia ini yang bisa diketahui. Gejala yang dialami antara satu pasien juga mungkin amat berbeda dari orang lain. Dengan demikian, jenis pengobatan dispepsia fungsional juga akan bervariasi. Beberapa pilihan strategi penanganan untuk dispepsia fungsional, meliputi: 1. Obat-obatan Ada beberapa jenis obat yang mungkin akan diberikan dokter, seperti Obat penetral asam lambung yang disebut penghambat reseptor H2 Obat penghambat produksi asam lambung yang disebut proton pump inhibitors Obat untuk mengendalikan gas di perut yang mengandung simetikon Antidepresan seperti amitriptyline Obat penguat kerongkongan yang disebut agen prokinetik Obat untuk pengosongan isi lambung seperti metoclopramide Antibiotik jika dokter mendeteksi adanya infeksi bakteri H. pylori 2. Anjuran terkait perubahan gaya hidup Selain obat-obatan, dokter akan memberikan rekomendasi perubahan gaya hidup yang harus diterapkan pasien. Tips terkait perubahan gaya hidup termasuk: Makan lebih sering namun dengan porsi yang lebih sedikit Menjauhi makanan berlemak karena memperlambat pengosongan makanan di lambung Menjauhi jenis makanan lain yang memicu gejala dispepsia, seperti makanan pedas, makanan tinggi asam, produk susu, dan produk kafein Menjauhi rokok Dokter juga akan meminta pasien untuk mencari cara untuk mengendalikan stres, tidur dengan kepala lebih tinggi, dan menjalankan usaha untuk mengendalikan berat badan. Apakah penyakit dispepsia itu berbahaya? Dispepsia, termasuk dispepsia fungsional, dapat menjadi kronis dengan gejala yang menyiksa. Jika tidak ditangani, dispepsia tentu dapat berbahaya dan mengganggu kehidupan pasien. Segera hubungi dokter apabila Anda merasakan gejala dispepsia, terlebih jika tidak merespons obat-obatan yang dijual bebas. Catatan dari SehatQ Dispepsia fungsional adalah kumpulan gejala pada saluran pencernaan bagian atas yang tidak diketahui penyebabnya. Dispepsia fungsional dapat ditangani dengan kombinasi obat-obatan dan perubahan gaya hidup. Jika masih memiliki pertanyaan terkait dispepsia fungsional, Anda bisa menanyakan ke dokter di aplikasi kesehatan keluarga SehatQ. Aplikasi SehatQ bisa diunduh gratis di Appstore dan Playstore yang berikan informasi penyakit terpercaya." # generate summary input_ids = tokenizer.encode(ARTICLE_TO_SUMMARIZE, return_tensors='pt') summary_ids = model.generate(input_ids, max_length=100, num_beams=2, repetition_penalty=2.5, length_penalty=1.0, early_stopping=True, no_repeat_ngram_size=2, use_cache=True) summary_text = tokenizer.decode(summary_ids[0], skip_special_tokens=True) print(summary_text) ``` Output: ``` 'Dispepsia fungsional adalah kumpulan gejala tanpa sebab pada saluran pencernaan bagian atas. Gejala tersebut dapat berupa rasa sakit, nyeri, dan tak nyaman pada perut bagian atas. Penderita dispepsia fungsional juga akan merasakan kenyang lebih cepat dan sensasi perut penuh berkepanjangan. Gejala-gejala tersebut bisa berlangsung selama sebulan atau lebih. 
``` ## Acknowledgement Thanks to Immanuel Drexel for his article [Text Summarization, Extractive, T5, Bahasa Indonesia, Huggingface’s Transformers](https://medium.com/analytics-vidhya/text-summarization-t5-bahasa-indonesia-huggingfaces-transformers-ee9bfe368e2f)
panggi/t5-small-indonesian-summarization-cased
2020-12-19T18:01:23.000Z
[ "pytorch", "t5", "seq2seq", "id", "dataset:indosum", "transformers", "pipeline:summarization", "summarization", "text2text-generation" ]
summarization
[ ".gitattributes", "README.md", "config.json", "pytorch_model.bin", "special_tokens_map.json", "spiece.model", "tokenizer_config.json" ]
panggi
28
transformers
--- language: id tags: - pipeline:summarization - summarization - t5 datasets: - indosum --- # Indonesian T5 Summarization Small Model Finetuned T5 small summarization model for Indonesian. ## Finetuning Corpus `t5-small-indonesian-summarization-cased` model is based on `t5-small-bahasa-summarization-cased` by [huseinzol05](https://huggingface.co/huseinzol05), finetuned using [indosum](https://github.com/kata-ai/indosum) dataset. ## Load Finetuned Model ```python from transformers import T5Tokenizer, T5ForConditionalGeneration tokenizer = T5Tokenizer.from_pretrained("panggi/t5-small-indonesian-summarization-cased") model = T5ForConditionalGeneration.from_pretrained("panggi/t5-small-indonesian-summarization-cased") ``` ## Code Sample ```python from transformers import T5Tokenizer, T5ForConditionalGeneration tokenizer = T5Tokenizer.from_pretrained("panggi/t5-small-indonesian-summarization-cased") model = T5ForConditionalGeneration.from_pretrained("panggi/t5-small-indonesian-summarization-cased") # https://www.sehatq.com/artikel/apa-itu-dispepsia-fungsional-ketahui-gejala-dan-faktor-risikonya ARTICLE_TO_SUMMARIZE = "Secara umum, dispepsia adalah kumpulan gejala pada saluran pencernaan seperti nyeri, sensasi terbakar, dan rasa tidak nyaman pada perut bagian atas. Pada beberapa kasus, dispepsia yang dialami seseorang tidak dapat diketahui penyebabnya. Jenis dispepsia ini disebut dengan dispepsia fungsional. Apa saja gejala dispepsia fungsional? Apa itu dispepsia fungsional? Dispepsia fungsional adalah kumpulan gejala tanpa sebab pada saluran pencernaan bagian atas. Gejala tersebut dapat berupa rasa sakit, nyeri, dan tak nyaman pada perut bagian atas atau ulu hati. Penderita dispepsia fungsional juga akan merasakan kenyang lebih cepat dan sensasi perut penuh berkepanjangan. Gejala-gejala tersebut bisa berlangsung selama sebulan atau lebih. Dispepsia ini memiliki nama “fungsional” karena kumpulan gejalanya tidak memiliki penyebab yang jelas. Dilihat dari fungsi dan struktur saluran pencernaan, dokter tidak menemukan hal yang salah. Namun, gejalanya bisa sangat mengganggu dan menyiksa. Dispepsia fungsional disebut juga dengan dispepsia nonulkus. Diperkirakan bahwa 20% masyarakat dunia menderita dispepsia fungsional. Kondisi ini berisiko tinggi dialami oleh wanita, perokok, dan orang yang mengonsumsi obat anti-peradangan nonsteroid (NSAID). Dispepsia fungsional bisa bersifat kronis dan mengganggu kehidupan penderitanya. Namun beruntung, ada beberapa strategi yang bisa diterapkan untuk mengendalikan gejala dispepsia ini. Strategi tersebut termasuk perubahan gaya hidup, obat-obatan, dan terapi.Ragam gejala dispepsia fungsional Gejala dispepsia fungsional dapat bervariasi antara satu pasien dengan pasien lain. Beberapa tanda yang bisa dirasakan seseorang, yaitu: Sensasi terbakar atau nyeri di saluran pencernaan bagian atas Perut kembung Cepat merasa kenyang walau baru makan sedikit Mual Muntah Bersendawa Rasa asam di mulut Penurunan berat badan Tekanan psikologis terkait dengan kondisi yang dialami Apa sebenarnya penyebab dispepsia fungsional? Sebagai penyakit fungsional, dokter mengkategorikan dispepsia ini sebagai penyakit yang tidak diketahui penyebabnya. Hanya saja, beberapa faktor bisa meningkatkan risiko seseorang terkena dispepsia fungsional. 
Faktor risiko tersebut, termasuk: Alergi terhadap zat tertentu Perubahan mikrobioma usus Infeksi, seperti yang dipicu oleh bakteriHelicobacter pylori Sekresi asam lambung yang tidak normal Peradangan pada saluran pencernaan bagian atas Gangguan pada fungsi lambung untuk mencerna makanan Pola makan tertentu Gaya hidup tidak sehat Stres Kecemasan atau depresi Efek samping pemakaian obat seperti obat antiinflamasi nonsteroid Penanganan untuk dispepsia fungsional Ada banyak pilihan pengobatan untuk dispepsia fungsional. Seperti yang disampaikan di atas, tidak ada penyebab tunggal dispepsia ini yang bisa diketahui. Gejala yang dialami antara satu pasien juga mungkin amat berbeda dari orang lain. Dengan demikian, jenis pengobatan dispepsia fungsional juga akan bervariasi. Beberapa pilihan strategi penanganan untuk dispepsia fungsional, meliputi: 1. Obat-obatan Ada beberapa jenis obat yang mungkin akan diberikan dokter, seperti Obat penetral asam lambung yang disebut penghambat reseptor H2 Obat penghambat produksi asam lambung yang disebut proton pump inhibitors Obat untuk mengendalikan gas di perut yang mengandung simetikon Antidepresan seperti amitriptyline Obat penguat kerongkongan yang disebut agen prokinetik Obat untuk pengosongan isi lambung seperti metoclopramide Antibiotik jika dokter mendeteksi adanya infeksi bakteri H. pylori 2. Anjuran terkait perubahan gaya hidup Selain obat-obatan, dokter akan memberikan rekomendasi perubahan gaya hidup yang harus diterapkan pasien. Tips terkait perubahan gaya hidup termasuk: Makan lebih sering namun dengan porsi yang lebih sedikit Menjauhi makanan berlemak karena memperlambat pengosongan makanan di lambung Menjauhi jenis makanan lain yang memicu gejala dispepsia, seperti makanan pedas, makanan tinggi asam, produk susu, dan produk kafein Menjauhi rokok Dokter juga akan meminta pasien untuk mencari cara untuk mengendalikan stres, tidur dengan kepala lebih tinggi, dan menjalankan usaha untuk mengendalikan berat badan. Apakah penyakit dispepsia itu berbahaya? Dispepsia, termasuk dispepsia fungsional, dapat menjadi kronis dengan gejala yang menyiksa. Jika tidak ditangani, dispepsia tentu dapat berbahaya dan mengganggu kehidupan pasien. Segera hubungi dokter apabila Anda merasakan gejala dispepsia, terlebih jika tidak merespons obat-obatan yang dijual bebas. Catatan dari SehatQ Dispepsia fungsional adalah kumpulan gejala pada saluran pencernaan bagian atas yang tidak diketahui penyebabnya. Dispepsia fungsional dapat ditangani dengan kombinasi obat-obatan dan perubahan gaya hidup. Jika masih memiliki pertanyaan terkait dispepsia fungsional, Anda bisa menanyakan ke dokter di aplikasi kesehatan keluarga SehatQ. Aplikasi SehatQ bisa diunduh gratis di Appstore dan Playstore yang berikan informasi penyakit terpercaya." # generate summary input_ids = tokenizer.encode(ARTICLE_TO_SUMMARIZE, return_tensors='pt') summary_ids = model.generate(input_ids, max_length=100, num_beams=2, repetition_penalty=2.5, length_penalty=1.0, early_stopping=True, no_repeat_ngram_size=2, use_cache=True) summary_text = tokenizer.decode(summary_ids[0], skip_special_tokens=True) print(summary_text) ``` Output: ``` 'Dispepsia fungsional adalah kumpulan gejala tanpa sebab pada saluran pencernaan bagian atas. Gejala tersebut dapat berupa rasa sakit, nyeri, dan tak nyaman pada perut bagian atas. Penderita dispepsia fungsional juga akan merasakan kenyang lebih cepat dan sensasi perut penuh berkepanjangan. Gejala-gejala tersebut bisa berlangsung selama sebulan atau lebih. 
``` ## Acknowledgement Thanks to Immanuel Drexel for his article [Text Summarization, Extractive, T5, Bahasa Indonesia, Huggingface’s Transformers](https://medium.com/analytics-vidhya/text-summarization-t5-bahasa-indonesia-huggingfaces-transformers-ee9bfe368e2f)
para-zhou/cunlp-bert-case-uncased
2021-05-20T02:17:20.000Z
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
[ ".gitattributes", "config.json", "flax_model.msgpack", "pytorch_model.bin", "special_tokens_map.json", "tokenizer_config.json", "training_args.bin", "vocab.txt" ]
para-zhou
22
transformers
para-zhou/cunlp-gpt2-dialog
2021-05-23T10:56:01.000Z
[ "pytorch", "jax", "gpt2", "lm-head", "causal-lm", "transformers", "text-generation" ]
text-generation
[ ".gitattributes", "config.json", "eval_results.txt", "flax_model.msgpack", "merges.txt", "pytorch_model.bin", "special_tokens_map.json", "tokenizer_config.json", "training_args.bin", "vocab.json" ]
para-zhou
17
transformers
patricklai14/tapt_citation
2021-05-20T19:15:14.000Z
[ "pytorch", "jax", "roberta", "masked-lm", "transformers", "fill-mask" ]
fill-mask
[ ".gitattributes", "config.json", "flax_model.msgpack", "pytorch_model.bin", "training_args.bin" ]
patricklai14
17
transformers
patrickvonplaten/bert-base-cased_fine_tuned_glue_mrpc_demo
2021-05-20T14:17:38.000Z
[ "jax", "bert", "text-classification", "en", "dataset:glue", "transformers", "license:apache-2.0" ]
text-classification
[ ".gitattributes", "README.md", "config.json", "flax_model.msgpack", "special_tokens_map.json", "tokenizer.json", "tokenizer_config.json", "vocab.txt" ]
patrickvonplaten
19
transformers
--- language: en license: apache-2.0 datasets: - glue --- # Bert-base-cased Fine Tuned Glue Mrpc Demo This checkpoint was initialized from the pre-trained checkpoint bert-base-cased and subsequently fine-tuned on the GLUE MRPC task using [this](https://colab.research.google.com/drive/162pW3wonGcMMrGxmA-jdxwy1rhqXd90x?usp=sharing) notebook. Training was conducted for 3 epochs, using a linearly decaying learning rate of 2e-05 and a total batch size of 32. The model reaches a final training loss of 0.103 and an accuracy of 0.831.
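The card ships no usage snippet and the repository contains only Flax weights (`flax_model.msgpack`), so a minimal loading sketch is given below. The example sentence pair and the label reading (1 = paraphrase, 0 = not a paraphrase, the usual MRPC convention) are assumptions for illustration, not details taken from the card:

```python
from transformers import BertTokenizerFast, FlaxBertForSequenceClassification
import jax.numpy as jnp

repo = "patrickvonplaten/bert-base-cased_fine_tuned_glue_mrpc_demo"
tokenizer = BertTokenizerFast.from_pretrained(repo)
model = FlaxBertForSequenceClassification.from_pretrained(repo)

# MRPC is a sentence-pair task: are the two sentences paraphrases of each other?
inputs = tokenizer(
    "The company's shares rose 3 percent on Monday.",
    "Shares of the company gained 3 percent at the start of the week.",
    return_tensors="np",
)

logits = model(**inputs).logits
print(int(jnp.argmax(logits, axis=-1)[0]))  # assumed mapping: 1 = paraphrase, 0 = not a paraphrase
```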
patrickvonplaten/bert-testing
2021-05-20T02:17:51.000Z
[ "pytorch", "jax", "bert", "masked-lm", "transformers", "fill-mask" ]
fill-mask
[ ".gitattributes", "config.json", "flax_model.msgpack", "pytorch_model.bin" ]
patrickvonplaten
19
transformers
patrickvonplaten/bert2bert-cnn_dailymail-fp16
2020-12-12T11:22:49.000Z
[ "pytorch", "encoder-decoder", "seq2seq", "transformers", "text2text-generation" ]
text2text-generation
[ ".gitattributes", "README.md", "config.json", "pytorch_model.bin", "special_tokens_map.json", "tokenizer_config.json", "vocab.txt" ]
patrickvonplaten
193
transformers
# Bert2Bert Summarization with 🤗 EncoderDecoder Framework This model is a Bert2Bert model fine-tuned on summarization. Bert2Bert is an `EncoderDecoderModel`, meaning that both the encoder and the decoder are `bert-base-uncased` BERT models. Leveraging the [EncoderDecoderFramework](https://huggingface.co/transformers/model_doc/encoderdecoder.html#encoder-decoder-models), the two pretrained models can simply be loaded into the framework via: ```python bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("bert-base-uncased", "bert-base-uncased") ``` The decoder of an `EncoderDecoder` model needs cross-attention layers and usually makes use of causal masking for auto-regressive generation. Thus, ``bert2bert`` was consequently fine-tuned on the `CNN/Daily Mail` dataset and the resulting model `bert2bert-cnn_dailymail-fp16` is uploaded here. ## Example The model is by no means a state-of-the-art model, but nevertheless produces reasonable summarization results. It was mainly fine-tuned as a proof-of-concept for the 🤗 EncoderDecoder Framework. The model can be used as follows: ```python from transformers import BertTokenizer, EncoderDecoderModel model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2bert-cnn_dailymail-fp16") tokenizer = BertTokenizer.from_pretrained("patrickvonplaten/bert2bert-cnn_dailymail-fp16") article = """(CNN)Sigma Alpha Epsilon is under fire for a video showing party-bound fraternity members singing a racist chant. SAE's national chapter suspended the students, but University of Oklahoma President David Boren took it a step further, saying the university's affiliation with the fraternity is permanently done. The news is shocking, but it's not the first time SAE has faced controversy. SAE was founded March 9, 1856, at the University of Alabama, five years before the American Civil War, according to the fraternity website. When the war began, the group had fewer than 400 members, of which "369 went to war for the Confederate States and seven for the Union Army," the website says. The fraternity now boasts more than 200,000 living alumni, along with about 15,000 undergraduates populating 219 chapters and 20 "colonies" seeking full membership at universities. SAE has had to work hard to change recently after a string of member deaths, many blamed on the hazing of new recruits, SAE national President Bradley Cohen wrote in a message on the fraternity's website. The fraternity's website lists more than 130 chapters cited or suspended for "health and safety incidents" since 2010. At least 30 of the incidents involved hazing, and dozens more involved alcohol. However, the list is missing numerous incidents from recent months. Among them, according to various media outlets: Yale University banned the SAEs from campus activities last month after members allegedly tried to interfere with a sexual misconduct investigation connected to an initiation rite. Stanford University in December suspended SAE housing privileges after finding sorority members attending a fraternity function were subjected to graphic sexual content. And Johns Hopkins University in November suspended the fraternity for underage drinking. "The media has labeled us as the 'nation's deadliest fraternity,' " Cohen said. In 2011, for example, a student died while being coerced into excessive alcohol consumption, according to a lawsuit. SAE's previous insurer dumped the fraternity. "As a result, we are paying Lloyd's of London the highest insurance rates in the Greek-letter world," Cohen said.
Universities have turned down SAE's attempts to open new chapters, and the fraternity had to close 12 in 18 months over hazing incidents.""" input_ids = tokenizer(article, return_tensors="pt").input_ids output_ids = model.generate(input_ids) print(tokenizer.decode(output_ids[0], skip_special_tokens=True)) # should produce # sae was founded in 1856, five years before the civil war. the fraternity has had to work hard to change recently. the university of oklahoma president says the university's affiliation with the fraternity is permanently done. the sae has had a string of members in recent months. ``` ## Training script: Please follow this tutorial to see how to warm-start a BERT2BERT model: https://colab.research.google.com/drive/1WIk2bxglElfZewOHboPFNj8H44_VAyKE?usp=sharing The obtained results should be: | - | Rouge2 - mid - precision | Rouge2 - mid - recall | Rouge2 - mid - fmeasure | |----------|:-------------:|:------:|:------:| | **CNN/Daily Mail** | 16.12 | 17.07 | **16.1** |
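For orientation, a condensed sketch of the warm-starting step described in the linked notebook is shown below; the tokenizer and special-token choices here are assumptions, and the notebook remains the authoritative reference:

```python
from transformers import BertTokenizerFast, EncoderDecoderModel

# warm-start encoder and decoder from the same bert-base-uncased checkpoint
bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained(
    "bert-base-uncased", "bert-base-uncased"
)
tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")

# the decoder starts generation from [CLS] and stops at [SEP]
bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
bert2bert.config.eos_token_id = tokenizer.sep_token_id
bert2bert.config.pad_token_id = tokenizer.pad_token_id
bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
```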
patrickvonplaten/bert2bert-tiny
2020-10-18T19:23:27.000Z
[ "pytorch", "encoder-decoder", "seq2seq", "transformers", "text2text-generation" ]
text2text-generation
[ ".gitattributes", "config.json", "pytorch_model.bin", "special_tokens_map.json", "tokenizer_config.json", "vocab.txt" ]
patrickvonplaten
45
transformers
patrickvonplaten/bert2bert_cnn_daily_mail
2020-12-11T21:59:13.000Z
[ "pytorch", "encoder-decoder", "seq2seq", "en", "dataset:cnn_dailymail", "transformers", "license:apache-2.0", "summarization", "text2text-generation" ]
summarization
[ ".gitattributes", ".lock", "README.md", "config.json", "pytorch_model.bin", "special_tokens_map.json", "tokenizer_config.json", "vocab.txt" ]
patrickvonplaten
3,206
transformers
--- language: en license: apache-2.0 datasets: - cnn_dailymail tags: - summarization --- # Bert2Bert Summarization with 🤗 EncoderDecoder Framework This model is a warm-started *BERT2BERT* model fine-tuned on the *CNN/Dailymail* summarization dataset. The model achieves an **18.22** ROUGE-2 score on *CNN/Dailymail*'s test dataset. For more details on how the model was fine-tuned, please refer to [this](https://colab.research.google.com/drive/1Ekd5pUeCX7VOrMx94_czTkwNtLN32Uyu?usp=sharing) notebook.
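The card itself includes no usage snippet; assuming the checkpoint loads the same way as the `bert2bert-cnn_dailymail-fp16` card above (an `EncoderDecoderModel` with a BERT tokenizer), a minimal sketch would be:

```python
from transformers import BertTokenizer, EncoderDecoderModel

repo = "patrickvonplaten/bert2bert_cnn_daily_mail"
tokenizer = BertTokenizer.from_pretrained(repo)
model = EncoderDecoderModel.from_pretrained(repo)

article = "..."  # placeholder: any news article to summarize

# BERT encoders are capped at 512 tokens, so truncate longer articles
input_ids = tokenizer(article, return_tensors="pt", truncation=True, max_length=512).input_ids
output_ids = model.generate(input_ids)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```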
patrickvonplaten/bert2gpt2-cnn_dailymail-fp16
2020-12-11T21:59:16.000Z
[ "pytorch", "encoder_decoder", "seq2seq", "transformers", "text2text-generation" ]
text2text-generation
[ ".gitattributes", "README.md", "config.json", "pytorch_model.bin" ]
patrickvonplaten
212
transformers
# Bert2GPT2 Summarization with 🤗 EncoderDecoder Framework This model is a Bert2GPT2 model fine-tuned on summarization. Bert2GPT2 is an `EncoderDecoderModel`, meaning that the encoder is a `bert-base-uncased` BERT model and the decoder is a `gpt2` GPT2 model. Leveraging the [EncoderDecoderFramework](https://huggingface.co/transformers/model_doc/encoderdecoder.html#encoder-decoder-models), the two pretrained models can simply be loaded into the framework via: ```python bert2gpt2 = EncoderDecoderModel.from_encoder_decoder_pretrained("bert-base-uncased", "gpt2") ``` The decoder of an `EncoderDecoder` model needs cross-attention layers and usually makes use of causal masking for auto-regressive generation. Thus, ``bert2gpt2`` was consequently fine-tuned on the `CNN/Daily Mail` dataset and the resulting model `bert2gpt2-cnn_dailymail-fp16` is uploaded here. ## Example The model is by no means a state-of-the-art model, but nevertheless produces reasonable summarization results. It was mainly fine-tuned as a proof-of-concept for the 🤗 EncoderDecoder Framework. The model can be used as follows: ```python from transformers import BertTokenizer, GPT2Tokenizer, EncoderDecoderModel model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2gpt2-cnn_dailymail-fp16") # reuse tokenizer from bert2bert encoder-decoder model bert_tokenizer = BertTokenizer.from_pretrained("patrickvonplaten/bert2bert-cnn_dailymail-fp16") article = """(CNN)Sigma Alpha Epsilon is under fire for a video showing party-bound fraternity members singing a racist chant. SAE's national chapter suspended the students, but University of Oklahoma President David Boren took it a step further, saying the university's affiliation with the fraternity is permanently done. The news is shocking, but it's not the first time SAE has faced controversy. SAE was founded March 9, 1856, at the University of Alabama, five years before the American Civil War, according to the fraternity website. When the war began, the group had fewer than 400 members, of which "369 went to war for the Confederate States and seven for the Union Army," the website says. The fraternity now boasts more than 200,000 living alumni, along with about 15,000 undergraduates populating 219 chapters and 20 "colonies" seeking full membership at universities. SAE has had to work hard to change recently after a string of member deaths, many blamed on the hazing of new recruits, SAE national President Bradley Cohen wrote in a message on the fraternity's website. The fraternity's website lists more than 130 chapters cited or suspended for "health and safety incidents" since 2010. At least 30 of the incidents involved hazing, and dozens more involved alcohol. However, the list is missing numerous incidents from recent months. Among them, according to various media outlets: Yale University banned the SAEs from campus activities last month after members allegedly tried to interfere with a sexual misconduct investigation connected to an initiation rite. Stanford University in December suspended SAE housing privileges after finding sorority members attending a fraternity function were subjected to graphic sexual content. And Johns Hopkins University in November suspended the fraternity for underage drinking. "The media has labeled us as the 'nation's deadliest fraternity,' " Cohen said. In 2011, for example, a student died while being coerced into excessive alcohol consumption, according to a lawsuit. SAE's previous insurer dumped the fraternity.
"As a result, we are paying Lloy d's of London the highest insurance rates in the Greek-letter world," Cohen said. Universities have turned down SAE's attempts to open new chapters, and the fraternity had to close 12 in 18 months over hazing in cidents.""" input_ids = bert_tokenizer(article, return_tensors="pt").input_ids output_ids = model.generate(input_ids) # we need a gpt2 tokenizer for the output word embeddings gpt2_tokenizer = GPT2Tokenizer.from_pretrained("gpt2") print(gpt2_tokenizer.decode(output_ids[0], skip_special_tokens=True)) # should produce # SAE's national chapter suspended the students, but university president says it's permanent. # The fraternity has had to deal with a string of incidents since 2010. # SAE has more than 200,000 members, many of whom are students. # A student died while being coerced into drinking alcohol. ``` ## Training script: **IMPORTANT**: In order for this code to work, make sure you checkout to the branch [more_general_trainer_metric](https://github.com/huggingface/transformers/tree/more_general_trainer_metric), which slightly adapts the `Trainer` for `EncoderDecoderModels` according to this PR: https://github.com/huggingface/transformers/pull/5840. The following code shows the complete training script that was used to fine-tune `bert2gpt2-cnn_dailymail-fp16 ` for reproducability. The training last ~11h on a standard GPU. ```python #!/usr/bin/env python3 import nlp import logging from transformers import BertTokenizer, GPT2Tokenizer, EncoderDecoderModel, Trainer, TrainingArguments logging.basicConfig(level=logging.INFO) model = EncoderDecoderModel.from_encoder_decoder_pretrained("bert-base-cased", "gpt2") # cache is currently not supported by EncoderDecoder framework model.decoder.config.use_cache = False bert_tokenizer = BertTokenizer.from_pretrained("bert-base-cased") # CLS token will work as BOS token bert_tokenizer.bos_token = bert_tokenizer.cls_token # SEP token will work as EOS token bert_tokenizer.eos_token = bert_tokenizer.sep_token # make sure GPT2 appends EOS in begin and end def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): outputs = [self.bos_token_id] + token_ids_0 + [self.eos_token_id] return outputs GPT2Tokenizer.build_inputs_with_special_tokens = build_inputs_with_special_tokens gpt2_tokenizer = GPT2Tokenizer.from_pretrained("gpt2") # set pad_token_id to unk_token_id -> be careful here as unk_token_id == eos_token_id == bos_token_id gpt2_tokenizer.pad_token = gpt2_tokenizer.unk_token # set decoding params model.config.decoder_start_token_id = gpt2_tokenizer.bos_token_id model.config.eos_token_id = gpt2_tokenizer.eos_token_id model.config.max_length = 142 model.config.min_length = 56 model.config.no_repeat_ngram_size = 3 model.early_stopping = True model.length_penalty = 2.0 model.num_beams = 4 # load train and validation data train_dataset = nlp.load_dataset("cnn_dailymail", "3.0.0", split="train") val_dataset = nlp.load_dataset("cnn_dailymail", "3.0.0", split="validation[:5%]") # load rouge for validation rouge = nlp.load_metric("rouge", experiment_id=1) encoder_length = 512 decoder_length = 128 batch_size = 16 # map data correctly def map_to_encoder_decoder_inputs(batch): # Tokenizer will automatically set [BOS] <text> [EOS] # use bert tokenizer here for encoder inputs = bert_tokenizer(batch["article"], padding="max_length", truncation=True, max_length=encoder_length) # force summarization <= 128 outputs = gpt2_tokenizer(batch["highlights"], padding="max_length", truncation=True, 
max_length=decoder_length) batch["input_ids"] = inputs.input_ids batch["attention_mask"] = inputs.attention_mask batch["decoder_input_ids"] = outputs.input_ids batch["labels"] = outputs.input_ids.copy() batch["decoder_attention_mask"] = outputs.attention_mask # complicated list comprehension here because pad_token_id alone is not good enough to know whether label should be excluded or not batch["labels"] = [ [-100 if mask == 0 else token for mask, token in mask_and_tokens] for mask_and_tokens in [zip(masks, labels) for masks, labels in zip(batch["decoder_attention_mask"], batch["labels"])] ] assert all([len(x) == encoder_length for x in inputs.input_ids]) assert all([len(x) == decoder_length for x in outputs.input_ids]) return batch def compute_metrics(pred): labels_ids = pred.label_ids pred_ids = pred.predictions # all unnecessary tokens are removed pred_str = gpt2_tokenizer.batch_decode(pred_ids, skip_special_tokens=True) labels_ids[labels_ids == -100] = gpt2_tokenizer.eos_token_id label_str = gpt2_tokenizer.batch_decode(labels_ids, skip_special_tokens=True) rouge_output = rouge.compute(predictions=pred_str, references=label_str, rouge_types=["rouge2"])["rouge2"].mid return { "rouge2_precision": round(rouge_output.precision, 4), "rouge2_recall": round(rouge_output.recall, 4), "rouge2_fmeasure": round(rouge_output.fmeasure, 4), } # make train dataset ready train_dataset = train_dataset.map( map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], ) train_dataset.set_format( type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], ) # same for validation dataset val_dataset = val_dataset.map( map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], ) val_dataset.set_format( type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], ) # set training arguments - these params are not really tuned, feel free to change training_args = TrainingArguments( output_dir="./", per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, predict_from_generate=True, evaluate_during_training=True, do_train=True, do_eval=True, logging_steps=1000, save_steps=1000, eval_steps=1000, overwrite_output_dir=True, warmup_steps=2000, save_total_limit=10, fp16=True, ) # instantiate trainer trainer = Trainer( model=model, args=training_args, compute_metrics=compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, ) # start training trainer.train() ``` ## Evaluation The following script evaluates the model on the test set of CNN/Daily Mail. 
```python #!/usr/bin/env python3 import nlp from transformers import BertTokenizer, GPT2Tokenizer, EncoderDecoderModel model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2gpt2-cnn_dailymail-fp16") model.to("cuda") bert_tokenizer = BertTokenizer.from_pretrained("bert-base-cased") # CLS token will work as BOS token bert_tokenizer.bos_token = bert_tokenizer.cls_token # SEP token will work as EOS token bert_tokenizer.eos_token = bert_tokenizer.sep_token # make sure GPT2 appends EOS in begin and end def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): outputs = [self.bos_token_id] + token_ids_0 + [self.eos_token_id] return outputs GPT2Tokenizer.build_inputs_with_special_tokens = build_inputs_with_special_tokens gpt2_tokenizer = GPT2Tokenizer.from_pretrained("gpt2") # set pad_token_id to unk_token_id -> be careful here as unk_token_id == eos_token_id == bos_token_id gpt2_tokenizer.pad_token = gpt2_tokenizer.unk_token # set decoding params model.config.decoder_start_token_id = gpt2_tokenizer.bos_token_id model.config.eos_token_id = gpt2_tokenizer.eos_token_id model.config.max_length = 142 model.config.min_length = 56 model.config.no_repeat_ngram_size = 3 model.early_stopping = True model.length_penalty = 2.0 model.num_beams = 4 test_dataset = nlp.load_dataset("cnn_dailymail", "3.0.0", split="test") batch_size = 64 # map data correctly def generate_summary(batch): # Tokenizer will automatically set [BOS] <text> [EOS] # cut off at BERT max length 512 inputs = bert_tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512, return_tensors="pt") input_ids = inputs.input_ids.to("cuda") attention_mask = inputs.attention_mask.to("cuda") outputs = model.generate(input_ids, attention_mask=attention_mask) # all special tokens including will be removed output_str = gpt2_tokenizer.batch_decode(outputs, skip_special_tokens=True) batch["pred"] = output_str return batch results = test_dataset.map(generate_summary, batched=True, batch_size=batch_size, remove_columns=["article"]) # load rouge for validation rouge = nlp.load_metric("rouge") pred_str = results["pred"] label_str = results["highlights"] rouge_output = rouge.compute(predictions=pred_str, references=label_str, rouge_types=["rouge2"])["rouge2"].mid print(rouge_output) ``` The obtained results should be: | - | Rouge2 - mid -precision | Rouge2 - mid - recall | Rouge2 - mid - fmeasure | |----------|:-------------:|:------:|:------:| | **CNN/Daily Mail** | 14.42 | 16.99 | **15.16** |
patrickvonplaten/big-bird-base-trivia-qa
2021-03-04T12:13:47.000Z
[ "pytorch", "big_bird", "question-answering", "transformers" ]
question-answering
[ ".gitattributes", "config.json", "pytorch_model.bin", "special_tokens_map.json", "spiece.model", "tokenizer_config.json" ]
patrickvonplaten
24
transformers
patrickvonplaten/bigbird-large-trivia-itc
2021-03-22T13:15:15.000Z
[]
[ ".gitattributes" ]
patrickvonplaten
0
patrickvonplaten/bigbird-roberta-base-original-attn
2021-03-02T16:11:07.000Z
[ "pytorch", "big_bird", "masked-lm", "transformers", "fill-mask" ]
fill-mask
[ ".gitattributes", "config.json", "pytorch_model.bin", "special_tokens_map.json", "spiece.model", "tokenizer_config.json" ]
patrickvonplaten
23
transformers
patrickvonplaten/bigbird-roberta-large
2021-03-22T12:56:14.000Z
[ "pytorch", "big_bird", "pretraining", "transformers" ]
[ ".gitattributes", "config.json", "pytorch_model.bin", "special_tokens_map.json", "spiece.model", "tokenizer_config.json" ]
patrickvonplaten
12
transformers
patrickvonplaten/dummy_to_del
2021-05-26T11:23:40.000Z
[ "pytorch", "bert", "transformers" ]
[ ".gitattributes", "config.json", "pytorch_model.bin" ]
patrickvonplaten
8
transformers
patrickvonplaten/german-roberta-base
2021-05-20T19:15:34.000Z
[ "roberta", "masked-lm", "transformers", "fill-mask" ]
fill-mask
[ ".gitattributes", "config.json", "run_mlm.py", "run_mlm.sh", "tokenizer.json", "train_tokenizer.py" ]
patrickvonplaten
8
transformers
patrickvonplaten/hf-reformer-crime-and-punish
2020-05-11T11:10:52.000Z
[ "pytorch", "reformer", "lm-head", "causal-lm", "transformers", "text-generation" ]
text-generation
[ ".gitattributes", "config.json", "pytorch_model.bin", "spiece.model" ]
patrickvonplaten
58
transformers
patrickvonplaten/led-large-16384-pubmed
2021-01-11T15:42:53.000Z
[ "pytorch", "tf", "led", "seq2seq", "en", "dataset:scientific_papers", "transformers", "license:apache-2.0", "text2text-generation" ]
text2text-generation
[ ".gitattributes", "README.md", "config.json", "merges.txt", "pytorch_model.bin", "special_tokens_map.json", "tf_model.h5", "tokenizer_config.json", "vocab.json" ]
patrickvonplaten
168
transformers
--- language: en datasets: - scientific_papers license: apache-2.0 --- ## Introduction [Allenai's Longformer Encoder-Decoder (LED)](https://github.com/allenai/longformer#longformer). This is an unofficial *led-large-16384* checkpoint that is fine-tuned on the [pubmed dataset](https://huggingface.co/datasets/scientific_papers). The model was fine-tuned and evaluated as detailed in [this notebook](https://colab.research.google.com/drive/12LjJazBl7Gam0XBPy_y0CTOJZeZ34c2v?usp=sharing) ## Results The model achieves a **Rouge-2** score of 19.33 on Pubmed which is competitive to state-of-the-art models. ## Usage The model can be used as follows. The input is taken from the test data of the [pubmed dataset](https://huggingface.co/datasets/scientific_papers). ```python LONG_ARTICLE = """"anxiety affects quality of life in those living with parkinson 's disease ( pd ) more so than overall cognitive status , motor deficits , apathy , and depression [ 13 ] . although anxiety and depression are often related and coexist in pd patients , recent research suggests that anxiety rather than depression is the most prominent and prevalent mood disorder in pd [ 5 , 6 ] . yet , our current understanding of anxiety and its impact on cognition in pd , as well as its neural basis and best treatment practices , remains meager and lags far behind that of depression . overall , neuropsychiatric symptoms in pd have been shown to be negatively associated with cognitive performance . for example , higher depression scores have been correlated with lower scores on the mini - mental state exam ( mmse ) [ 8 , 9 ] as well as tests of memory and executive functions ( e.g. , attention ) [ 1014 ] . likewise , apathy and anhedonia in pd patients have been associated with executive dysfunction [ 10 , 1523 ] . however , few studies have specifically investigated the relationship between anxiety and cognition in pd . one study showed a strong negative relationship between anxiety ( both state and trait ) and overall cognitive performance ( measured by the total of the repeatable battery for the assessment of neuropsychological status index ) within a sample of 27 pd patients . furthermore , trait anxiety was negatively associated with each of the cognitive domains assessed by the rbans ( i.e. , immediate memory , visuospatial construction , language , attention , and delayed memory ) . two further studies have examined whether anxiety differentially affects cognition in patients with left - sided dominant pd ( lpd ) versus right - sided dominant pd ( rpd ) ; however , their findings were inconsistent . the first study found that working memory performance was worse in lpd patients with anxiety compared to rpd patients with anxiety , whereas the second study reported that , in lpd , apathy but not anxiety was associated with performance on nonverbally mediated executive functions and visuospatial tasks ( e.g. , tmt - b , wms - iii spatial span ) , while in rpd , anxiety but not apathy significantly correlated with performance on verbally mediated tasks ( e.g. , clock reading test and boston naming test ) . furthermore , anxiety was significantly correlated with neuropsychological measures of attention and executive and visuospatial functions . taken together , it is evident that there are limited and inconsistent findings describing the relationship between anxiety and cognition in pd and more specifically how anxiety might influence particular domains of cognition such as attention and memory and executive functioning . 
it is also striking that , to date , no study has examined the influence of anxiety on cognition in pd by directly comparing groups of pd patients with and without anxiety while excluding depression . given that research on healthy young adults suggests that anxiety reduces processing capacity and impairs processing efficiency , especially in the central executive and attentional systems of working memory [ 26 , 27 ] , we hypothesized that pd patients with anxiety would show impairments in attentional set - shifting and working memory compared to pd patients without anxiety . furthermore , since previous work , albeit limited , has focused on the influence of symptom laterality on anxiety and cognition , we also explored this relationship . seventeen pd patients with anxiety and thirty - three pd patients without anxiety were included in this study ( see table 1 ) . the cross - sectional data from these participants was taken from a patient database that has been compiled over the past 8 years ( since 2008 ) at the parkinson 's disease research clinic at the brain and mind centre , university of sydney . inclusion criteria involved a diagnosis of idiopathic pd according to the united kingdom parkinson 's disease society brain bank criteria and were confirmed by a neurologist ( sjgl ) . patients also had to have an adequate proficiency in english and have completed a full neuropsychological assessment . ten patients in this study ( 5 pd with anxiety ; 5 pd without anxiety ) were taking psychotropic drugs ( i.e. , benzodiazepine or selective serotonin reuptake inhibitor ) . patients were also excluded if they had other neurological disorders , psychiatric disorders other than affective disorders ( such as anxiety ) , or if they reported a score greater than six on the depression subscale of the hospital anxiety and depression scale ( hads ) . thus , all participants who scored within a depressed ( hads - d > 6 ) range were excluded from this study , in attempt to examine a refined sample of pd patients with and without anxiety in order to determine the independent effect of anxiety on cognition . this research was approved by the human research ethics committee of the university of sydney , and written informed consent was obtained from all participants . self - reported hads was used to assess anxiety in pd and has been previously shown to be a useful measure of clinical anxiety in pd . a cut - off score of > 8 on the anxiety subscale of the hads ( hads - a ) was used to identify pd cases with anxiety ( pda+ ) , while a cut - off score of < 6 on the hads - a was used to identify pd cases without anxiety ( pda ) . this criterion was more stringent than usual ( > 7 cut - off score ) , in effort to create distinct patient groups . the neurological evaluation rated participants according to hoehn and yahr ( h&y ) stages and assessed their motor symptoms using part iii of the revised mds task force unified parkinson 's disease rating scale ( updrs ) . in a similar way this was determined by calculating a total left and right score from rigidity items 3035 , voluntary movement items 3643 , and tremor items 5057 from the mds - updrs part iii ( see table 1 ) . processing speed was assessed using the trail making test , part a ( tmt - a , z - score ) . attentional set - shifting was measured using the trail making test , part b ( tmt - b , z - score ) . working memory was assessed using the digit span forward and backward subtest of the wechsler memory scale - iii ( raw scores ) . 
language was assessed with semantic and phonemic verbal fluency via the controlled oral word associated test ( cowat animals and letters , z - score ) . the ability to retain learned verbal memory was assessed using the logical memory subtest from the wechsler memory scale - iii ( lm - i z - score , lm - ii z - score , % lm retention z - score ) . the mini - mental state examination ( mmse ) demographic , clinical , and neuropsychological variables were compared between the two groups with the independent t - test or mann whitney u test , depending on whether the variable met parametric assumptions . chi - square tests were used to examine gender and symptom laterality differences between groups . all analyses employed an alpha level of p < 0.05 and were two - tailed . spearman correlations were performed separately in each group to examine associations between anxiety and/or depression ratings and cognitive functions . as expected , the pda+ group reported significant greater levels of anxiety on the hads - a ( u = 0 , p < 0.001 ) and higher total score on the hads ( u = 1 , p < 0.001 ) compared to the pda group ( table 1 ) . groups were matched in age ( t(48 ) = 1.31 , p = 0.20 ) , disease duration ( u = 259 , p = 0.66 ) , updrs - iii score ( u = 250.5 , p = 0.65 ) , h&y ( u = 245 , p = 0.43 ) , ledd ( u = 159.5 , p = 0.80 ) , and depression ( hads - d ) ( u = 190.5 , p = 0.06 ) . additionally , all groups were matched in the distribution of gender ( = 0.098 , p = 0.75 ) and side - affected ( = 0.765 , p = 0.38 ) . there were no group differences for tmt - a performance ( u = 256 , p = 0.62 ) ( table 2 ) ; however , the pda+ group had worse performance on the trail making test part b ( t(46 ) = 2.03 , p = 0.048 ) compared to the pda group ( figure 1 ) . the pda+ group also demonstrated significantly worse performance on the digit span forward subtest ( t(48 ) = 2.22 , p = 0.031 ) and backward subtest ( u = 190.5 , p = 0.016 ) compared to the pda group ( figures 2(a ) and 2(b ) ) . neither semantic verbal fluency ( t(47 ) = 0.70 , p = 0.49 ) nor phonemic verbal fluency ( t(47 ) = 0.39 , p = 0.70 ) differed between groups . logical memory i immediate recall test ( u = 176 , p = 0.059 ) showed a trend that the pda+ group had worse new verbal learning and immediate recall abilities than the pda group . however , logical memory ii test performance ( u = 219 , p = 0.204 ) and logical memory % retention ( u = 242.5 , p = 0.434 ) did not differ between groups . there were also no differences between groups in global cognition ( mmse ) ( u = 222.5 , p = 0.23 ) . participants were split into lpd and rpd , and then further group differences were examined between pda+ and pda. importantly , the groups remained matched in age , disease duration , updrs - iii , dde , h&y stage , and depression but remained significantly different on self - reported anxiety . lpda+ demonstrated worse performance on the digit span forward test ( t(19 ) = 2.29 , p = 0.033 ) compared to lpda , whereas rpda+ demonstrated worse performance on the digit span backward test ( u = 36.5 , p = 0.006 ) , lm - i immediate recall ( u = 37.5 , p = 0.008 ) , and lm - ii ( u = 45.0 , p = 0.021 ) but not lm % retention ( u = 75.5 , p = 0.39 ) compared to rpda. this study is the first to directly compare cognition between pd patients with and without anxiety . the findings confirmed our hypothesis that anxiety negatively influences attentional set - shifting and working memory in pd . 
more specifically , we found that pd patients with anxiety were more impaired on the trail making test part b which assessed attentional set - shifting , on both digit span tests which assessed working memory and attention , and to a lesser extent on the logical memory test which assessed memory and new verbal learning compared to pd patients without anxiety . taken together , these findings suggest that anxiety in pd may reduce processing capacity and impair processing efficiency , especially in the central executive and attentional systems of working memory in a similar way as seen in young healthy adults [ 26 , 27 ] . although the neurobiology of anxiety in pd remains unknown , many researchers have postulated that anxiety disorders are related to neurochemical changes that occur during the early , premotor stages of pd - related degeneration [ 37 , 38 ] such as nigrostriatal dopamine depletion , as well as cell loss within serotonergic and noradrenergic brainstem nuclei ( i.e. , raphe nuclei and locus coeruleus , resp . , which provide massive inputs to corticolimbic regions ) . over time , chronic dysregulation of adrenocortical and catecholamine functions can lead to hippocampal damage as well as dysfunctional prefrontal neural circuitries [ 39 , 40 ] , which play a key role in memory and attention . recent functional neuroimaging work has suggested that enhanced hippocampal activation during executive functioning and working memory tasks may represent compensatory processes for impaired frontostriatal functions in pd patients compared to controls . therefore , chronic stress from anxiety , for example , may disrupt compensatory processes in pd patients and explain the cognitive impairments specifically in working memory and attention seen in pd patients with anxiety . it has also been suggested that hyperactivation within the putamen may reflect a compensatory striatal mechanism to maintain normal working memory performance in pd patients ; however , losing this compensatory activation has been shown to contribute to poor working memory performance . anxiety in mild pd has been linked to reduced putamen dopamine uptake which becomes more extensive as the disease progresses . this further supports the notion that anxiety may disrupt compensatory striatal mechanisms as well , providing another possible explanation for the cognitive impairments observed in pd patients with anxiety in this study . noradrenergic and serotonergic systems should also be considered when trying to explain the mechanisms by which anxiety may influence cognition in pd . although these neurotransmitter systems are relatively understudied in pd cognition , treating the noradrenergic and serotonergic systems has shown beneficial effects on cognition in pd . selective serotonin reuptake inhibitor , citalopram , was shown to improve response inhibition deficits in pd , while noradrenaline reuptake blocker , atomoxetine , has been recently reported to have promising effects on cognition in pd [ 45 , 46 ] . overall , very few neuroimaging studies have been conducted in pd in order to understand the neural correlates of pd anxiety and its underlying neural pathology . future research should focus on relating anatomical changes and neurochemical changes to neural activation in order to gain a clearer understanding on how these pathologies affect anxiety in pd . 
to further understand how anxiety and cognitive dysfunction are related , future research should focus on using advanced structural and function imaging techniques to explain both cognitive and neural breakdowns that are associated with anxiety in pd patients . research has indicated that those with amnestic mild cognitive impairment who have more neuropsychiatric symptoms have a greater risk of developing dementia compared to those with fewer neuropsychiatric symptoms . future studies should also examine whether treating neuropsychiatric symptoms might impact the progression of cognitive decline and improve cognitive impairments in pd patients . previous studies have used pd symptom laterality as a window to infer asymmetrical dysfunction of neural circuits . for example , lpd patients have greater inferred right hemisphere pathology , whereas rpd patients have greater inferred left hemisphere pathology . thus , cognitive domains predominantly subserved by the left hemisphere ( e.g. , verbally mediated tasks of executive function and verbal memory ) might be hypothesized to be more affected in rpd than lpd ; however , this remains controversial . it has also been suggested that since anxiety is a common feature of left hemisphere involvement [ 48 , 49 ] , cognitive domains subserved by the left hemisphere may also be more strongly related to anxiety . results from this study showed selective verbal memory deficits in rpd patients with anxiety compared to rpd without anxiety , whereas lpd patients with anxiety had greater attentional / working memory deficits compared to lpd without anxiety . although these results align with previous research , interpretations of these findings should be made with caution due to the small sample size in the lpd comparison specifically . recent work has suggested that the hads questionnaire may underestimate the burden of anxiety related symptomology and therefore be a less sensitive measure of anxiety in pd [ 30 , 50 ] . in addition , our small sample size also limited the statistical power for detecting significant findings . based on these limitations , our findings are likely conservative and underrepresent the true impact anxiety has on cognition in pd . additionally , the current study employed a very brief neuropsychological assessment including one or two tests for each cognitive domain . future studies are encouraged to collect a more complex and comprehensive battery from a larger sample of pd participants in order to better understand the role anxiety plays on cognition in pd . another limitation of this study was the absence of diagnostic interviews to characterize participants ' psychiatric symptoms and specify the type of anxiety disorders included in this study . future studies should perform diagnostic interviews with participants ( e.g. , using dsm - v criteria ) rather than relying on self - reported measures to group participants , in order to better understand whether the type of anxiety disorder ( e.g. , social anxiety , phobias , panic disorders , and generalized anxiety ) influences cognitive performance differently in pd . one advantage the hads questionnaire provided over other anxiety scales was that it assessed both anxiety and depression simultaneously and allowed us to control for coexisting depression . although there was a trend that the pda+ group self - reported higher levels of depression than the pda group , all participants included in the study scored < 6 on the depression subscale of the hads . 
controlling for depression while assessing anxiety has been identified as a key shortcoming in the majority of recent work . considering many previous studies have investigated the influence of depression on cognition in pd without accounting for the presence of anxiety and the inconsistent findings reported to date , we recommend that future research should try to disentangle the influence of anxiety versus depression on cognitive impairments in pd . considering the growing number of clinical trials for treating depression , there are few if any for the treatment of anxiety in pd . anxiety is a key contributor to decreased quality of life in pd and greatly requires better treatment options . moreover , anxiety has been suggested to play a key role in freezing of gait ( fog ) , which is also related to attentional set - shifting [ 52 , 53 ] . future research should examine the link between anxiety , set - shifting , and fog , in order to determine whether treating anxiety might be a potential therapy for improving fog .""" from transformers import LEDForConditionalGeneration, LEDTokenizer import torch tokenizer = LEDTokenizer.from_pretrained("patrickvonplaten/led-large-16384-pubmed") input_ids = tokenizer(LONG_ARTICLE, return_tensors="pt").input_ids.to("cuda") global_attention_mask = torch.zeros_like(input_ids) # set global_attention_mask on first token global_attention_mask[:, 0] = 1 model = LEDForConditionalGeneration.from_pretrained("patrickvonplaten/led-large-16384-pubmed", return_dict_in_generate=True).to("cuda") sequences = model.generate(input_ids, global_attention_mask=global_attention_mask).sequences summary = tokenizer.batch_decode(sequences) ```
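To reproduce the ROUGE-2 number reported above, an evaluation loop along the following lines could be used. This is a sketch only — the dataset slice, batch size, and tokenization length are assumptions, and the linked notebook remains the authoritative reference:

```python
import torch
from datasets import load_dataset, load_metric
from transformers import LEDForConditionalGeneration, LEDTokenizer

tokenizer = LEDTokenizer.from_pretrained("patrickvonplaten/led-large-16384-pubmed")
model = LEDForConditionalGeneration.from_pretrained("patrickvonplaten/led-large-16384-pubmed").to("cuda").half()

# small test slice for a quick sanity check; use the full split for the reported score
pubmed_test = load_dataset("scientific_papers", "pubmed", split="test[:100]")
rouge = load_metric("rouge")

def generate_summary(batch):
    inputs = tokenizer(batch["article"], padding="max_length", truncation=True,
                       max_length=8192, return_tensors="pt")
    input_ids = inputs.input_ids.to("cuda")
    attention_mask = inputs.attention_mask.to("cuda")
    global_attention_mask = torch.zeros_like(input_ids)
    global_attention_mask[:, 0] = 1  # global attention on the first token only
    outputs = model.generate(input_ids, attention_mask=attention_mask,
                             global_attention_mask=global_attention_mask)
    batch["predicted_abstract"] = tokenizer.batch_decode(outputs, skip_special_tokens=True)
    return batch

results = pubmed_test.map(generate_summary, batched=True, batch_size=2)
rouge2 = rouge.compute(predictions=results["predicted_abstract"],
                       references=results["abstract"], rouge_types=["rouge2"])["rouge2"].mid
print(rouge2)
```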
patrickvonplaten/longformer-random-tiny
2020-08-05T09:22:23.000Z
[ "pytorch", "tf", "longformer", "transformers" ]
[ ".gitattributes", "config.json", "pytorch_model.bin", "tf_model.h5" ]
patrickvonplaten
12,361
transformers
patrickvonplaten/longformer2roberta-cnn_dailymail-fp16
2020-12-11T21:59:19.000Z
[ "pytorch", "encoder_decoder", "seq2seq", "transformers", "text2text-generation" ]
text2text-generation
[ ".gitattributes", "README.md", "config.json", "pytorch_model.bin" ]
patrickvonplaten
1,263
transformers
# Longformer2Roberta Summarization with 🤗 EncoderDecoder Framework This model is a Longformer2Roberta model fine-tuned on summarization. Longformer2Roberta is an `EncoderDecoderModel`, meaning that the encoder is an `allenai/longformer-base-4096` model and the decoder is a `roberta-base` model. Leveraging the [EncoderDecoderFramework](https://huggingface.co/transformers/model_doc/encoderdecoder.html#encoder-decoder-models), the two pretrained models can simply be loaded into the framework via: ```python longformer2roberta = EncoderDecoderModel.from_encoder_decoder_pretrained("allenai/longformer-base-4096", "roberta-base") ``` The decoder of an `EncoderDecoder` model needs cross-attention layers and usually makes use of causal masking for auto-regressive generation. Thus, ``longformer2roberta`` was consequently fine-tuned on the `CNN/Daily Mail` dataset and the resulting model `longformer2roberta-cnn_dailymail-fp16` is uploaded here. ## Example The model is by no means a state-of-the-art model, but nevertheless produces reasonable summarization results. It was mainly fine-tuned as a proof-of-concept for the 🤗 EncoderDecoder Framework. The model can be used as follows: ```python from transformers import LongformerTokenizer, EncoderDecoderModel model = EncoderDecoderModel.from_pretrained("patrickvonplaten/longformer2roberta-cnn_dailymail-fp16") tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096") article = """(CNN)James Holmes made his introduction to the world in a Colorado cinema filled with spectators watching a midnight showing of the new Batman movie, "The Dark Knight Rises," in June 2012. The moment became one of the deadliest shootings in U.S. history. Holmes is accused of opening fire on the crowd, killing 12 people and injuring or maiming 70 others in Aurora, a suburb of Denver. Holmes appeared like a comic book character: He resembled the Joker, with red-orange hair, similar to the late actor Heath Ledger\'s portrayal of the villain in an earlier Batman movie, authorities said. But Holmes was hardly a cartoon. Authorities said he wore body armor and carried several guns, including an AR-15 rifle, with lots of ammo. He also wore a gas mask. Holmes says he was insane at the time of the shootings, and that is his legal defense and court plea: not guilty by reason of insanity. Prosecutors aren\'t swayed and will seek the death penalty. Opening statements in his trial are scheduled to begin Monday. Holmes admits to the shootings but says he was suffering "a psychotic episode" at the time, according to court papers filed in July 2013 by the state public defenders, Daniel King and Tamara A. Brady. Evidence "revealed thus far in the case supports the defense\'s position that Mr. Holmes suffers from a severe mental illness and was in the throes of a psychotic episode when he committed the acts that resulted in the tragic loss of life and injuries sustained by moviegoers on July 20, 2012," the public defenders wrote. Holmes no longer looks like a dazed Joker, as he did in his first appearance before a judge in 2012. He appeared dramatically different in January when jury selection began for his trial: 9,000 potential jurors were summoned for duty, described as one of the nation\'s largest jury calls. Holmes now has a cleaner look, with a mustache, button-down shirt and khaki pants. In January, he had a beard and eyeglasses. If this new image sounds like one of an academician, it may be because Holmes, now 27, once was one.
Just before the shooting, Holmes was a doctoral student in neuroscience, and he was studying how the brain works, with his schooling funded by a U.S. government grant. Yet for all his learning, Holmes apparently lacked the capacity to command his own mind, according to the case against him. A jury will ultimately decide Holmes\' fate. That panel is made up of 12 jurors and 12 alternates. They are 19 women and five men, and almost all are white and middle-aged. The trial could last until autumn. When jury summonses were issued in January, each potential juror stood a 0.2% chance of being selected, District Attorney George Brauchler told the final jury this month. He described the approaching trial as "four to five months of a horrible roller coaster through the worst haunted house you can imagine." The jury will have to render verdicts on each of the 165 counts against Holmes, including murder and attempted murder charges. Meanwhile, victims and their relatives are challenging all media outlets "to stop the gratuitous use of the name and likeness of mass killers, thereby depriving violent individuals the media celebrity and media spotlight they so crave," the No Notoriety group says. They are joined by victims from eight other mass shootings in recent U.S. history. Raised in central coastal California and in San Diego, James Eagan Holmes is the son of a mathematician father noted for his work at the FICO firm that provides credit scores and a registered nurse mother, according to the U-T San Diego newspaper. Holmes also has a sister, Chris, a musician, who\'s five years younger, the newspaper said. His childhood classmates remember him as a clean-cut, bespectacled boy with an "exemplary" character who "never gave any trouble, and never got in trouble himself," The Salinas Californian reported. His family then moved down the California coast, where Holmes grew up in the San Diego-area neighborhood of Rancho Peñasquitos, which a neighbor described as "kind of like Mayberry," the San Diego newspaper said. Holmes attended Westview High School, which says its school district sits in "a primarily middle- to upper-middle-income residential community." There, Holmes ran cross-country, played soccer and later worked at a biotechnology internship at the Salk Institute and Miramar College, which attracts academically talented students. By then, his peers described him as standoffish and a bit of a wiseacre, the San Diego newspaper said. Holmes attended college fairly close to home, in a neighboring area known as Southern California\'s "inland empire" because it\'s more than an hour\'s drive from the coast, in a warm, low-desert climate. He entered the University of California, Riverside, in 2006 as a scholarship student. In 2008 he was a summer camp counselor for disadvantaged children, age 7 to 14, at Camp Max Straus, run by Jewish Big Brothers Big Sisters of Los Angeles. He graduated from UC Riverside in 2010 with the highest honors and a bachelor\'s degree in neuroscience. "Academically, he was at the top of the top," Chancellor Timothy P. White said. He seemed destined for even higher achievement. By 2011, he had enrolled as a doctoral student in the neuroscience program at the University of Colorado Anschutz Medical Campus in Aurora, the largest academic health center in the Rocky Mountain region. The doctoral in neuroscience program attended by Holmes focuses on how the brain works, with an emphasis on processing of information, behavior, learning and memory. 
Holmes was one of six pre-thesis Ph.D. students in the program who were awarded a neuroscience training grant from the National Institutes of Health. The grant rewards outstanding neuroscientists who will make major contributions to neurobiology. A syllabus that listed Holmes as a student at the medical school shows he was to have delivered a presentation about microRNA biomarkers. But Holmes struggled, and his own mental health took an ominous turn. In March 2012, he told a classmate he wanted to kill people, and that he would do so "when his life was over," court documents said. Holmes was "denied access to the school after June 12, 2012, after he made threats to a professor," according to court documents. About that time, Holmes was a patient of University of Colorado psychiatrist Lynne Fenton. Fenton was so concerned about Holmes\' behavior that she mentioned it to her colleagues, saying he could be a danger to others, CNN affiliate KMGH-TV reported, citing sources with knowledge of the investigation. Fenton\'s concerns surfaced in early June, sources told the Denver station. Holmes began to fantasize about killing "a lot of people" in early June, nearly six weeks before the shootings, the station reported, citing unidentified sources familiar with the investigation. Holmes\' psychiatrist contacted several members of a "behavioral evaluation and threat assessment" team to say Holmes could be a danger to others, the station reported. At issue was whether to order Holmes held for 72 hours to be evaluated by mental health professionals, the station reported. "Fenton made initial phone calls about engaging the BETA team" in "the first 10 days" of June, but it "never came together" because in the period Fenton was having conversations with team members, Holmes began the process of dropping out of school, a source told KMGH. Defense attorneys have rejected the prosecution\'s assertions that Holmes was barred from campus. Citing statements from the university, Holmes\' attorneys have argued that his access was revoked because that\'s normal procedure when a student drops enrollment. What caused this turn for the worse for Holmes has yet to be clearly detailed. In the months before the shooting, he bought four weapons and more than 6,000 rounds of ammunition, authorities said. Police said he also booby-trapped his third-floor apartment with explosives, but police weren\'t fooled. After Holmes was caught in the cinema parking lot immediately after the shooting, bomb technicians went to the apartment and neutralized the explosives. No one was injured at the apartment building. Nine minutes before Holmes went into the movie theater, he called a University of Colorado switchboard, public defender Brady has said in court. The number he called can be used to get in contact with faculty members during off hours, Brady said. Court documents have also revealed that investigators have obtained text messages that Holmes exchanged with someone before the shooting. That person was not named, and the content of the texts has not been made public. According to The New York Times, Holmes sent a text message to a fellow graduate student, a woman, about two weeks before the shooting. She asked if he had left Aurora yet, reported the newspaper, which didn\'t identify her. No, he had two months left on his lease, Holmes wrote back, according to the Times. 
He asked if she had heard of "dysphoric mania," a form of bipolar disorder marked by the highs of mania and the dark and sometimes paranoid delusions of major depression. The woman asked if the disorder could be managed with treatment. "It was," Holmes wrote her, according to the Times. But he warned she should stay away from him "because I am bad news," the newspaper reported. It was her last contact with Holmes. After the shooting, Holmes\' family issued a brief statement: "Our hearts go out to those who were involved in this tragedy and to the families and friends of those involved," they said, without giving any information about their son. Since then, prosecutors have refused to offer a plea deal to Holmes. For Holmes, "justice is death," said Brauchler, the district attorney. In December, Holmes\' parents, who will be attending the trial, issued another statement: They asked that their son\'s life be spared and that he be sent to an institution for mentally ill people for the rest of his life, if he\'s found not guilty by reason of insanity. "He is not a monster," Robert and Arlene Holmes wrote, saying the death penalty is "morally wrong, especially when the condemned is mentally ill." "He is a human being gripped by a severe mental illness," the parents said. The matter will be settled by the jury. CNN\'s Ana Cabrera and Sara Weisfeldt contributed to this report from Denver."""

input_ids = tokenizer(article, return_tensors="pt").input_ids
output_ids = model.generate(input_ids)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))

# should produce
# James Holmes, 27, is accused of opening fire on a Colorado theater.
# He was a doctoral student at University of Colorado.
# Holmes says he was suffering "a psychotic episode" at the time of the shooting.
# Prosecutors won't say whether Holmes was barred from campus.
```

Such an article is more than 2000 tokens long, which means that it cannot be processed in full by standard BERT or RoBERTa encoders, whose inputs are limited to 512 tokens.

## Training script:

**IMPORTANT**: In order for this code to work, make sure you check out the branch [more_general_trainer_metric](https://github.com/huggingface/transformers/tree/more_general_trainer_metric), which slightly adapts the `Trainer` for `EncoderDecoderModels` according to this PR: https://github.com/huggingface/transformers/pull/5840.

The following code shows the complete training script that was used to fine-tune `longformer2roberta-cnn_dailymail-fp16` for reproducibility. The training lasted ~90h on a standard GPU.
```python
#!/usr/bin/env python3
import nlp
import logging
from transformers import LongformerTokenizer, EncoderDecoderModel, Trainer, TrainingArguments

logging.basicConfig(level=logging.INFO)

model = EncoderDecoderModel.from_encoder_decoder_pretrained("allenai/longformer-base-4096", "roberta-base")
tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")

# load train and validation data
train_dataset = nlp.load_dataset("cnn_dailymail", "3.0.0", split="train")
val_dataset = nlp.load_dataset("cnn_dailymail", "3.0.0", split="validation[:5%]")

# load rouge for validation
rouge = nlp.load_metric("rouge", experiment_id=0)

# enable gradient checkpointing for longformer encoder
model.encoder.config.gradient_checkpointing = True

# set decoding params
model.config.decoder_start_token_id = tokenizer.bos_token_id
model.config.eos_token_id = tokenizer.eos_token_id
model.config.max_length = 142
model.config.min_length = 56
model.config.no_repeat_ngram_size = 3
model.early_stopping = True
model.length_penalty = 2.0
model.num_beams = 4

encoder_length = 2048
decoder_length = 128
batch_size = 16


# map data correctly
def map_to_encoder_decoder_inputs(batch):
    # Tokenizer will automatically set [BOS] <text> [EOS]
    # cut off at Longformer's max length of 2048
    inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=encoder_length)
    # force summarization <= 128
    outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=decoder_length)

    batch["input_ids"] = inputs.input_ids
    batch["attention_mask"] = inputs.attention_mask

    # set 128 tokens to global attention
    batch["global_attention_mask"] = [[1 if i < 128 else 0 for i in range(sequence_length)] for sequence_length in len(inputs.input_ids) * [encoder_length]]
    batch["decoder_input_ids"] = outputs.input_ids
    batch["labels"] = outputs.input_ids.copy()

    # mask loss for padding
    batch["labels"] = [
        [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
    ]
    batch["decoder_attention_mask"] = outputs.attention_mask

    assert all([len(x) == encoder_length for x in inputs.input_ids])
    assert all([len(x) == decoder_length for x in outputs.input_ids])

    return batch


def compute_metrics(pred):
    labels_ids = pred.label_ids
    pred_ids = pred.predictions

    # all unnecessary tokens are removed
    pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
    labels_ids[labels_ids == -100] = tokenizer.eos_token_id
    label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

    rouge_output = rouge.compute(predictions=pred_str, references=label_str, rouge_types=["rouge2"])["rouge2"].mid

    return {
        "rouge2_precision": round(rouge_output.precision, 4),
        "rouge2_recall": round(rouge_output.recall, 4),
        "rouge2_fmeasure": round(rouge_output.fmeasure, 4),
    }


# make train dataset ready
train_dataset = train_dataset.map(
    map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"],
)
train_dataset.set_format(
    type="torch", columns=["input_ids", "attention_mask", "global_attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
)

# same for validation dataset
val_dataset = val_dataset.map(
    map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"],
)
val_dataset.set_format(
    type="torch", columns=["input_ids", "global_attention_mask", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
)

# set training arguments - these params are not really tuned, feel free to change
training_args = TrainingArguments(
    output_dir="./",
    per_device_train_batch_size=batch_size,
    per_device_eval_batch_size=batch_size,
    predict_from_generate=True,
    evaluate_during_training=True,
    do_train=True,
    do_eval=True,
    logging_steps=1000,
    save_steps=1000,
    eval_steps=1000,
    overwrite_output_dir=True,
    warmup_steps=2000,
    save_total_limit=3,
    fp16=True,
)

# instantiate trainer
trainer = Trainer(
    model=model,
    args=training_args,
    compute_metrics=compute_metrics,
    train_dataset=train_dataset,
    eval_dataset=val_dataset,
)

# start training
trainer.train()
```

## Evaluation

The following script evaluates the model on the test set of CNN/Daily Mail.

```python
#!/usr/bin/env python3
import nlp
import torch
from transformers import LongformerTokenizer, EncoderDecoderModel

tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
model = EncoderDecoderModel.from_pretrained("patrickvonplaten/longformer2roberta-cnn_dailymail-fp16")
model.to("cuda")

test_dataset = nlp.load_dataset("cnn_dailymail", "3.0.0", split="test")
batch_size = 32
encoder_length = 2048
decoder_length = 128


# map data correctly
def generate_summary(batch):
    # Tokenizer will automatically set [BOS] <text> [EOS]
    # cut off at the Longformer encoder length of 2048
    inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=encoder_length, return_tensors="pt")
    input_ids = inputs.input_ids.to("cuda")
    attention_mask = inputs.attention_mask.to("cuda")
    global_attention_mask = torch.zeros_like(attention_mask)
    global_attention_mask[:, :decoder_length] = 1

    outputs = model.generate(input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask)

    # all special tokens will be removed
    output_str = tokenizer.batch_decode(outputs, skip_special_tokens=True)

    batch["pred"] = output_str

    return batch


results = test_dataset.map(generate_summary, batched=True, batch_size=batch_size, remove_columns=["article"])

# load rouge for validation
rouge = nlp.load_metric("rouge")

pred_str = results["pred"]
label_str = results["highlights"]

rouge_output = rouge.compute(predictions=pred_str, references=label_str, rouge_types=["rouge2"])["rouge2"].mid

print(rouge_output)
```

The obtained results should be:

| - | Rouge2 - mid - precision | Rouge2 - mid - recall | Rouge2 - mid - fmeasure |
|----------|:-------------:|:------:|:------:|
| **CNN/Daily Mail** | 12.39 | 15.05 | **13.21** |

**Note** This model was trained to show how Longformer can be used as an Encoder model in an EncoderDecoder setup. Better results are obtained for datasets of much longer inputs.
patrickvonplaten/norwegian-roberta-base
2021-05-19T10:12:21.000Z
[ "pytorch", "jax", "roberta", "masked-lm", "transformers", "fill-mask" ]
fill-mask
[ ".gitattributes", "README.md", "config.json", "flax_model.msgpack", "pytorch_model.bin", "tokenizer.json" ]
patrickvonplaten
64
transformers
## Roberta-Base

This repo trains [roberta-base](https://huggingface.co/roberta-base) from scratch on the [Norwegian training subset of Oscar](https://oscar-corpus.com/), containing roughly 4.7 GB of data, following [this](https://github.com/huggingface/transformers/tree/master/examples/flax/language-modeling) example. Training is done on a TPUv3-8 in Flax. More statistics on the training run can be found at [tensorboard.dev](https://tensorboard.dev/experiment/GdYmdak2TWeVz0DDRYOrrg).
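To sanity-check the resulting checkpoint, a masked-token query can be run through the standard `pipeline` API. The sketch below assumes the uploaded `tokenizer.json` and weights load with the default Auto classes; the Norwegian example sentence and its predictions are purely illustrative.

```python
from transformers import pipeline

# load the masked-LM checkpoint; tokenizer and weights are pulled from this repo
unmasker = pipeline("fill-mask", model="patrickvonplaten/norwegian-roberta-base")

# RoBERTa-style models use "<mask>" as the mask token
# "Oslo er hovedstaden i <mask>." = "Oslo is the capital of <mask>."
for prediction in unmasker("Oslo er hovedstaden i <mask>."):
    print(prediction["token_str"], round(prediction["score"], 4))
```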
patrickvonplaten/norwegian-roberta-large
2021-05-20T19:15:37.000Z
[ "tensorboard", "roberta", "masked-lm", "transformers", "fill-mask" ]
fill-mask
[ ".gitattributes", "README.md", "config.json", "run_mlm.sh", "run_mlm_flax.py", "tokenizer.json", "train_tokenizer.py", "runs/config.json", "runs/logs/events.out.tfevents.1619649831.t1v-n-71556209-w-0.63096.3.v2", "runs/logs/events.out.tfevents.1619650245.t1v-n-71556209-w-0.65176.3.v2", "runs/logs/events.out.tfevents.1619658715.t1v-n-71556209-w-0.66938.3.v2", "runs/logs/events.out.tfevents.1619789329.t1v-n-71556209-w-0.90330.3.v2", "runs/logs/events.out.tfevents.1619963181.t1v-n-71556209-w-0.47643.3.v2", "runs/logs/events.out.tfevents.1619964922.t1v-n-71556209-w-0.54388.3.v2", "runs/logs/events.out.tfevents.1619992500.t1v-n-71556209-w-0.79622.3.v2", "runs/logs/events.out.tfevents.1619992737.t1v-n-71556209-w-0.81383.3.v2", "runs/logs/events.out.tfevents.1619993670.t1v-n-71556209-w-0.83638.3.v2", "runs/logs/events.out.tfevents.1619994044.t1v-n-71556209-w-0.85400.3.v2", "runs/logs/events.out.tfevents.1619994255.t1v-n-71556209-w-0.87341.3.v2", "runs/logs/events.out.tfevents.1619994499.t1v-n-71556209-w-0.89395.3.v2", "runs/logs/events.out.tfevents.1619994922.t1v-n-71556209-w-0.91380.3.v2" ]
patrickvonplaten
6
transformers
## Roberta-Large

This repo trains [roberta-large](https://huggingface.co/roberta-large) from scratch on the [Norwegian training subset of Oscar](https://oscar-corpus.com/), containing roughly 4.7 GB of data. A ByteLevelBPETokenizer, as shown in [this]( ) blog post, was trained on the whole [Norwegian training subset of Oscar](https://oscar-corpus.com/). Training is done on a TPUv3-8 in Flax. The training script as well as the script used to create the tokenizer are attached below.

### Run 1

```
--weight_decay="0.01"
--max_seq_length="128"
--train_batch_size="1048"
--eval_batch_size="1048"
--learning_rate="1e-3"
--warmup_steps="2000"
--pad_to_max_length
--num_train_epochs="12"
--adam_beta1="0.9"
--adam_beta2="0.98"
```

Trained for 12 epochs, with each epoch comprising 8005 steps => 96K steps in total. One epoch plus evaluation takes roughly 2 hours 40 minutes => trained for 1 day and 8 hours in total. The final loss was 3.695.

**Acc**:

![Acc](https://raw.githubusercontent.com/patrickvonplaten/scientific_images/master/flax_experiments/norwegian_large_acc_1.svg)

**Loss**:

![Loss](https://raw.githubusercontent.com/patrickvonplaten/scientific_images/master/flax_experiments/norwegian_large_loss_1.svg)

### Run 2

```
--weight_decay="0.01"
--max_seq_length="128"
--train_batch_size="1048"
--eval_batch_size="1048"
--learning_rate="5e-3"
--warmup_steps="2000"
--pad_to_max_length
--num_train_epochs="7"
--adam_beta1="0.9"
--adam_beta2="0.98"
```

Trained for 7 epochs, with each epoch comprising 8005 steps => roughly 56K steps in total. One epoch plus evaluation takes roughly 2 hours 40 minutes => trained for about 18 hours in total. The final loss was 2.216 and the accuracy 0.58.

**Acc**:

![Acc](https://raw.githubusercontent.com/patrickvonplaten/scientific_images/master/flax_experiments/norwegian_large_acc_2.svg)

**Loss**:

![Loss](https://raw.githubusercontent.com/patrickvonplaten/scientific_images/master/flax_experiments/norwegian_large_loss_2.svg)
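The tokenizer script (`train_tokenizer.py`) is included in this repo but not reproduced above. Below is a minimal sketch of how such a ByteLevelBPE tokenizer can be trained with the `tokenizers` library; the input file name and the vocabulary size of 50265 (the `roberta-large` default) are assumptions, not values taken from the actual script.

```python
from tokenizers import ByteLevelBPETokenizer

# train a byte-level BPE tokenizer on the raw Norwegian OSCAR text
# NOTE: the file name and vocab_size are illustrative assumptions
tokenizer = ByteLevelBPETokenizer()
tokenizer.train(
    files=["oscar_no.txt"],  # plain-text dump of the Norwegian OSCAR subset (assumed name)
    vocab_size=50265,        # matches the roberta-large vocabulary size
    min_frequency=2,
    special_tokens=["<s>", "<pad>", "</s>", "<unk>", "<mask>"],
)

# save in the single-file format used by tokenizer.json in this repo
tokenizer.save("tokenizer.json")
```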
patrickvonplaten/prophetnet-decoder-clm-large-uncased
2020-10-21T10:06:17.000Z
[ "pytorch", "prophetnet", "causal-lm", "transformers", "text-generation" ]
text-generation
[ ".gitattributes", "config.json", "pytorch_model.bin" ]
patrickvonplaten
17
transformers
patrickvonplaten/prophetnet-large-uncased-cnndm_old
2020-10-16T12:55:25.000Z
[ "pytorch", "prophetnet", "seq2seq", "transformers", "text2text-generation" ]
text2text-generation
[ ".gitattributes", "config.json", "prophetnet.tokenizer", "pytorch_model.bin", "special_tokens_map.json", "tokenizer_config.json" ]
patrickvonplaten
9
transformers
patrickvonplaten/prophetnet-large-uncased-standalone
2020-10-21T10:13:29.000Z
[ "pytorch", "prophetnet", "transformers" ]
[ ".gitattributes", "config.json", "pytorch_model.bin" ]
patrickvonplaten
95
transformers
patrickvonplaten/prophetnet-large-uncased_old
2020-10-16T12:37:59.000Z
[ "pytorch", "prophetnet", "seq2seq", "transformers", "text2text-generation" ]
text2text-generation
[ ".gitattributes", "config.json", "prophetnet.tokenizer", "pytorch_model.bin", "special_tokens_map.json", "tokenizer_config.json" ]
patrickvonplaten
15
transformers
patrickvonplaten/rag-sequence-gen-prev
2020-09-24T12:42:35.000Z
[ "pytorch", "bart", "seq2seq", "transformers", "text2text-generation" ]
text2text-generation
[ ".gitattributes", "config.json", "pytorch_model.bin" ]
patrickvonplaten
10
transformers
patrickvonplaten/rag-sequence-ques-enc-prev
2020-09-24T12:43:40.000Z
[ "pytorch", "dpr", "transformers" ]
[ ".gitattributes", "config.json", "pytorch_model.bin" ]
patrickvonplaten
11
transformers
patrickvonplaten/rag-tiny-random
2020-09-18T08:34:42.000Z
[ "pytorch", "rag", "transformers" ]
[ ".gitattributes", "config.json", "pytorch_model.bin" ]
patrickvonplaten
15
transformers
patrickvonplaten/reformer-random
2021-05-20T02:18:08.000Z
[ "pytorch", "bert", "lm-head", "masked-lm", "transformers", "fill-mask" ]
fill-mask
[ ".gitattributes", "config.json", "pytorch_model.bin", "reformer-crime-and-punishment-spiece.model", "spiece.model" ]
patrickvonplaten
57
transformers