| column | type | values / range |
|---|---|---|
| pipeline_tag | stringclasses | 48 values |
| library_name | stringclasses | 205 values |
| text | stringlengths | 0 – 18.3M |
| metadata | stringlengths | 2 – 1.07B |
| id | stringlengths | 5 – 122 |
| last_modified | null | n/a |
| tags | listlengths | 1 – 1.84k |
| sha | null | n/a |
| created_at | stringlengths | 25 – 25 |
token-classification
transformers
{}
Media1129/keyword-tag-model-4000-9-16_more_ingredient
null
[ "transformers", "pytorch", "bert", "token-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
token-classification
transformers
{}
Media1129/keyword-tag-model-4000
null
[ "transformers", "pytorch", "bert", "token-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
token-classification
transformers
{}
Media1129/keyword-tag-model-6000-9-16_more_ingredient
null
[ "transformers", "pytorch", "bert", "token-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
token-classification
transformers
{}
Media1129/keyword-tag-model-6000-v2
null
[ "transformers", "pytorch", "bert", "token-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
token-classification
transformers
{}
Media1129/keyword-tag-model-6000
null
[ "transformers", "pytorch", "bert", "token-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
token-classification
transformers
{}
Media1129/keyword-tag-model-8000-9-16_more_ingredient
null
[ "transformers", "pytorch", "bert", "token-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
token-classification
transformers
{}
Media1129/keyword-tag-model-9000-v2
null
[ "transformers", "pytorch", "bert", "token-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
token-classification
transformers
{}
Media1129/keyword-tag-model
null
[ "transformers", "pytorch", "bert", "token-classification", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:04+00:00
token-classification
transformers
{}
Media1129/recipe-tag-model
null
[ "transformers", "pytorch", "bert", "token-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
MehdiHosseiniMoghadam/wav2vec2-large-xls-r-300m-Swedish-colab
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
MehdiHosseiniMoghadam/wav2vec2-large-xls-r-300m-turkish-colab
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
automatic-speech-recognition
transformers
# wav2vec2-large-xlsr-53-Czech

Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Czech using the [Common Voice](https://huggingface.co/datasets/common_voice) dataset.

When using this model, make sure that your speech input is sampled at 16kHz.

## Usage

The model can be used directly (without a language model) as follows:

```python
import torch
import torchaudio
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

test_dataset = load_dataset("common_voice", "cs", split="test[:2%]")

processor = Wav2Vec2Processor.from_pretrained("MehdiHosseiniMoghadam/wav2vec2-large-xlsr-53-Czech")
model = Wav2Vec2ForCTC.from_pretrained("MehdiHosseiniMoghadam/wav2vec2-large-xlsr-53-Czech")

resampler = torchaudio.transforms.Resample(48_000, 16_000)

# Preprocessing the datasets.
# We need to read the audio files as arrays.
def speech_file_to_array_fn(batch):
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)

with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits

predicted_ids = torch.argmax(logits, dim=-1)

print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
```

## Evaluation

The model can be evaluated as follows on the Czech test data of Common Voice.

```python
import torch
import torchaudio
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import re

test_dataset = load_dataset("common_voice", "cs", split="test")
wer = load_metric("wer")

processor = Wav2Vec2Processor.from_pretrained("MehdiHosseiniMoghadam/wav2vec2-large-xlsr-53-Czech")
model = Wav2Vec2ForCTC.from_pretrained("MehdiHosseiniMoghadam/wav2vec2-large-xlsr-53-Czech")
model.to("cuda")

chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\%\‘\”\�]'
resampler = torchaudio.transforms.Resample(48_000, 16_000)

# Preprocessing the datasets.
# We need to read the audio files as arrays.
def speech_file_to_array_fn(batch):
    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)

# Run the model on the preprocessed test set in batches.
def evaluate(batch):
    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
    with torch.no_grad():
        logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
    pred_ids = torch.argmax(logits, dim=-1)
    batch["pred_strings"] = processor.batch_decode(pred_ids)
    return batch

result = test_dataset.map(evaluate, batched=True, batch_size=8)
print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
```

**Test Result**: 27.047806 %

## Training

The Common Voice `train` and `validation` datasets were used for training.
{"language": "cs", "license": "apache-2.0", "tags": ["audio", "automatic-speech-recognition", "speech", "xlsr-fine-tuning-week"], "datasets": ["common_voice"], "model-index": [{"name": "wav2vec2-large-xlsr-53-Czech by Mehdi Hosseini Moghadam", "results": [{"task": {"type": "automatic-speech-recognition", "name": "Speech Recognition"}, "dataset": {"name": "Common Voice cs", "type": "common_voice", "args": "cs"}, "metrics": [{"type": "wer", "value": 27.047806, "name": "Test WER"}]}]}]}
MehdiHosseiniMoghadam/wav2vec2-large-xlsr-53-Czech
null
[ "transformers", "pytorch", "jax", "wav2vec2", "automatic-speech-recognition", "audio", "speech", "xlsr-fine-tuning-week", "cs", "dataset:common_voice", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
automatic-speech-recognition
transformers
# wav2vec2-large-xlsr-53-Dutch

Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Dutch using the [Common Voice](https://huggingface.co/datasets/common_voice) dataset.

When using this model, make sure that your speech input is sampled at 16kHz.

## Usage

The model can be used directly (without a language model) as follows:

```python
import torch
import torchaudio
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

test_dataset = load_dataset("common_voice", "nl", split="test[:2%]")

processor = Wav2Vec2Processor.from_pretrained("MehdiHosseiniMoghadam/wav2vec2-large-xlsr-53-Dutch")
model = Wav2Vec2ForCTC.from_pretrained("MehdiHosseiniMoghadam/wav2vec2-large-xlsr-53-Dutch")

resampler = torchaudio.transforms.Resample(48_000, 16_000)

# Preprocessing the datasets.
# We need to read the audio files as arrays.
def speech_file_to_array_fn(batch):
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)

with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits

predicted_ids = torch.argmax(logits, dim=-1)

print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
```

## Evaluation

The model can be evaluated as follows on the Dutch test data of Common Voice.

```python
import torch
import torchaudio
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import re

test_dataset = load_dataset("common_voice", "nl", split="test")
wer = load_metric("wer")

processor = Wav2Vec2Processor.from_pretrained("MehdiHosseiniMoghadam/wav2vec2-large-xlsr-53-Dutch")
model = Wav2Vec2ForCTC.from_pretrained("MehdiHosseiniMoghadam/wav2vec2-large-xlsr-53-Dutch")
model.to("cuda")

chars_to_ignore_regex = '[\\,\\?\\.\\!\\-\\;\\:\\"\\“]'
resampler = torchaudio.transforms.Resample(48_000, 16_000)

# Preprocessing the datasets.
# We need to read the audio files as arrays.
def speech_file_to_array_fn(batch):
    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)

# Run the model on the preprocessed test set in batches.
def evaluate(batch):
    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
    with torch.no_grad():
        logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
    pred_ids = torch.argmax(logits, dim=-1)
    batch["pred_strings"] = processor.batch_decode(pred_ids)
    return batch

result = test_dataset.map(evaluate, batched=True, batch_size=8)
print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
```

**Test Result**: 26.494162 %

## Training

The Common Voice `train` and `validation` datasets were used for training.
{"language": "nl", "license": "apache-2.0", "tags": ["audio", "automatic-speech-recognition", "speech", "xlsr-fine-tuning-week"], "datasets": ["common_voice"], "model-index": [{"name": "wav2vec2-large-xlsr-53-Dutch by Mehdi Hosseini Moghadam", "results": [{"task": {"type": "automatic-speech-recognition", "name": "Speech Recognition"}, "dataset": {"name": "Common Voice nl", "type": "common_voice", "args": "nl"}, "metrics": [{"type": "wer", "value": 26.494162, "name": "Test WER"}]}]}]}
MehdiHosseiniMoghadam/wav2vec2-large-xlsr-53-Dutch
null
[ "transformers", "pytorch", "jax", "wav2vec2", "automatic-speech-recognition", "audio", "speech", "xlsr-fine-tuning-week", "nl", "dataset:common_voice", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
automatic-speech-recognition
transformers
# wav2vec2-large-xlsr-53-French

Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on French using the [Common Voice](https://huggingface.co/datasets/common_voice) dataset.

When using this model, make sure that your speech input is sampled at 16kHz.

## Usage

The model can be used directly (without a language model) as follows:

```python
import torch
import torchaudio
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

test_dataset = load_dataset("common_voice", "fr", split="test[:2%]")

processor = Wav2Vec2Processor.from_pretrained("MehdiHosseiniMoghadam/wav2vec2-large-xlsr-53-French")
model = Wav2Vec2ForCTC.from_pretrained("MehdiHosseiniMoghadam/wav2vec2-large-xlsr-53-French")

resampler = torchaudio.transforms.Resample(48_000, 16_000)

# Preprocessing the datasets.
# We need to read the audio files as arrays.
def speech_file_to_array_fn(batch):
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)

with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits

predicted_ids = torch.argmax(logits, dim=-1)

print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
```

## Evaluation

The model can be evaluated as follows on the French test data of Common Voice.

```python
import torch
import torchaudio
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import re

test_dataset = load_dataset("common_voice", "fr", split="test[:10%]")
wer = load_metric("wer")

processor = Wav2Vec2Processor.from_pretrained("MehdiHosseiniMoghadam/wav2vec2-large-xlsr-53-French")
model = Wav2Vec2ForCTC.from_pretrained("MehdiHosseiniMoghadam/wav2vec2-large-xlsr-53-French")
model.to("cuda")

chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\%\‘\”\�]'
resampler = torchaudio.transforms.Resample(48_000, 16_000)

# Preprocessing the datasets.
# We need to read the audio files as arrays.
def speech_file_to_array_fn(batch):
    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)

# Run the model on the preprocessed test set in batches.
def evaluate(batch):
    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
    with torch.no_grad():
        logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
    pred_ids = torch.argmax(logits, dim=-1)
    batch["pred_strings"] = processor.batch_decode(pred_ids)
    return batch

result = test_dataset.map(evaluate, batched=True, batch_size=8)
print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
```

**Test Result**: 34.856015 %

## Training

10% of the Common Voice `train` and `validation` datasets were used for training.

## Testing

10% of the Common Voice `test` dataset was used for testing.
{"language": "fr", "license": "apache-2.0", "tags": ["audio", "automatic-speech-recognition", "speech", "xlsr-fine-tuning-week"], "datasets": ["common_voice"], "model-index": [{"name": "wav2vec2-large-xlsr-53-French by Mehdi Hosseini Moghadam", "results": [{"task": {"type": "automatic-speech-recognition", "name": "Speech Recognition"}, "dataset": {"name": "Common Voice fr", "type": "common_voice", "args": "fr"}, "metrics": [{"type": "wer", "value": 34.856015, "name": "Test WER"}]}]}]}
MehdiHosseiniMoghadam/wav2vec2-large-xlsr-53-French
null
[ "transformers", "pytorch", "jax", "wav2vec2", "automatic-speech-recognition", "audio", "speech", "xlsr-fine-tuning-week", "fr", "dataset:common_voice", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
automatic-speech-recognition
transformers
# wav2vec2-large-xlsr-53-Georgian

Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Georgian using the [Common Voice](https://huggingface.co/datasets/common_voice) dataset.

When using this model, make sure that your speech input is sampled at 16kHz.

## Usage

The model can be used directly (without a language model) as follows:

```python
import torch
import torchaudio
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

test_dataset = load_dataset("common_voice", "ka", split="test[:2%]")

processor = Wav2Vec2Processor.from_pretrained("MehdiHosseiniMoghadam/wav2vec2-large-xlsr-53-Georgian")
model = Wav2Vec2ForCTC.from_pretrained("MehdiHosseiniMoghadam/wav2vec2-large-xlsr-53-Georgian")

resampler = torchaudio.transforms.Resample(48_000, 16_000)

# Preprocessing the datasets.
# We need to read the audio files as arrays.
def speech_file_to_array_fn(batch):
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)

with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits

predicted_ids = torch.argmax(logits, dim=-1)

print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
```

## Evaluation

The model can be evaluated as follows on the Georgian test data of Common Voice.

```python
import torch
import torchaudio
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import re

test_dataset = load_dataset("common_voice", "ka", split="test")
wer = load_metric("wer")

processor = Wav2Vec2Processor.from_pretrained("MehdiHosseiniMoghadam/wav2vec2-large-xlsr-53-Georgian")
model = Wav2Vec2ForCTC.from_pretrained("MehdiHosseiniMoghadam/wav2vec2-large-xlsr-53-Georgian")
model.to("cuda")

chars_to_ignore_regex = '[\\,\\?\\.\\!\\-\\;\\:\\"\\“]'
resampler = torchaudio.transforms.Resample(48_000, 16_000)

# Preprocessing the datasets.
# We need to read the audio files as arrays.
def speech_file_to_array_fn(batch):
    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)

# Run the model on the preprocessed test set in batches.
def evaluate(batch):
    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
    with torch.no_grad():
        logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
    pred_ids = torch.argmax(logits, dim=-1)
    batch["pred_strings"] = processor.batch_decode(pred_ids)
    return batch

result = test_dataset.map(evaluate, batched=True, batch_size=8)
print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
```

**Test Result**: 60.504024 %

## Training

The Common Voice `train` and `validation` datasets were used for training.
{"language": "ka", "license": "apache-2.0", "tags": ["audio", "automatic-speech-recognition", "speech", "xlsr-fine-tuning-week"], "datasets": ["common_voice"], "model-index": [{"name": "wav2vec2-large-xlsr-53-Georgian by Mehdi Hosseini Moghadam", "results": [{"task": {"type": "automatic-speech-recognition", "name": "Speech Recognition"}, "dataset": {"name": "Common Voice ka", "type": "common_voice", "args": "ka"}, "metrics": [{"type": "wer", "value": 60.504024, "name": "Test WER"}]}]}]}
MehdiHosseiniMoghadam/wav2vec2-large-xlsr-53-Georgian
null
[ "transformers", "pytorch", "jax", "wav2vec2", "automatic-speech-recognition", "audio", "speech", "xlsr-fine-tuning-week", "ka", "dataset:common_voice", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
automatic-speech-recognition
transformers
# wav2vec2-large-xlsr-53-German

Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on German using the [Common Voice](https://huggingface.co/datasets/common_voice) dataset.

When using this model, make sure that your speech input is sampled at 16kHz.

## Usage

The model can be used directly (without a language model) as follows:

```python
import torch
import torchaudio
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

test_dataset = load_dataset("common_voice", "de", split="test[:2%]")

processor = Wav2Vec2Processor.from_pretrained("MehdiHosseiniMoghadam/wav2vec2-large-xlsr-53-German")
model = Wav2Vec2ForCTC.from_pretrained("MehdiHosseiniMoghadam/wav2vec2-large-xlsr-53-German")

resampler = torchaudio.transforms.Resample(48_000, 16_000)

# Preprocessing the datasets.
# We need to read the audio files as arrays.
def speech_file_to_array_fn(batch):
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)

with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits

predicted_ids = torch.argmax(logits, dim=-1)

print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
```

## Evaluation

The model can be evaluated as follows on the German test data of Common Voice.

```python
import torch
import torchaudio
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import re

test_dataset = load_dataset("common_voice", "de", split="test[:15%]")
wer = load_metric("wer")

processor = Wav2Vec2Processor.from_pretrained("MehdiHosseiniMoghadam/wav2vec2-large-xlsr-53-German")
model = Wav2Vec2ForCTC.from_pretrained("MehdiHosseiniMoghadam/wav2vec2-large-xlsr-53-German")
model.to("cuda")

chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\%\‘\”\�]'
resampler = torchaudio.transforms.Resample(48_000, 16_000)

# Preprocessing the datasets.
# We need to read the audio files as arrays.
def speech_file_to_array_fn(batch):
    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)

# Run the model on the preprocessed test set in batches.
def evaluate(batch):
    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
    with torch.no_grad():
        logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
    pred_ids = torch.argmax(logits, dim=-1)
    batch["pred_strings"] = processor.batch_decode(pred_ids)
    return batch

result = test_dataset.map(evaluate, batched=True, batch_size=8)
print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
```

**Test Result**: 25.284593 %

## Training

10% of the Common Voice `train` and `validation` datasets were used for training.

## Testing

15% of the Common Voice `test` dataset was used for testing.
{"language": "de", "license": "apache-2.0", "tags": ["audio", "automatic-speech-recognition", "speech", "xlsr-fine-tuning-week"], "datasets": ["common_voice"], "model-index": [{"name": "wav2vec2-large-xlsr-53-German by Mehdi Hosseini Moghadam", "results": [{"task": {"type": "automatic-speech-recognition", "name": "Speech Recognition"}, "dataset": {"name": "Common Voice de", "type": "common_voice", "args": "de"}, "metrics": [{"type": "wer", "value": 25.284593, "name": "Test WER"}]}]}]}
MehdiHosseiniMoghadam/wav2vec2-large-xlsr-53-German
null
[ "transformers", "pytorch", "jax", "wav2vec2", "automatic-speech-recognition", "audio", "speech", "xlsr-fine-tuning-week", "de", "dataset:common_voice", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
automatic-speech-recognition
transformers
# wav2vec2-large-xlsr-53-Swedish

Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Swedish using the [Common Voice](https://huggingface.co/datasets/common_voice) dataset.

When using this model, make sure that your speech input is sampled at 16kHz.

## Usage

The model can be used directly (without a language model) as follows:

```python
import torch
import torchaudio
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

test_dataset = load_dataset("common_voice", "sv-SE", split="test[:2%]")

processor = Wav2Vec2Processor.from_pretrained("MehdiHosseiniMoghadam/wav2vec2-large-xlsr-53-Swedish")
model = Wav2Vec2ForCTC.from_pretrained("MehdiHosseiniMoghadam/wav2vec2-large-xlsr-53-Swedish")

resampler = torchaudio.transforms.Resample(48_000, 16_000)

# Preprocessing the datasets.
# We need to read the audio files as arrays.
def speech_file_to_array_fn(batch):
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)

with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits

predicted_ids = torch.argmax(logits, dim=-1)

print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
```

## Evaluation

The model can be evaluated as follows on the Swedish test data of Common Voice.

```python
import torch
import torchaudio
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import re

test_dataset = load_dataset("common_voice", "sv-SE", split="test")
wer = load_metric("wer")

processor = Wav2Vec2Processor.from_pretrained("MehdiHosseiniMoghadam/wav2vec2-large-xlsr-53-Swedish")
model = Wav2Vec2ForCTC.from_pretrained("MehdiHosseiniMoghadam/wav2vec2-large-xlsr-53-Swedish")
model.to("cuda")

chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“]'
resampler = torchaudio.transforms.Resample(48_000, 16_000)

# Preprocessing the datasets.
# We need to read the audio files as arrays.
def speech_file_to_array_fn(batch):
    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)

# Run the model on the preprocessed test set in batches.
def evaluate(batch):
    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
    with torch.no_grad():
        logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
    pred_ids = torch.argmax(logits, dim=-1)
    batch["pred_strings"] = processor.batch_decode(pred_ids)
    return batch

result = test_dataset.map(evaluate, batched=True, batch_size=8)
print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
```

**Test Result**: 41.388337 %

## Training

The Common Voice `train` and `validation` datasets were used for training.
{"language": "sv-SE", "license": "apache-2.0", "tags": ["audio", "automatic-speech-recognition", "speech", "xlsr-fine-tuning-week"], "datasets": ["common_voice"], "model-index": [{"name": "wav2vec2-large-xlsr-53-Swedish by Mehdi Hosseini Moghadam", "results": [{"task": {"type": "automatic-speech-recognition", "name": "Speech Recognition"}, "dataset": {"name": "Common Voice sv-SE", "type": "common_voice", "args": "sv-SE"}, "metrics": [{"type": "wer", "value": 41.388337, "name": "Test WER"}]}]}]}
MehdiHosseiniMoghadam/wav2vec2-large-xlsr-53-Swedish
null
[ "transformers", "pytorch", "jax", "wav2vec2", "automatic-speech-recognition", "audio", "speech", "xlsr-fine-tuning-week", "dataset:common_voice", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
Mehrad1/DiabloGPT-medium-Ron
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
Mehrdad/test
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
text-generation
transformers
# GPT-2 Story Generator

## Model description

Generate a short story from an input prompt.

Put the token ` [endprompt]` after your input. Example of an input:

```
A person with a high school education gets sent back into the 1600s and tries to explain science and technology to the people. [endprompt]
```

#### Limitations and bias

The training data was collected from Reddit, so the model could be strongly biased towards a young, white, male demographic.

## Training data

The data was collected by scraping Reddit.
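A minimal inference sketch for this card (the model id `Meli/GPT2-Prompt` is taken from this repository; the sampling settings are illustrative assumptions, not the author's recommended values):

```python
from transformers import pipeline

# Hypothetical usage sketch: load the story generator and append the
# [endprompt] marker to the writing prompt, as the card describes.
generator = pipeline("text-generation", model="Meli/GPT2-Prompt")

prompt = ("A person with a high school education gets sent back into the 1600s "
          "and tries to explain science and technology to the people. [endprompt]")

story = generator(prompt, max_length=200, do_sample=True, top_p=0.95)[0]["generated_text"]
print(story)
```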
{"language": ["en"], "tags": ["gpt2", "text-generation"], "pipeline_tag": "text-generation", "widget": [{"text": "A person with a high school education gets sent back into the 1600s and tries to explain science and technology to the people. [endprompt]"}, {"text": "A kid doodling in a math class accidentally creates the world's first functional magic circle in centuries. [endprompt]"}]}
Meli/GPT2-Prompt
null
[ "transformers", "pytorch", "jax", "gpt2", "text-generation", "en", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# distilbert-base-uncased-finetuned-cola

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the glue dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6324
- Matthews Correlation: 0.5207

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results

| Training Loss | Epoch | Step | Validation Loss | Matthews Correlation |
|:-------------:|:-----:|:----:|:---------------:|:--------------------:|
| 0.5245 | 1.0 | 535 | 0.5155 | 0.4181 |
| 0.3446 | 2.0 | 1070 | 0.5623 | 0.4777 |
| 0.2331 | 3.0 | 1605 | 0.6324 | 0.5207 |
| 0.1678 | 4.0 | 2140 | 0.7706 | 0.5106 |
| 0.1255 | 5.0 | 2675 | 0.8852 | 0.4998 |

### Framework versions

- Transformers 4.15.0
- Pytorch 1.10.0+cu111
- Datasets 1.18.0
- Tokenizers 0.10.3
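For readers who want to reproduce this setup, here is a sketch of `TrainingArguments` matching the hyperparameters listed above (the `output_dir` is a hypothetical placeholder, and the model/dataset/`Trainer` wiring is omitted; this is not the author's original training script):

```python
from transformers import TrainingArguments

# A sketch reproducing the hyperparameters from the card above.
args = TrainingArguments(
    output_dir="distilbert-base-uncased-finetuned-cola",  # hypothetical path
    learning_rate=2e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    seed=42,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
    num_train_epochs=5,
)
```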
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "datasets": ["glue"], "metrics": ["matthews_correlation"], "model-index": [{"name": "distilbert-base-uncased-finetuned-cola", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "glue", "type": "glue", "args": "cola"}, "metrics": [{"type": "matthews_correlation", "value": 0.5206791471093309, "name": "Matthews Correlation"}]}]}]}
MelissaTESSA/distilbert-base-uncased-finetuned-cola
null
[ "transformers", "pytorch", "tensorboard", "distilbert", "text-classification", "generated_from_trainer", "dataset:glue", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
Mengliu/distilbert-base-uncased-finetuned-ner
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{"license": "agpl-3.0"}
MeowcaTheoRange/SCOTT_WOZ
null
[ "license:agpl-3.0", "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
Mercxxix/DialoGPT-medium-friend
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
Mercxxix/DialoGPT-medium-friendbot
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
Merlin97/Milu
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
Merlinji/esperantoCL
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
Gggg
{}
Mervtttt/Ges
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
MeuMoc/Draco
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
feature-extraction
transformers
{}
MeysamAC/packt_bert_random
null
[ "transformers", "pytorch", "bert", "feature-extraction", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
Mezzat/arabicpearl
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# distilbert-base-uncased-distilled-clinc

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the clinc_oos dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2663
- Accuracy: 0.9461

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 48
- eval_batch_size: 48
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 9

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 4.1991 | 1.0 | 318 | 3.1495 | 0.7523 |
| 2.4112 | 2.0 | 636 | 1.5868 | 0.8510 |
| 1.1887 | 3.0 | 954 | 0.7975 | 0.9203 |
| 0.5952 | 4.0 | 1272 | 0.4870 | 0.9319 |
| 0.3275 | 5.0 | 1590 | 0.3571 | 0.9419 |
| 0.2066 | 6.0 | 1908 | 0.3070 | 0.9429 |
| 0.1456 | 7.0 | 2226 | 0.2809 | 0.9448 |
| 0.1154 | 8.0 | 2544 | 0.2697 | 0.9468 |
| 0.1011 | 9.0 | 2862 | 0.2663 | 0.9461 |

### Framework versions

- Transformers 4.16.2
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.11.0
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "datasets": ["clinc_oos"], "metrics": ["accuracy"], "model-index": [{"name": "distilbert-base-uncased-distilled-clinc", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "clinc_oos", "type": "clinc_oos", "args": "plus"}, "metrics": [{"type": "accuracy", "value": 0.9461290322580646, "name": "Accuracy"}]}]}]}
MhF/distilbert-base-uncased-distilled-clinc
null
[ "transformers", "pytorch", "distilbert", "text-classification", "generated_from_trainer", "dataset:clinc_oos", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# distilbert-base-uncased-finetuned-clinc

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the clinc_oos dataset.
It achieves the following results on the evaluation set:
- Loss: 0.7703
- Accuracy: 0.9187

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 48
- eval_batch_size: 48
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 4.2896 | 1.0 | 318 | 3.2887 | 0.7419 |
| 2.6309 | 2.0 | 636 | 1.8797 | 0.8310 |
| 1.5443 | 3.0 | 954 | 1.1537 | 0.8974 |
| 1.0097 | 4.0 | 1272 | 0.8560 | 0.9135 |
| 0.7918 | 5.0 | 1590 | 0.7703 | 0.9187 |

### Framework versions

- Transformers 4.16.2
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.11.0
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "datasets": ["clinc_oos"], "metrics": ["accuracy"], "model-index": [{"name": "distilbert-base-uncased-finetuned-clinc", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "clinc_oos", "type": "clinc_oos", "args": "plus"}, "metrics": [{"type": "accuracy", "value": 0.9187096774193548, "name": "Accuracy"}]}]}]}
MhF/distilbert-base-uncased-finetuned-clinc
null
[ "transformers", "pytorch", "tensorboard", "distilbert", "text-classification", "generated_from_trainer", "dataset:clinc_oos", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# distilbert-base-uncased-finetuned-emotion

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2232
- Accuracy: 0.9215
- F1: 0.9218

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 64
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|
| 0.8098 | 1.0 | 250 | 0.3138 | 0.9025 | 0.9001 |
| 0.2429 | 2.0 | 500 | 0.2232 | 0.9215 | 0.9218 |

### Framework versions

- Transformers 4.16.2
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.11.0
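Since the card gives no usage example, here is a minimal inference sketch (assuming the standard `text-classification` pipeline; the input sentence is illustrative):

```python
from transformers import pipeline

# Load the fine-tuned emotion classifier and score a sample sentence.
classifier = pipeline("text-classification",
                      model="MhF/distilbert-base-uncased-finetuned-emotion")
print(classifier("I am so happy this finally works!"))
```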
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "datasets": ["emotion"], "metrics": ["accuracy", "f1"], "model-index": [{"name": "distilbert-base-uncased-finetuned-emotion", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "emotion", "type": "emotion", "args": "default"}, "metrics": [{"type": "accuracy", "value": 0.9215, "name": "Accuracy"}, {"type": "f1", "value": 0.9217985126397109, "name": "F1"}]}]}]}
MhF/distilbert-base-uncased-finetuned-emotion
null
[ "transformers", "pytorch", "tensorboard", "distilbert", "text-classification", "generated_from_trainer", "dataset:emotion", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
token-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# xlm-roberta-base-finetuned-panx-all

This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1753
- F1: 0.8520

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 0.2989 | 1.0 | 835 | 0.1878 | 0.8123 |
| 0.1548 | 2.0 | 1670 | 0.1745 | 0.8480 |
| 0.1012 | 3.0 | 2505 | 0.1753 | 0.8520 |

### Framework versions

- Transformers 4.16.2
- Pytorch 1.10.0+cu113
- Datasets 1.18.3
- Tokenizers 0.11.0
{"license": "mit", "tags": ["generated_from_trainer"], "metrics": ["f1"], "model-index": [{"name": "xlm-roberta-base-finetuned-panx-all", "results": []}]}
MhF/xlm-roberta-base-finetuned-panx-all
null
[ "transformers", "pytorch", "xlm-roberta", "token-classification", "generated_from_trainer", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
token-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# xlm-roberta-base-finetuned-panx-de-fr

This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1576
- F1: 0.8571

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 0.2924 | 1.0 | 715 | 0.1819 | 0.8286 |
| 0.1503 | 2.0 | 1430 | 0.1580 | 0.8511 |
| 0.0972 | 3.0 | 2145 | 0.1576 | 0.8571 |

### Framework versions

- Transformers 4.16.2
- Pytorch 1.10.0+cu113
- Datasets 1.18.3
- Tokenizers 0.11.0
{"license": "mit", "tags": ["generated_from_trainer"], "metrics": ["f1"], "model-index": [{"name": "xlm-roberta-base-finetuned-panx-de-fr", "results": []}]}
MhF/xlm-roberta-base-finetuned-panx-de-fr
null
[ "transformers", "pytorch", "xlm-roberta", "token-classification", "generated_from_trainer", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
token-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# xlm-roberta-base-finetuned-panx-de

This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1354
- F1: 0.8621

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 0.254 | 1.0 | 525 | 0.1652 | 0.8254 |
| 0.1293 | 2.0 | 1050 | 0.1431 | 0.8489 |
| 0.0797 | 3.0 | 1575 | 0.1354 | 0.8621 |

### Framework versions

- Transformers 4.16.2
- Pytorch 1.10.0+cu113
- Datasets 1.18.3
- Tokenizers 0.11.0
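A minimal NER sketch for this checkpoint (assuming the standard `token-classification` pipeline; the German example sentence is illustrative):

```python
from transformers import pipeline

# Tag named entities with the PAN-X German model; "simple" aggregation
# merges word pieces into whole entity spans.
ner = pipeline("token-classification",
               model="MhF/xlm-roberta-base-finetuned-panx-de",
               aggregation_strategy="simple")
print(ner("Jeff Dean arbeitet bei Google in Kalifornien."))
```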
{"license": "mit", "tags": ["generated_from_trainer"], "datasets": ["xtreme"], "metrics": ["f1"], "model-index": [{"name": "xlm-roberta-base-finetuned-panx-de", "results": [{"task": {"type": "token-classification", "name": "Token Classification"}, "dataset": {"name": "xtreme", "type": "xtreme", "args": "PAN-X.de"}, "metrics": [{"type": "f1", "value": 0.862053266560437, "name": "F1"}]}]}]}
MhF/xlm-roberta-base-finetuned-panx-de
null
[ "transformers", "pytorch", "tensorboard", "xlm-roberta", "token-classification", "generated_from_trainer", "dataset:xtreme", "license:mit", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
token-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# xlm-roberta-base-finetuned-panx-en

This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset.
It achieves the following results on the evaluation set:
- Loss: 0.3856
- F1: 0.6808

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 1.1038 | 1.0 | 50 | 0.5255 | 0.5331 |
| 0.4922 | 2.0 | 100 | 0.4377 | 0.6379 |
| 0.3664 | 3.0 | 150 | 0.3856 | 0.6808 |

### Framework versions

- Transformers 4.16.2
- Pytorch 1.10.0+cu113
- Datasets 1.18.3
- Tokenizers 0.11.0
{"license": "mit", "tags": ["generated_from_trainer"], "datasets": ["xtreme"], "metrics": ["f1"], "model-index": [{"name": "xlm-roberta-base-finetuned-panx-en", "results": [{"task": {"type": "token-classification", "name": "Token Classification"}, "dataset": {"name": "xtreme", "type": "xtreme", "args": "PAN-X.en"}, "metrics": [{"type": "f1", "value": 0.6807563959955506, "name": "F1"}]}]}]}
MhF/xlm-roberta-base-finetuned-panx-en
null
[ "transformers", "pytorch", "xlm-roberta", "token-classification", "generated_from_trainer", "dataset:xtreme", "license:mit", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
token-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# xlm-roberta-base-finetuned-panx-fr

This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2736
- F1: 0.8353

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 0.5826 | 1.0 | 191 | 0.3442 | 0.7888 |
| 0.2669 | 2.0 | 382 | 0.2848 | 0.8326 |
| 0.1818 | 3.0 | 573 | 0.2736 | 0.8353 |

### Framework versions

- Transformers 4.16.2
- Pytorch 1.10.0+cu113
- Datasets 1.18.3
- Tokenizers 0.11.0
{"license": "mit", "tags": ["generated_from_trainer"], "datasets": ["xtreme"], "metrics": ["f1"], "model-index": [{"name": "xlm-roberta-base-finetuned-panx-fr", "results": [{"task": {"type": "token-classification", "name": "Token Classification"}, "dataset": {"name": "xtreme", "type": "xtreme", "args": "PAN-X.fr"}, "metrics": [{"type": "f1", "value": 0.8353494623655915, "name": "F1"}]}]}]}
MhF/xlm-roberta-base-finetuned-panx-fr
null
[ "transformers", "pytorch", "xlm-roberta", "token-classification", "generated_from_trainer", "dataset:xtreme", "license:mit", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
token-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# xlm-roberta-base-finetuned-panx-it

This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2491
- F1: 0.8213

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 0.8192 | 1.0 | 70 | 0.3300 | 0.7184 |
| 0.2949 | 2.0 | 140 | 0.2817 | 0.7959 |
| 0.189 | 3.0 | 210 | 0.2491 | 0.8213 |

### Framework versions

- Transformers 4.16.2
- Pytorch 1.10.0+cu113
- Datasets 1.18.3
- Tokenizers 0.11.0
{"license": "mit", "tags": ["generated_from_trainer"], "datasets": ["xtreme"], "metrics": ["f1"], "model-index": [{"name": "xlm-roberta-base-finetuned-panx-it", "results": [{"task": {"type": "token-classification", "name": "Token Classification"}, "dataset": {"name": "xtreme", "type": "xtreme", "args": "PAN-X.it"}, "metrics": [{"type": "f1", "value": 0.8213114754098361, "name": "F1"}]}]}]}
MhF/xlm-roberta-base-finetuned-panx-it
null
[ "transformers", "pytorch", "xlm-roberta", "token-classification", "generated_from_trainer", "dataset:xtreme", "license:mit", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
Mhmdsabry/wv-robust2bart
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
MiBo/RepAlBERTo
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MiBo/RepML
null
[ "transformers", "pytorch", "safetensors", "bert", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MiBo/SABERT
null
[ "transformers", "pytorch", "safetensors", "bert", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
MiBo/SADistilBERT
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MiBo/SADistilGPT2
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MiBo/SAGPT2
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
MiaSagarnaga/Boo
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
text-generation
transformers
# feinschwarz

This model is a fine-tuned version of [dbmdz/german-gpt2](https://huggingface.co/dbmdz/german-gpt2). The dataset was compiled from all texts of https://www.feinschwarz.net (as of October 2021). The homepage gathers essayistic texts on theological topics.

The model will be used to explore the challenges of text-generating AI for theology with a hands-on approach. Can an AI generate theological knowledge? Is a text by Karl Rahner of more value than an AI-generated text? Can we even distinguish a Rahner text from an AI-generated text in the future? And the crucial question: Would it be bad if not?

The model is a very first attempt and in its current version certainly not yet a danger for academic theology 🤓

# Using the model

You can create text with the model using this code:

```python
from transformers import pipeline

pipe = pipeline('text-generation', model="Michael711/feinschwarz",
                tokenizer="Michael711/feinschwarz")

text = pipe("Der Sinn des Lebens ist es", max_length=100)[0]["generated_text"]
print(text)
```

Have fun theologizing!
{"license": "mit", "tags": ["generated_from_trainer", "de"], "model-index": [{"name": "feinesblack", "results": []}]}
Michael711/feinschwarz
null
[ "transformers", "pytorch", "gpt2", "text-generation", "generated_from_trainer", "de", "license:mit", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
MichaelReusens/dutch_twitter_sentiment_model
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
text-generation
transformers
# Harry Potter DialoGPT Model
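The card gives no usage example; the following chat-loop sketch assumes the standard DialoGPT pattern (only the model id is taken from this repository, everything else is illustrative):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("MichaelTheLearner/DialoGPT-medium-harry")
model = AutoModelForCausalLM.from_pretrained("MichaelTheLearner/DialoGPT-medium-harry")

chat_history_ids = None
for step in range(3):
    # Encode the user's turn, appending the end-of-sequence token.
    new_ids = tokenizer.encode(input(">> User: ") + tokenizer.eos_token, return_tensors="pt")
    bot_input_ids = new_ids if chat_history_ids is None else torch.cat([chat_history_ids, new_ids], dim=-1)
    # Generate a response conditioned on the running conversation history.
    chat_history_ids = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)
    print("Harry:", tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True))
```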
{"tags": ["conversational"]}
MichaelTheLearner/DialoGPT-medium-harry
null
[ "transformers", "pytorch", "gpt2", "text-generation", "conversational", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
Michaelmatao/bert-base-uncased-finetuned-swag
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
text2text-generation
transformers
## About the model

The model has been trained on a collection of 500k articles with headings. Its purpose is to create a one-line heading suitable for the given article.

Sample code with a WikiNews article:

```python
import torch
from transformers import T5ForConditionalGeneration, T5Tokenizer

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model = T5ForConditionalGeneration.from_pretrained("Michau/t5-base-en-generate-headline")
tokenizer = T5Tokenizer.from_pretrained("Michau/t5-base-en-generate-headline")
model = model.to(device)

article = '''
Very early yesterday morning, the United States President Donald Trump reported he and his wife First Lady Melania Trump tested positive for COVID-19. Officials said the Trumps' 14-year-old son Barron tested negative as did First Family and Senior Advisors Jared Kushner and Ivanka Trump.

Trump took to social media, posting at 12:54 am local time (0454 UTC) on Twitter, "Tonight, [Melania] and I tested positive for COVID-19. We will begin our quarantine and recovery process immediately. We will get through this TOGETHER!" Yesterday afternoon Marine One landed on the White House's South Lawn flying Trump to Walter Reed National Military Medical Center (WRNMMC) in Bethesda, Maryland.

Reports said both were showing "mild symptoms". Senior administration officials were tested as people were informed of the positive test. Senior advisor Hope Hicks had tested positive on Thursday.

Presidential physician Sean Conley issued a statement saying Trump has been given zinc, vitamin D, Pepcid and a daily Aspirin. Conley also gave a single dose of the experimental polyclonal antibodies drug from Regeneron Pharmaceuticals.

According to official statements, Trump, now operating from the WRNMMC, is to continue performing his duties as president during a 14-day quarantine. In the event of Trump becoming incapacitated, Vice President Mike Pence could take over the duties of president via the 25th Amendment of the US Constitution. The Pence family all tested negative as of yesterday and there were no changes regarding Pence's campaign events.
'''

text = "headline: " + article

encoding = tokenizer.encode_plus(text, return_tensors="pt")
input_ids = encoding["input_ids"].to(device)
attention_masks = encoding["attention_mask"].to(device)

beam_outputs = model.generate(
    input_ids=input_ids,
    attention_mask=attention_masks,
    max_length=64,
    num_beams=3,
    early_stopping=True,
)

result = tokenizer.decode(beam_outputs[0])
print(result)
```

Result:

```
Trump and First Lady Melania Test Positive for COVID-19
```
{}
Michau/t5-base-en-generate-headline
null
[ "transformers", "pytorch", "tf", "jax", "t5", "text2text-generation", "autotrain_compatible", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
question-answering
transformers
{}
MichelBartels/tinybert-6l-768d-squad2-large-teacher-dummy
null
[ "transformers", "pytorch", "bert", "question-answering", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
question-answering
transformers
{}
MichelBartels/tinybert-6l-768d-squad2-large-teacher-finetuned-step1
null
[ "transformers", "pytorch", "bert", "question-answering", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
question-answering
transformers
{}
MichelBartels/tinybert-6l-768d-squad2-large-teacher-finetuned
null
[ "transformers", "pytorch", "bert", "question-answering", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
question-answering
transformers
{}
MichelBartels/tinybert-6l-768d-squad2-large-teacher
null
[ "transformers", "pytorch", "bert", "question-answering", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
Mickey/distilbert-base-uncased-finetuned-ner
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
Micky/gpt2-wikitext2
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/0-GPT2SP-appceleratorstudio
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/0-GPT2SP-aptanastudio
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/0-GPT2SP-bamboo
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/0-GPT2SP-clover
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/0-GPT2SP-datamanagement
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/0-GPT2SP-duracloud
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/0-GPT2SP-jirasoftware
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/0-GPT2SP-mesos
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/0-GPT2SP-moodle
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/0-GPT2SP-mule
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/0-GPT2SP-mulestudio
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/0-GPT2SP-springxd
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/0-GPT2SP-talenddataquality
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/0-GPT2SP-talendesb
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/0-GPT2SP-titanium
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/0-GPT2SP-usergrid
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/00-GPT2SP-appceleratorstudio-aptanastudio
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/00-GPT2SP-appceleratorstudio-titanium
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/00-GPT2SP-aptanastudio-titanium
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/00-GPT2SP-mesos-usergrid
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/00-GPT2SP-mule-mulestudio
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/00-GPT2SP-mulestudio-mule
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/00-GPT2SP-titanium-appceleratorstudio
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/00-GPT2SP-usergrid-mesos
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/000-GPT2SP-appceleratorstudio-mule
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/000-GPT2SP-appceleratorstudio-mulestudio
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/000-GPT2SP-clover-usergrid
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/000-GPT2SP-mule-titanium
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/000-GPT2SP-mulestudio-titanium
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/000-GPT2SP-talenddataquality-appceleratorstudio
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/000-GPT2SP-talenddataquality-aptanastudio
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/000-GPT2SP-talendesb-mesos
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/1-GPT2SP-appceleratorstudio
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/1-GPT2SP-aptanastudio
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/1-GPT2SP-bamboo
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/1-GPT2SP-clover
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/1-GPT2SP-datamanagement
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/1-GPT2SP-duracloud
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/1-GPT2SP-jirasoftware
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/1-GPT2SP-mesos
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
MickyMike/1-GPT2SP-moodle
null
[ "transformers", "pytorch", "gpt2", "text-classification", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00