pipeline_tag (string, 48 classes) | library_name (string, 205 classes) | text (string, 0–18.3M chars) | metadata (string, 2–1.07B chars) | id (string, 5–122 chars) | last_modified (null) | tags (list, 1–1.84k items) | sha (null) | created_at (string, 25 chars)
---|---|---|---|---|---|---|---|---
null | null | {} | cambridgeltl/mbert-lang-sft-fr-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/mbert-lang-sft-gv-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/mbert-lang-sft-hau-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/mbert-lang-sft-hi-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/mbert-lang-sft-hsb-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/mbert-lang-sft-ibo-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/mbert-lang-sft-id-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/mbert-lang-sft-ja-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/mbert-lang-sft-kin-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/mbert-lang-sft-ko-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/mbert-lang-sft-kpv-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/mbert-lang-sft-lug-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/mbert-lang-sft-luo-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/mbert-lang-sft-mt-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/mbert-lang-sft-myv-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/mbert-lang-sft-olo-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/mbert-lang-sft-pcm-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/mbert-lang-sft-pt-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/mbert-lang-sft-ro-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/mbert-lang-sft-ru-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/mbert-lang-sft-sa-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/mbert-lang-sft-sme-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/mbert-lang-sft-swa-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/mbert-lang-sft-ta-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/mbert-lang-sft-th-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/mbert-lang-sft-tr-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/mbert-lang-sft-ug-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/mbert-lang-sft-vi-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/mbert-lang-sft-wol-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/mbert-lang-sft-yor-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/mbert-lang-sft-yue-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/mbert-lang-sft-zh-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/mbert-task-sft-dp-ms | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/mbert-task-sft-dp | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/mbert-task-sft-masakhaner | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/mbert-task-sft-pos-ms | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/mbert-task-sft-pos | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
feature-extraction | transformers | ---
language: en
tags:
- sentence-embeddings
- sentence-similarity
---
### cambridgeltl/mirror-bert-base-uncased-sentence-drophead
An unsupervised sentence encoder proposed by [Liu et al. (2021)](https://arxiv.org/pdf/2104.08027.pdf), using [drophead](https://aclanthology.org/2020.findings-emnlp.178.pdf) instead of dropout as feature space augmentation. Trained with unlabelled raw sentences, using [bert-base-uncased](https://huggingface.co/bert-base-uncased) as the base model. Please use mean-pooling over *all tokens* as the representation of the input.
Note that the model does not replicate the exact numbers reported in the paper, since those numbers are an average of three runs.
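For illustration, a minimal usage sketch with plain `transformers` (not an official snippet; only the model name and the mean-pooling recommendation come from this card):
```python
import torch
from transformers import AutoModel, AutoTokenizer

# Hypothetical usage sketch: mean-pool the last hidden states over all tokens.
model_name = "cambridgeltl/mirror-bert-base-uncased-sentence-drophead"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModel.from_pretrained(model_name)
model.eval()

def embed(sentence):
    inputs = tokenizer(sentence, return_tensors="pt")
    with torch.no_grad():
        hidden = model(**inputs).last_hidden_state   # [1, seq_len, hidden]
    return hidden.mean(dim=1).squeeze(0)             # mean-pool over all tokens

a = embed("A cat sits on the mat.")
b = embed("A kitten is resting on a rug.")
print(torch.nn.functional.cosine_similarity(a, b, dim=0).item())
```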
### Citation
```bibtex
@inproceedings{
liu2021fast,
title={Fast, Effective, and Self-Supervised: Transforming Masked Language Models into Universal Lexical and Sentence Encoders},
author={Liu, Fangyu and Vuli{\'c}, Ivan and Korhonen, Anna and Collier, Nigel},
booktitle={EMNLP 2021},
year={2021}
}
```
| {} | cambridgeltl/mirror-bert-base-uncased-sentence-drophead | null | [
"transformers",
"pytorch",
"safetensors",
"bert",
"feature-extraction",
"arxiv:2104.08027",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
feature-extraction | transformers | ---
language: en
tags:
- sentence-embeddings
- sentence-similarity
---
### cambridgeltl/mirror-bert-base-uncased-sentence
An unsupervised sentence encoder proposed by [Liu et al. (2021)](https://arxiv.org/pdf/2104.08027.pdf). Trained with unlabelled raw sentences, using [bert-base-uncased](https://huggingface.co/bert-base-uncased) as the base model. Please use mean-pooling over *all tokens* (including padded ones) as the representation of the input.
Note that the model does not replicate the exact numbers reported in the paper, since those numbers are an average of three runs.
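A minimal batched sketch with plain `transformers` (not an official snippet; following the instruction above, the mean is taken over the full padded sequence rather than masking out padding):
```python
import torch
from transformers import AutoModel, AutoTokenizer

# Hypothetical batched sketch: padded positions are deliberately included in the mean.
model_name = "cambridgeltl/mirror-bert-base-uncased-sentence"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModel.from_pretrained(model_name)
model.eval()

batch = tokenizer(["The movie was great.", "I really enjoyed the film."],
                  padding=True, return_tensors="pt")
with torch.no_grad():
    hidden = model(**batch).last_hidden_state        # [batch, seq_len, hidden]

embeddings = hidden.mean(dim=1)                      # no attention-mask weighting
print(torch.nn.functional.cosine_similarity(embeddings[0], embeddings[1], dim=0).item())
```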
### Citation
```bibtex
@inproceedings{
liu2021fast,
title={Fast, Effective, and Self-Supervised: Transforming Masked Language Models into Universal Lexical and Sentence Encoders},
author={Liu, Fangyu and Vuli{\'c}, Ivan and Korhonen, Anna and Collier, Nigel},
booktitle={EMNLP 2021},
year={2021}
}
```
| {} | cambridgeltl/mirror-bert-base-uncased-sentence | null | [
"transformers",
"pytorch",
"bert",
"feature-extraction",
"arxiv:2104.08027",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
feature-extraction | transformers | ---
language: en
tags:
- word-embeddings
- word-similarity
---
### mirror-bert-base-uncased-word
An unsupervised word encoder proposed by [Liu et al. (2021)](https://arxiv.org/pdf/2104.08027.pdf). Trained with a set of unlabelled words, using [bert-base-uncased](https://huggingface.co/bert-base-uncased) as the base model. Please use `[CLS]` as the representation of the input.
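For illustration, a minimal word-level sketch with plain `transformers` (the example words are arbitrary; only the model name and the `[CLS]` recommendation come from this card):
```python
import torch
from transformers import AutoModel, AutoTokenizer

# Hypothetical sketch: take the [CLS] vector of the last layer as the word embedding.
model_name = "cambridgeltl/mirror-bert-base-uncased-word"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModel.from_pretrained(model_name)
model.eval()

words = ["car", "automobile", "banana"]
batch = tokenizer(words, padding=True, return_tensors="pt")
with torch.no_grad():
    cls_vecs = model(**batch).last_hidden_state[:, 0, :]   # [CLS] vector per word

cos = torch.nn.functional.cosine_similarity
print(cos(cls_vecs[0], cls_vecs[1], dim=0).item(),   # car vs automobile
      cos(cls_vecs[0], cls_vecs[2], dim=0).item())   # car vs banana
```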
### Citation
```bibtex
@inproceedings{
liu2021fast,
title={Fast, Effective, and Self-Supervised: Transforming Masked Language Models into Universal Lexical and Sentence Encoders},
author={Liu, Fangyu and Vuli{\'c}, Ivan and Korhonen, Anna and Collier, Nigel},
booktitle={EMNLP 2021},
year={2021}
}
```
| {} | cambridgeltl/mirror-bert-base-uncased-word | null | [
"transformers",
"pytorch",
"safetensors",
"bert",
"feature-extraction",
"arxiv:2104.08027",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
feature-extraction | transformers | ---
language: en
tags:
- sentence-embeddings
- sentence-similarity
---
### cambridgeltl/mirror-roberta-base-sentence-drophead
An unsupervised sentence encoder proposed by [Liu et al. (2021)](https://arxiv.org/pdf/2104.08027.pdf), using [drophead](https://aclanthology.org/2020.findings-emnlp.178.pdf) instead of dropout as feature space augmentation. The model is trained with unlabelled raw sentences, using [roberta-base](https://huggingface.co/roberta-base) as the base model. Please use `[CLS]` (before pooler) as the representation of the input.
Note that the model does not replicate the exact numbers reported in the paper, since those numbers are an average of three runs.
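A minimal sketch with plain `transformers`, assuming "`[CLS]` (before pooler)" means the first token's last-layer hidden state rather than `pooler_output`:
```python
import torch
from transformers import AutoModel, AutoTokenizer

# Hypothetical sketch: use the first token's hidden state before the pooler head.
model_name = "cambridgeltl/mirror-roberta-base-sentence-drophead"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModel.from_pretrained(model_name)
model.eval()

batch = tokenizer(["He plays the guitar.", "She is playing guitar."],
                  padding=True, return_tensors="pt")
with torch.no_grad():
    out = model(**batch)

cls_before_pooler = out.last_hidden_state[:, 0, :]   # NOT out.pooler_output
print(torch.nn.functional.cosine_similarity(cls_before_pooler[0],
                                            cls_before_pooler[1], dim=0).item())
```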
### Citation
```bibtex
@inproceedings{
liu2021fast,
title={Fast, Effective, and Self-Supervised: Transforming Masked Language Models into Universal Lexical and Sentence Encoders},
author={Liu, Fangyu and Vuli{\'c}, Ivan and Korhonen, Anna and Collier, Nigel},
booktitle={EMNLP 2021},
year={2021}
}
```
| {} | cambridgeltl/mirror-roberta-base-sentence-drophead | null | [
"transformers",
"pytorch",
"safetensors",
"roberta",
"feature-extraction",
"arxiv:2104.08027",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
feature-extraction | transformers | ---
language: en
tags:
- sentence-embeddings
- sentence-similarity
---
### cambridgeltl/mirror-roberta-base-sentence
An unsupervised sentence encoder proposed by [Liu et al. (2021)](https://arxiv.org/pdf/2104.08027.pdf). The model is trained with unlabelled raw sentences, using [roberta-base](https://huggingface.co/roberta-base) as the base model. Please use `[CLS]` (before pooler) as the representation of the input.
Note that the model does not replicate the exact numbers reported in the paper, since those numbers are an average of three runs.
### Citation
```bibtex
@inproceedings{
liu2021fast,
title={Fast, Effective, and Self-Supervised: Transforming Masked Language Models into Universal Lexical and Sentence Encoders},
author={Liu, Fangyu and Vuli{\'c}, Ivan and Korhonen, Anna and Collier, Nigel},
booktitle={EMNLP 2021},
year={2021}
}
```
| {} | cambridgeltl/mirror-roberta-base-sentence | null | [
"transformers",
"pytorch",
"roberta",
"feature-extraction",
"arxiv:2104.08027",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
feature-extraction | transformers | {} | cambridgeltl/mirrorwic-bert-base-uncased | null | [
"transformers",
"pytorch",
"bert",
"feature-extraction",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
feature-extraction | transformers | {} | cambridgeltl/mirrorwic-deberta-base | null | [
"transformers",
"pytorch",
"deberta",
"feature-extraction",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
feature-extraction | transformers | {} | cambridgeltl/mirrorwic-roberta-base | null | [
"transformers",
"pytorch",
"roberta",
"feature-extraction",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/pptod-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
text-generation | transformers | This model provides a GPT-2 language model trained with SimCTG on the English Wikipedia based on our paper [_A Contrastive Framework for Neural Text Generation_](https://arxiv.org/abs/2202.06417).
We provide a detailed tutorial on how to apply SimCTG and Contrastive Search in our [project repo](https://github.com/yxuansu/SimCTG#4-huggingface-style-tutorials-back-to-top). In the following, we illustrate a brief tutorial on how to use our approach to perform text generation.
## 1. Installation of SimCTG:
```bash
pip install simctg --upgrade
```
## 2. Initialize SimCTG Model:
```python
import torch
# load SimCTG language model
from simctg.simctggpt import SimCTGGPT
model_name = r'cambridgeltl/simctg_english_wikipedia'
model = SimCTGGPT(model_name)
model.eval()
tokenizer = model.tokenizer
```
## 3. Prepare the Text Prefix:
```python
prefix_text = r"Insect farming is the practice of raising and breeding insects as livestock, also referred to as minilivestock or micro stock. Insects may be farmed for the commodities"
print ('Prefix is: {}'.format(prefix_text))
tokens = tokenizer.tokenize(prefix_text)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_ids = torch.LongTensor(input_ids).view(1,-1)
```
## 4. Generate Text with Contrastive Search:
```python
beam_width, alpha, decoding_len = 5, 0.6, 128
output = model.fast_contrastive_search(input_ids=input_ids, beam_width=beam_width,
alpha=alpha, decoding_len=decoding_len)
print("Output:\n" + 100 * '-')
print(tokenizer.decode(output))
'''
Prefix is: Insect farming is the practice of raising and breeding insects as livestock, also referred to as minilivestock or
micro stock. Insects may be farmed for the commodities
Output:
----------------------------------------------------------------------------------------------------
Insect farming is the practice of raising and breeding insects as livestock, also referred to as minilivestock or micro stock.
Insects may be farmed for the commodities they produce, such as honey, corn, sorghum, and other crops. In some cases, the
production of insects is a way to increase income for the owner or his family. This type of farming has been described as "an
economic system that benefits all people regardless of race, sex, or social status" (p. 9). A large number of farmers in North
America, Europe, and South America have used the method of farming for food production in order to feed their families and livestock.
The most common method of farming is by hand-cropping, which consists of cutting a hole in the ground and using a saw
'''
```
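Since the checkpoint is a standard GPT-2 language model, it can likely also be used without the `simctg` package: recent `transformers` releases implement contrastive search directly in `generate` via `penalty_alpha` and `top_k`. A minimal sketch under that assumption (the `simctg` tutorial above remains the reference usage):
```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumes the checkpoint loads as a plain GPT-2 LM and that the installed
# transformers version supports contrastive search (penalty_alpha / top_k).
model_name = "cambridgeltl/simctg_english_wikipedia"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
model.eval()

prefix_text = ("Insect farming is the practice of raising and breeding insects "
               "as livestock, also referred to as minilivestock or micro stock.")
input_ids = tokenizer(prefix_text, return_tensors="pt").input_ids

with torch.no_grad():
    # roughly corresponds to beam_width=5, alpha=0.6 in the tutorial above
    output = model.generate(input_ids, penalty_alpha=0.6, top_k=5, max_new_tokens=128)
print(tokenizer.decode(output[0]))
```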
For more details of our work, please refer to our main [project repo](https://github.com/yxuansu/SimCTG).
## 5. Citation:
If you find our paper and resources useful, please kindly leave a star and cite our paper. Thanks!
```bibtex
@article{su2022contrastive,
title={A Contrastive Framework for Neural Text Generation},
author={Su, Yixuan and Lan, Tian and Wang, Yan and Yogatama, Dani and Kong, Lingpeng and Collier, Nigel},
journal={arXiv preprint arXiv:2202.06417},
year={2022}
}
```
| {} | cambridgeltl/simctg_english_wikipedia | null | [
"transformers",
"pytorch",
"gpt2",
"text-generation",
"arxiv:2202.06417",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text-generation | transformers | This model provides a Chinese GPT-2 language model trained with SimCTG on the LCCC benchmark [(Wang et al., 2020)](https://arxiv.org/pdf/2008.03946v2.pdf) based on our paper [_A Contrastive Framework for Neural Text Generation_](https://arxiv.org/abs/2202.06417).
We provide a detailed tutorial on how to apply SimCTG and Contrastive Search in our [project repo](https://github.com/yxuansu/SimCTG#4-huggingface-style-tutorials-back-to-top). In the following, we illustrate a brief tutorial on how to use our approach to perform text generation.
## 1. Installation of SimCTG:
```bash
pip install simctg --upgrade
```
## 2. Initialize SimCTG Model:
```python
import torch
# load SimCTG language model
from simctg.simctggpt import SimCTGGPT
model_name = r'cambridgeltl/simctg_lccc_dialogue'
model = SimCTGGPT(model_name)
model.eval()
tokenizer = model.tokenizer
eos_token = '[SEP]'
eos_token_id = tokenizer.convert_tokens_to_ids([eos_token])[0]
```
## 3. Prepare the Text Prefix:
```python
context_list = ['刺猬很可爱!以前别人送了只没养,味儿太大!', '是很可爱但是非常臭', '是啊,没办法养', '那个怎么养哦不会扎手吗']
prefix_text = eos_token.join(context_list).strip(eos_token) + eos_token
print ('Prefix is: {}'.format(prefix_text))
tokens = tokenizer.tokenize(prefix_text)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_ids = torch.LongTensor(input_ids).view(1,-1)
```
## 4. Generate Text with Contrastive Search:
```python
beam_width, alpha, decoding_len = 5, 0.6, 64
output = model.fast_contrastive_search(input_ids=input_ids, beam_width=beam_width, alpha=alpha,
decoding_len=decoding_len, end_of_sequence_token_id=eos_token_id,
early_stop=True)
print("Output:\n" + 100 * '-')
print(''.join(tokenizer.decode(output)))
'''
Prefix is: 刺猬很可爱!以前别人送了只没养,味儿太大![SEP]是很可爱但是非常臭[SEP]是啊,没办法养[SEP]那个怎么养哦不会扎手吗[SEP]
Output:
----------------------------------------------------------------------------------------------------
刺猬很可爱!以前别人送了只没养,味儿太大![SEP]是很可爱但是非常臭[SEP]是啊,没办法养[SEP]那个怎么养哦不会扎手吗[SEP]我觉得还好,就是有点臭
'''
```
For more details of our work, please refer to our main [project repo](https://github.com/yxuansu/SimCTG).
## 5. Citation:
If you find our paper and resources useful, please kindly leave a star and cite our paper. Thanks!
```bibtex
@article{su2022contrastive,
title={A Contrastive Framework for Neural Text Generation},
author={Su, Yixuan and Lan, Tian and Wang, Yan and Yogatama, Dani and Kong, Lingpeng and Collier, Nigel},
journal={arXiv preprint arXiv:2202.06417},
year={2022}
}
```
| {} | cambridgeltl/simctg_lccc_dialogue | null | [
"transformers",
"pytorch",
"gpt2",
"text-generation",
"arxiv:2008.03946",
"arxiv:2202.06417",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text-generation | transformers | This model provides a GPT-2 language model trained with SimCTG on the Wikitext-103 benchmark [(Merity et al., 2016)](https://arxiv.org/abs/1609.07843) based on our paper [_A Contrastive Framework for Neural Text Generation_](https://arxiv.org/abs/2202.06417).
We provide a detailed tutorial on how to apply SimCTG and Contrastive Search in our [project repo](https://github.com/yxuansu/SimCTG#4-huggingface-style-tutorials-back-to-top). In the following, we illustrate a brief tutorial on how to use our approach to perform text generation.
## 1. Installation of SimCTG:
```bash
pip install simctg --upgrade
```
## 2. Initialize SimCTG Model:
```python
import torch
# load SimCTG language model
from simctg.simctggpt import SimCTGGPT
model_name = r'cambridgeltl/simctg_wikitext103'
model = SimCTGGPT(model_name)
model.eval()
tokenizer = model.tokenizer
```
## 3. Prepare the Text Prefix:
```python
prefix_text = r"Butt criticized Donald 's controls in certain situations in the game , as well as the difficulty of some levels and puzzles .
Buchanan also criticized the controls , calling"
print ('Prefix is: {}'.format(prefix_text))
tokens = tokenizer.tokenize(prefix_text)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_ids = torch.LongTensor(input_ids).view(1,-1)
```
## 4. Generate Text with Contrastive Search:
```python
beam_width, alpha, decoding_len = 8, 0.6, 128
output = model.fast_contrastive_search(input_ids=input_ids, beam_width=beam_width,
alpha=alpha, decoding_len=decoding_len)
print("Output:\n" + 100 * '-')
print(tokenizer.decode(output))
'''
Prefix is: Butt criticized Donald 's controls in certain situations in the game , as well as the difficulty of some levels and puzzles .
Buchanan also criticized the controls , calling
Output:
----------------------------------------------------------------------------------------------------
Butt criticized Donald's controls in certain situations in the game, as well as the difficulty of some levels and puzzles. Buchanan also
criticized the controls, calling them " unimpressive " and a " nightmare " of an experience to play with players unfamiliar with Tetris.
On the other hand, his opinion was shared by other reviewers, and some were critical of the game's technical design for the Wii version
of Tetris. In addition, Tintin's review included a quote from Roger Ebert, who said that Tetris was better than the original game due to
its simplicity and ease of play. Ebert's comments were included in the game's DVD commentary, released on March 22, 2010. It is unclear
if any of the video commentary was taken from the DVD
'''
```
For more details of our work, please refer to our main [project repo](https://github.com/yxuansu/SimCTG).
## 5. Citation:
If you find our paper and resources useful, please kindly leave a star and cite our paper. Thanks!
```bibtex
@article{su2022contrastive,
title={A Contrastive Framework for Neural Text Generation},
author={Su, Yixuan and Lan, Tian and Wang, Yan and Yogatama, Dani and Kong, Lingpeng and Collier, Nigel},
journal={arXiv preprint arXiv:2202.06417},
year={2022}
}
```
| {} | cambridgeltl/simctg_wikitext103 | null | [
"transformers",
"pytorch",
"gpt2",
"text-generation",
"arxiv:1609.07843",
"arxiv:2202.06417",
"autotrain_compatible",
"endpoints_compatible",
"has_space",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
feature-extraction | transformers | {} | cambridgeltl/tacl-bert-base-chinese | null | [
"transformers",
"pytorch",
"bert",
"feature-extraction",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
feature-extraction | transformers | {} | cambridgeltl/tacl-bert-base-uncased | null | [
"transformers",
"pytorch",
"bert",
"feature-extraction",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
feature-extraction | transformers | ---
language: en
tags:
- sentence-embeddings
- sentence-similarity
- dual-encoder
---
### cambridgeltl/trans-encoder-bi-simcse-bert-base
An unsupervised sentence encoder (bi-encoder) proposed by [Liu et al. (2021)](https://arxiv.org/pdf/2109.13059.pdf). The model is trained with unlabelled sentence pairs sampled from STS2012-2016, STS-b, and SICK-R, using [princeton-nlp/unsup-simcse-bert-base-uncased](https://huggingface.co/princeton-nlp/unsup-simcse-bert-base-uncased) as the base model. Please use `[CLS]` (before pooler) as the representation of the input.
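For illustration, a minimal bi-encoder scoring sketch with plain `transformers` (the sentence pair is made up; "`[CLS]` before pooler" is taken to mean `last_hidden_state[:, 0, :]`):
```python
import torch
from transformers import AutoModel, AutoTokenizer

# Hypothetical sketch: encode both sentences and compare their [CLS] vectors.
model_name = "cambridgeltl/trans-encoder-bi-simcse-bert-base"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModel.from_pretrained(model_name)
model.eval()

pair = ["A man is playing a flute.", "Someone is playing a wind instrument."]
batch = tokenizer(pair, padding=True, return_tensors="pt")
with torch.no_grad():
    cls = model(**batch).last_hidden_state[:, 0, :]

score = torch.nn.functional.cosine_similarity(cls[0], cls[1], dim=0)
print(score.item())   # higher = more similar
```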
### Citation
```bibtex
@article{liu2021trans,
title={Trans-Encoder: Unsupervised sentence-pair modelling through self- and mutual-distillations},
author={Liu, Fangyu and Jiao, Yunlong and Massiah, Jordan and Yilmaz, Emine and Havrylov, Serhii},
journal={arXiv preprint arXiv:2109.13059},
year={2021}
}
```
| {} | cambridgeltl/trans-encoder-bi-simcse-bert-base | null | [
"transformers",
"pytorch",
"bert",
"feature-extraction",
"arxiv:2109.13059",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
feature-extraction | transformers | ---
language: en
tags:
- sentence-embeddings
- sentence-similarity
- dual-encoder
---
### cambridgeltl/trans-encoder-bi-simcse-bert-large
An unsupervised sentence encoder (bi-encoder) proposed by [Liu et al. (2021)](https://arxiv.org/pdf/2109.13059.pdf). The model is trained with unlabelled sentence pairs sampled from STS2012-2016, STS-b, and SICK-R, using [princeton-nlp/unsup-simcse-bert-large-uncased](https://huggingface.co/princeton-nlp/unsup-simcse-bert-large-uncased) as the base model. Please use `[CLS]` (before pooler) as the representation of the input.
### Citation
```bibtex
@article{liu2021trans,
title={Trans-Encoder: Unsupervised sentence-pair modelling through self- and mutual-distillations},
author={Liu, Fangyu and Jiao, Yunlong and Massiah, Jordan and Yilmaz, Emine and Havrylov, Serhii},
journal={arXiv preprint arXiv:2109.13059},
year={2021}
}
```
| {} | cambridgeltl/trans-encoder-bi-simcse-bert-large | null | [
"transformers",
"pytorch",
"bert",
"feature-extraction",
"arxiv:2109.13059",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
feature-extraction | transformers | ---
language: en
tags:
- sentence-embeddings
- sentence-similarity
- dual-encoder
---
### cambridgeltl/trans-encoder-bi-simcse-roberta-base
An unsupervised sentence encoder (bi-encoder) proposed by [Liu et al. (2021)](https://arxiv.org/pdf/2109.13059.pdf). The model is trained with unlabelled sentence pairs sampled from STS2012-2016, STS-b, and SICK-R, using [princeton-nlp/unsup-simcse-roberta-base](https://huggingface.co/princeton-nlp/unsup-simcse-roberta-base) as the base model. Please use `[CLS]` (before pooler) as the representation of the input.
### Citation
```bibtex
@article{liu2021trans,
title={Trans-Encoder: Unsupervised sentence-pair modelling through self- and mutual-distillations},
author={Liu, Fangyu and Jiao, Yunlong and Massiah, Jordan and Yilmaz, Emine and Havrylov, Serhii},
journal={arXiv preprint arXiv:2109.13059},
year={2021}
}
```
| {} | cambridgeltl/trans-encoder-bi-simcse-roberta-base | null | [
"transformers",
"pytorch",
"roberta",
"feature-extraction",
"arxiv:2109.13059",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
feature-extraction | transformers | ---
language: en
tags:
- sentence-embeddings
- sentence-similarity
- dual-encoder
---
### cambridgeltl/trans-encoder-bi-simcse-roberta-large
An unsupervised sentence encoder (bi-encoder) proposed by [Liu et al. (2021)](https://arxiv.org/pdf/2109.13059.pdf). The model is trained with unlabelled sentence pairs sampled from STS2012-2016, STS-b, and SICK-R, using [princeton-nlp/unsup-simcse-roberta-large](https://huggingface.co/princeton-nlp/unsup-simcse-roberta-large) as the base model. Please use `[CLS]` (before pooler) as the representation of the input.
### Citation
```bibtex
@article{liu2021trans,
title={Trans-Encoder: Unsupervised sentence-pair modelling through self- and mutual-distillations},
author={Liu, Fangyu and Jiao, Yunlong and Massiah, Jordan and Yilmaz, Emine and Havrylov, Serhii},
journal={arXiv preprint arXiv:2109.13059},
year={2021}
}
```
| {} | cambridgeltl/trans-encoder-bi-simcse-roberta-large | null | [
"transformers",
"pytorch",
"roberta",
"feature-extraction",
"arxiv:2109.13059",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text-classification | transformers | {} | cambridgeltl/trans-encoder-cross-simcse-bert-base | null | [
"transformers",
"pytorch",
"bert",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
text-classification | transformers | {} | cambridgeltl/trans-encoder-cross-simcse-bert-large | null | [
"transformers",
"pytorch",
"bert",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
text-classification | transformers | {} | cambridgeltl/trans-encoder-cross-simcse-roberta-base | null | [
"transformers",
"pytorch",
"roberta",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
text-classification | transformers | {} | cambridgeltl/trans-encoder-cross-simcse-roberta-large | null | [
"transformers",
"pytorch",
"roberta",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/xlmr-lang-sft-ar-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/xlmr-lang-sft-aym-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/xlmr-lang-sft-bg-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/xlmr-lang-sft-bzd-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/xlmr-lang-sft-cni-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/xlmr-lang-sft-de-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/xlmr-lang-sft-el-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/xlmr-lang-sft-en-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/xlmr-lang-sft-es-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/xlmr-lang-sft-fr-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/xlmr-lang-sft-gn-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/xlmr-lang-sft-hch-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/xlmr-lang-sft-hi-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/xlmr-lang-sft-ko-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/xlmr-lang-sft-nah-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/xlmr-lang-sft-oto-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/xlmr-lang-sft-quy-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/xlmr-lang-sft-ro-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/xlmr-lang-sft-ru-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/xlmr-lang-sft-shp-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/xlmr-lang-sft-swa-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/xlmr-lang-sft-tar-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/xlmr-lang-sft-th-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/xlmr-lang-sft-tr-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/xlmr-lang-sft-ur-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/xlmr-lang-sft-vi-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/xlmr-lang-sft-zh-small | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/xlmr-task-sft-nli-ms | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/xlmr-task-sft-nli | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | cambridgeltl/xlmr-task-sft-squadv1-ms | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | transformers |
# CamemBERT: a Tasty French Language Model
## Introduction
[CamemBERT](https://arxiv.org/abs/1911.03894) is a state-of-the-art language model for French based on the RoBERTa model.
It is now available on Hugging Face in 6 different versions with varying numbers of parameters, amounts of pretraining data, and pretraining data source domains.
For further information or requests, please visit the [CamemBERT website](https://camembert-model.fr/).
## Pre-trained models
| Model | #params | Arch. | Training data |
|--------------------------------|--------------------------------|-------|-----------------------------------|
| `camembert-base` | 110M | Base | OSCAR (138 GB of text) |
| `camembert/camembert-large` | 335M | Large | CCNet (135 GB of text) |
| `camembert/camembert-base-ccnet` | 110M | Base | CCNet (135 GB of text) |
| `camembert/camembert-base-wikipedia-4gb` | 110M | Base | Wikipedia (4 GB of text) |
| `camembert/camembert-base-oscar-4gb` | 110M | Base | Subsample of OSCAR (4 GB of text) |
| `camembert/camembert-base-ccnet-4gb` | 110M | Base | Subsample of CCNet (4 GB of text) |
## How to use CamemBERT with HuggingFace
##### Load CamemBERT and its sub-word tokenizer :
```python
from transformers import CamembertModel, CamembertTokenizer
# You can replace "camembert-base" with any other model from the table, e.g. "camembert/camembert-large".
tokenizer = CamembertTokenizer.from_pretrained("camembert/camembert-base-ccnet-4gb")
camembert = CamembertModel.from_pretrained("camembert/camembert-base-ccnet-4gb")
camembert.eval() # disable dropout (or leave in train mode to finetune)
```
##### Filling masks using pipeline
```python
from transformers import pipeline
camembert_fill_mask = pipeline("fill-mask", model="camembert/camembert-base-ccnet-4gb", tokenizer="camembert/camembert-base-ccnet-4gb")
results = camembert_fill_mask("Le camembert est-il <mask> ?")
# results
#[{'sequence': '<s> Le camembert est-il sain?</s>', 'score': 0.07001790404319763, 'token': 10286},
#{'sequence': '<s> Le camembert est-il français?</s>', 'score': 0.057594332844018936, 'token': 384},
#{'sequence': '<s> Le camembert est-il bon?</s>', 'score': 0.04098724573850632, 'token': 305},
#{'sequence': '<s> Le camembert est-il périmé?</s>', 'score': 0.03486393392086029, 'token': 30862},
#{'sequence': '<s> Le camembert est-il cher?</s>', 'score': 0.021535946056246758, 'token': 1604}]
```
##### Extract contextual embedding features from Camembert output
```python
import torch
# Tokenize in sub-words with SentencePiece
tokenized_sentence = tokenizer.tokenize("J'aime le camembert !")
# ['▁J', "'", 'aime', '▁le', '▁ca', 'member', 't', '▁!']
# Convert tokens to vocabulary ids and add the special start and end tokens
encoded_sentence = tokenizer.encode(tokenized_sentence)
# [5, 133, 22, 1250, 16, 12034, 14324, 81, 76, 6]
# NB: Can be done in one step: tokenizer.encode("J'aime le camembert !")
# Feed tokens to Camembert as a torch tensor (batch dim 1)
encoded_sentence = torch.tensor(encoded_sentence).unsqueeze(0)
embeddings, _ = camembert(encoded_sentence)
# embeddings.detach()
# embeddings.size torch.Size([1, 10, 768])
#tensor([[[ 0.0331, 0.0095, -0.2776, ..., 0.2875, -0.0827, -0.2467],
# [-0.1348, 0.0478, -0.5409, ..., 0.8330, 0.0467, 0.0662],
# [ 0.0920, -0.0264, 0.0177, ..., 0.1112, 0.0108, -0.1123],
# ...,
```
##### Extract contextual embedding features from all Camembert layers
```python
from transformers import CamembertConfig
# (Need to reload the model with new config)
config = CamembertConfig.from_pretrained("camembert/camembert-base-ccnet-4gb", output_hidden_states=True)
camembert = CamembertModel.from_pretrained("camembert/camembert-base-ccnet-4gb", config=config)
embeddings, _, all_layer_embeddings = camembert(encoded_sentence)
# all_layer_embeddings list of len(all_layer_embeddings) == 13 (input embedding layer + 12 self attention layers)
all_layer_embeddings[5]
# layer 5 contextual embedding : size torch.Size([1, 10, 768])
#tensor([[[-0.0144, 0.1855, 0.4895, ..., -0.1537, 0.0107, -0.2293],
# [-0.6664, -0.0880, -0.1539, ..., 0.3635, 0.4047, 0.1258],
# [ 0.0511, 0.0540, 0.2545, ..., 0.0709, -0.0288, -0.0779],
# ...,
```
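Note that the snippets above follow the older tuple-returning API of `transformers`. On recent versions the forward pass returns a `ModelOutput` object; a minimal sketch of the same extraction under that assumption:
```python
import torch
from transformers import CamembertModel, CamembertTokenizer

# Sketch for recent transformers versions (ModelOutput instead of tuples).
tokenizer = CamembertTokenizer.from_pretrained("camembert/camembert-base-ccnet-4gb")
camembert = CamembertModel.from_pretrained("camembert/camembert-base-ccnet-4gb",
                                           output_hidden_states=True)
camembert.eval()

encoded = tokenizer("J'aime le camembert !", return_tensors="pt")
with torch.no_grad():
    outputs = camembert(**encoded)

last_layer = outputs.last_hidden_state   # equivalent of `embeddings` above
all_layers = outputs.hidden_states       # tuple of 13 tensors (embedding layer + 12 layers)
print(last_layer.shape, len(all_layers))
```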
## Authors
CamemBERT was trained and evaluated by Louis Martin\*, Benjamin Muller\*, Pedro Javier Ortiz Suárez\*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot.
## Citation
If you use our work, please cite:
```bibtex
@inproceedings{martin2020camembert,
title={CamemBERT: a Tasty French Language Model},
author={Martin, Louis and Muller, Benjamin and Su{\'a}rez, Pedro Javier Ortiz and Dupont, Yoann and Romary, Laurent and de la Clergerie, {\'E}ric Villemonte and Seddah, Djam{\'e} and Sagot, Beno{\^\i}t},
booktitle={Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics},
year={2020}
}
```
| {"language": "fr"} | almanach/camembert-base-ccnet-4gb | null | [
"transformers",
"pytorch",
"camembert",
"fr",
"arxiv:1911.03894",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
null | transformers |
# CamemBERT: a Tasty French Language Model
## Introduction
[CamemBERT](https://arxiv.org/abs/1911.03894) is a state-of-the-art language model for French based on the RoBERTa model.
It is now available on Hugging Face in 6 different versions with varying numbers of parameters, amounts of pretraining data, and pretraining data source domains.
For further information or requests, please visit the [CamemBERT website](https://camembert-model.fr/).
## Pre-trained models
| Model | #params | Arch. | Training data |
|--------------------------------|--------------------------------|-------|-----------------------------------|
| `camembert-base` | 110M | Base | OSCAR (138 GB of text) |
| `camembert/camembert-large` | 335M | Large | CCNet (135 GB of text) |
| `camembert/camembert-base-ccnet` | 110M | Base | CCNet (135 GB of text) |
| `camembert/camembert-base-wikipedia-4gb` | 110M | Base | Wikipedia (4 GB of text) |
| `camembert/camembert-base-oscar-4gb` | 110M | Base | Subsample of OSCAR (4 GB of text) |
| `camembert/camembert-base-ccnet-4gb` | 110M | Base | Subsample of CCNet (4 GB of text) |
## How to use CamemBERT with HuggingFace
##### Load CamemBERT and its sub-word tokenizer :
```python
from transformers import CamembertModel, CamembertTokenizer
# You can replace "camembert-base" with any other model from the table, e.g. "camembert/camembert-large".
tokenizer = CamembertTokenizer.from_pretrained("camembert/camembert-base-ccnet")
camembert = CamembertModel.from_pretrained("camembert/camembert-base-ccnet")
camembert.eval() # disable dropout (or leave in train mode to finetune)
```
##### Filling masks using pipeline
```python
from transformers import pipeline
camembert_fill_mask = pipeline("fill-mask", model="camembert/camembert-base-ccnet", tokenizer="camembert/camembert-base-ccnet")
results = camembert_fill_mask("Le camembert est <mask> :)")
# results
#[{'sequence': '<s> Le camembert est bon :)</s>', 'score': 0.14011502265930176, 'token': 305},
# {'sequence': '<s> Le camembert est délicieux :)</s>', 'score': 0.13929404318332672, 'token': 11661},
# {'sequence': '<s> Le camembert est excellent :)</s>', 'score': 0.07010319083929062, 'token': 3497},
# {'sequence': '<s> Le camembert est parfait :)</s>', 'score': 0.025885622948408127, 'token': 2528},
# {'sequence': '<s> Le camembert est top :)</s>', 'score': 0.025684962049126625, 'token': 2328}]
```
##### Extract contextual embedding features from Camembert output
```python
import torch
# Tokenize in sub-words with SentencePiece
tokenized_sentence = tokenizer.tokenize("J'aime le camembert !")
# ['▁J', "'", 'aime', '▁le', '▁cam', 'ember', 't', '▁!']
# Convert tokens to vocabulary ids and add the special start and end tokens
encoded_sentence = tokenizer.encode(tokenized_sentence)
# [5, 133, 22, 1250, 16, 12034, 14324, 81, 76, 6]
# NB: Can be done in one step: tokenizer.encode("J'aime le camembert !")
# Feed tokens to Camembert as a torch tensor (batch dim 1)
encoded_sentence = torch.tensor(encoded_sentence).unsqueeze(0)
embeddings, _ = camembert(encoded_sentence)
# embeddings.detach()
# embeddings.size torch.Size([1, 10, 768])
#tensor([[[ 0.0667, -0.2467, 0.0954, ..., 0.2144, 0.0279, 0.3621],
# [-0.0472, 0.4092, -0.6602, ..., 0.2095, 0.1391, -0.0401],
# [ 0.1911, -0.2347, -0.0811, ..., 0.4306, -0.0639, 0.1821],
# ...,
```
##### Extract contextual embedding features from all Camembert layers
```python
from transformers import CamembertConfig
# (Need to reload the model with new config)
config = CamembertConfig.from_pretrained("camembert/camembert-base-ccnet", output_hidden_states=True)
camembert = CamembertModel.from_pretrained("camembert/camembert-base-ccnet", config=config)
embeddings, _, all_layer_embeddings = camembert(encoded_sentence)
# all_layer_embeddings list of len(all_layer_embeddings) == 13 (input embedding layer + 12 self attention layers)
all_layer_embeddings[5]
# layer 5 contextual embedding : size torch.Size([1, 10, 768])
#tensor([[[ 0.0057, -0.1022, 0.0163, ..., -0.0675, -0.0360, 0.1078],
# [-0.1096, -0.3344, -0.0593, ..., 0.1625, -0.0432, -0.1646],
# [ 0.3751, -0.3829, 0.0844, ..., 0.1067, -0.0330, 0.3334],
# ...,
```
## Authors
CamemBERT was trained and evaluated by Louis Martin\*, Benjamin Muller\*, Pedro Javier Ortiz Suárez\*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot.
## Citation
If you use our work, please cite:
```bibtex
@inproceedings{martin2020camembert,
title={CamemBERT: a Tasty French Language Model},
author={Martin, Louis and Muller, Benjamin and Su{\'a}rez, Pedro Javier Ortiz and Dupont, Yoann and Romary, Laurent and de la Clergerie, {\'E}ric Villemonte and Seddah, Djam{\'e} and Sagot, Beno{\^\i}t},
booktitle={Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics},
year={2020}
}
```
| {"language": "fr"} | almanach/camembert-base-ccnet | null | [
"transformers",
"pytorch",
"camembert",
"fr",
"arxiv:1911.03894",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
null | transformers |
# CamemBERT: a Tasty French Language Model
## Introduction
[CamemBERT](https://arxiv.org/abs/1911.03894) is a state-of-the-art language model for French based on the RoBERTa model.
It is now available on Hugging Face in 6 different versions with varying numbers of parameters, amounts of pretraining data, and pretraining data source domains.
For further information or requests, please visit the [CamemBERT website](https://camembert-model.fr/).
## Pre-trained models
| Model | #params | Arch. | Training data |
|--------------------------------|--------------------------------|-------|-----------------------------------|
| `camembert-base` | 110M | Base | OSCAR (138 GB of text) |
| `camembert/camembert-large` | 335M | Large | CCNet (135 GB of text) |
| `camembert/camembert-base-ccnet` | 110M | Base | CCNet (135 GB of text) |
| `camembert/camembert-base-wikipedia-4gb` | 110M | Base | Wikipedia (4 GB of text) |
| `camembert/camembert-base-oscar-4gb` | 110M | Base | Subsample of OSCAR (4 GB of text) |
| `camembert/camembert-base-ccnet-4gb` | 110M | Base | Subsample of CCNet (4 GB of text) |
## How to use CamemBERT with HuggingFace
##### Load CamemBERT and its sub-word tokenizer :
```python
from transformers import CamembertModel, CamembertTokenizer
# You can replace "camembert-base" with any other model from the table, e.g. "camembert/camembert-large".
tokenizer = CamembertTokenizer.from_pretrained("camembert/camembert-base-oscar-4gb")
camembert = CamembertModel.from_pretrained("camembert/camembert-base-oscar-4gb")
camembert.eval() # disable dropout (or leave in train mode to finetune)
```
##### Filling masks using pipeline
```python
from transformers import pipeline
camembert_fill_mask = pipeline("fill-mask", model="camembert/camembert-base-oscar-4gb", tokenizer="camembert/camembert-base-oscar-4gb")
results = camembert_fill_mask("Le camembert est <mask> !")
# results
#[{'sequence': '<s> Le camembert est parfait!</s>', 'score': 0.04089554399251938, 'token': 1654},
#{'sequence': '<s> Le camembert est délicieux!</s>', 'score': 0.037193264812231064, 'token': 7200},
#{'sequence': '<s> Le camembert est prêt!</s>', 'score': 0.025467922911047935, 'token': 1415},
#{'sequence': '<s> Le camembert est meilleur!</s>', 'score': 0.022812040522694588, 'token': 528},
#{'sequence': '<s> Le camembert est différent!</s>', 'score': 0.017135459929704666, 'token': 2935}]
```
##### Extract contextual embedding features from Camembert output
```python
import torch
# Tokenize in sub-words with SentencePiece
tokenized_sentence = tokenizer.tokenize("J'aime le camembert !")
# ['▁J', "'", 'aime', '▁le', '▁ca', 'member', 't', '▁!']
# Convert tokens to vocabulary ids and add the special start and end tokens
encoded_sentence = tokenizer.encode(tokenized_sentence)
# [5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]
# NB: Can be done in one step: tokenizer.encode("J'aime le camembert !")
# Feed tokens to Camembert as a torch tensor (batch dim 1)
encoded_sentence = torch.tensor(encoded_sentence).unsqueeze(0)
embeddings, _ = camembert(encoded_sentence)
# embeddings.detach()
# embeddings.size torch.Size([1, 10, 768])
#tensor([[[-0.1120, -0.1464, 0.0181, ..., -0.1723, -0.0278, 0.1606],
# [ 0.1234, 0.1202, -0.0773, ..., -0.0405, -0.0668, -0.0788],
# [-0.0440, 0.0480, -0.1926, ..., 0.1066, -0.0961, 0.0637],
# ...,
```
##### Extract contextual embedding features from all Camembert layers
```python
from transformers import CamembertConfig
# (Need to reload the model with new config)
config = CamembertConfig.from_pretrained("camembert/camembert-base-oscar-4gb", output_hidden_states=True)
camembert = CamembertModel.from_pretrained("camembert/camembert-base-oscar-4gb", config=config)
embeddings, _, all_layer_embeddings = camembert(encoded_sentence)
# all_layer_embeddings list of len(all_layer_embeddings) == 13 (input embedding layer + 12 self attention layers)
all_layer_embeddings[5]
# layer 5 contextual embedding : size torch.Size([1, 10, 768])
#tensor([[[-0.1584, -0.1207, -0.0179, ..., 0.5457, 0.1491, -0.1191],
# [-0.1122, 0.3634, 0.0676, ..., 0.4395, -0.0470, -0.3781],
# [-0.2232, 0.0019, 0.0140, ..., 0.4461, -0.0233, 0.0735],
# ...,
```
## Authors
CamemBERT was trained and evaluated by Louis Martin\*, Benjamin Muller\*, Pedro Javier Ortiz Suárez\*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot.
## Citation
If you use our work, please cite:
```bibtex
@inproceedings{martin2020camembert,
title={CamemBERT: a Tasty French Language Model},
author={Martin, Louis and Muller, Benjamin and Su{\'a}rez, Pedro Javier Ortiz and Dupont, Yoann and Romary, Laurent and de la Clergerie, {\'E}ric Villemonte and Seddah, Djam{\'e} and Sagot, Beno{\^\i}t},
booktitle={Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics},
year={2020}
}
```
| {"language": "fr"} | almanach/camembert-base-oscar-4gb | null | [
"transformers",
"pytorch",
"camembert",
"fr",
"arxiv:1911.03894",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
null | transformers |
# CamemBERT: a Tasty French Language Model
## Introduction
[CamemBERT](https://arxiv.org/abs/1911.03894) is a state-of-the-art language model for French based on the RoBERTa model.
It is now available on Hugging Face in 6 different versions with varying numbers of parameters, amounts of pretraining data, and pretraining data source domains.
For further information or requests, please visit the [CamemBERT website](https://camembert-model.fr/).
## Pre-trained models
| Model | #params | Arch. | Training data |
|--------------------------------|--------------------------------|-------|-----------------------------------|
| `camembert-base` | 110M | Base | OSCAR (138 GB of text) |
| `camembert/camembert-large` | 335M | Large | CCNet (135 GB of text) |
| `camembert/camembert-base-ccnet` | 110M | Base | CCNet (135 GB of text) |
| `camembert/camembert-base-wikipedia-4gb` | 110M | Base | Wikipedia (4 GB of text) |
| `camembert/camembert-base-oscar-4gb` | 110M | Base | Subsample of OSCAR (4 GB of text) |
| `camembert/camembert-base-ccnet-4gb` | 110M | Base | Subsample of CCNet (4 GB of text) |
## How to use CamemBERT with HuggingFace
##### Load CamemBERT and its sub-word tokenizer :
```python
from transformers import CamembertModel, CamembertTokenizer
# You can replace "camembert-base" with any other model from the table, e.g. "camembert/camembert-large".
tokenizer = CamembertTokenizer.from_pretrained("camembert/camembert-base-wikipedia-4gb")
camembert = CamembertModel.from_pretrained("camembert/camembert-base-wikipedia-4gb")
camembert.eval() # disable dropout (or leave in train mode to finetune)
```
##### Filling masks using pipeline
```python
from transformers import pipeline
camembert_fill_mask = pipeline("fill-mask", model="camembert/camembert-base-wikipedia-4gb", tokenizer="camembert/camembert-base-wikipedia-4gb")
results = camembert_fill_mask("Le camembert est un fromage de <mask>!")
# results
#[{'sequence': '<s> Le camembert est un fromage de chèvre!</s>', 'score': 0.4937814474105835, 'token': 19370},
#{'sequence': '<s> Le camembert est un fromage de brebis!</s>', 'score': 0.06255942583084106, 'token': 30616},
#{'sequence': '<s> Le camembert est un fromage de montagne!</s>', 'score': 0.04340197145938873, 'token': 2364},
# {'sequence': '<s> Le camembert est un fromage de Noël!</s>', 'score': 0.02823255956172943, 'token': 3236},
#{'sequence': '<s> Le camembert est un fromage de vache!</s>', 'score': 0.021357402205467224, 'token': 12329}]
```
##### Extract contextual embedding features from Camembert output
```python
import torch
# Tokenize in sub-words with SentencePiece
tokenized_sentence = tokenizer.tokenize("J'aime le camembert !")
# ['▁J', "'", 'aime', '▁le', '▁ca', 'member', 't', '▁!']
# Convert tokens to vocabulary ids and add the special start and end tokens
encoded_sentence = tokenizer.encode(tokenized_sentence)
# [5, 221, 10, 10600, 14, 8952, 10540, 75, 1114, 6]
# NB: Can be done in one step: tokenizer.encode("J'aime le camembert !")
# Feed tokens to Camembert as a torch tensor (batch dim 1)
encoded_sentence = torch.tensor(encoded_sentence).unsqueeze(0)
embeddings, _ = camembert(encoded_sentence)
# embeddings.detach()
# embeddings.size torch.Size([1, 10, 768])
#tensor([[[-0.0928, 0.0506, -0.0094, ..., -0.2388, 0.1177, -0.1302],
# [ 0.0662, 0.1030, -0.2355, ..., -0.4224, -0.0574, -0.2802],
# [-0.0729, 0.0547, 0.0192, ..., -0.1743, 0.0998, -0.2677],
# ...,
```
##### Extract contextual embedding features from all Camembert layers
```python
from transformers import CamembertConfig
# (Need to reload the model with new config)
config = CamembertConfig.from_pretrained("camembert/camembert-base-wikipedia-4gb", output_hidden_states=True)
camembert = CamembertModel.from_pretrained("camembert/camembert-base-wikipedia-4gb", config=config)
embeddings, _, all_layer_embeddings = camembert(encoded_sentence)
# all_layer_embeddings list of len(all_layer_embeddings) == 13 (input embedding layer + 12 self attention layers)
all_layer_embeddings[5]
# layer 5 contextual embedding : size torch.Size([1, 10, 768])
#tensor([[[-0.0059, -0.0227, 0.0065, ..., -0.0770, 0.0369, 0.0095],
# [ 0.2838, -0.1531, -0.3642, ..., -0.0027, -0.8502, -0.7914],
# [-0.0073, -0.0338, -0.0011, ..., 0.0533, -0.0250, -0.0061],
# ...,
```
## Authors
CamemBERT was trained and evaluated by Louis Martin\*, Benjamin Muller\*, Pedro Javier Ortiz Suárez\*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot.
## Citation
If you use our work, please cite:
```bibtex
@inproceedings{martin2020camembert,
title={CamemBERT: a Tasty French Language Model},
author={Martin, Louis and Muller, Benjamin and Su{\'a}rez, Pedro Javier Ortiz and Dupont, Yoann and Romary, Laurent and de la Clergerie, {\'E}ric Villemonte and Seddah, Djam{\'e} and Sagot, Beno{\^\i}t},
booktitle={Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics},
year={2020}
}
```
| {"language": "fr"} | almanach/camembert-base-wikipedia-4gb | null | [
"transformers",
"pytorch",
"camembert",
"fr",
"arxiv:1911.03894",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
fill-mask | transformers |
> 🚨 **Update:** This checkpoint is deprecated; please use https://huggingface.co/almanach/camembert-base instead. 🚨
# CamemBERT: a Tasty French Language Model
## Introduction
[CamemBERT](https://arxiv.org/abs/1911.03894) is a state-of-the-art language model for French based on the RoBERTa model.
It is now available on Hugging Face in 6 different versions with varying numbers of parameters, amounts of pretraining data, and pretraining data source domains.
For further information or requests, please visit the [CamemBERT website](https://camembert-model.fr/).
## Pre-trained models
| Model | #params | Arch. | Training data |
|--------------------------------|--------------------------------|-------|-----------------------------------|
| `camembert-base` | 110M | Base | OSCAR (138 GB of text) |
| `camembert/camembert-large` | 335M | Large | CCNet (135 GB of text) |
| `camembert/camembert-base-ccnet` | 110M | Base | CCNet (135 GB of text) |
| `camembert/camembert-base-wikipedia-4gb` | 110M | Base | Wikipedia (4 GB of text) |
| `camembert/camembert-base-oscar-4gb` | 110M | Base | Subsample of OSCAR (4 GB of text) |
| `camembert/camembert-base-ccnet-4gb` | 110M | Base | Subsample of CCNet (4 GB of text) |
## How to use CamemBERT with HuggingFace
##### Load CamemBERT and its sub-word tokenizer :
```python
from transformers import CamembertModel, CamembertTokenizer
# You can replace "camembert-base" with any other model from the table, e.g. "camembert/camembert-large".
tokenizer = CamembertTokenizer.from_pretrained("camembert/camembert-base-wikipedia-4gb")
camembert = CamembertModel.from_pretrained("camembert/camembert-base-wikipedia-4gb")
camembert.eval() # disable dropout (or leave in train mode to finetune)
```
##### Filling masks using pipeline
```python
from transformers import pipeline
camembert_fill_mask = pipeline("fill-mask", model="camembert/camembert-base-wikipedia-4gb", tokenizer="camembert/camembert-base-wikipedia-4gb")
results = camembert_fill_mask("Le camembert est un fromage de <mask>!")
# results
#[{'sequence': '<s> Le camembert est un fromage de chèvre!</s>', 'score': 0.4937814474105835, 'token': 19370},
#{'sequence': '<s> Le camembert est un fromage de brebis!</s>', 'score': 0.06255942583084106, 'token': 30616},
#{'sequence': '<s> Le camembert est un fromage de montagne!</s>', 'score': 0.04340197145938873, 'token': 2364},
# {'sequence': '<s> Le camembert est un fromage de Noël!</s>', 'score': 0.02823255956172943, 'token': 3236},
#{'sequence': '<s> Le camembert est un fromage de vache!</s>', 'score': 0.021357402205467224, 'token': 12329}]
```
##### Extract contextual embedding features from Camembert output
```python
import torch
# Tokenize in sub-words with SentencePiece
tokenized_sentence = tokenizer.tokenize("J'aime le camembert !")
# ['▁J', "'", 'aime', '▁le', '▁ca', 'member', 't', '▁!']
# Convert tokens to vocabulary ids and add the special start and end tokens
encoded_sentence = tokenizer.encode(tokenized_sentence)
# [5, 221, 10, 10600, 14, 8952, 10540, 75, 1114, 6]
# NB: Can be done in one step: tokenizer.encode("J'aime le camembert !")
# Feed tokens to Camembert as a torch tensor (batch dim 1)
encoded_sentence = torch.tensor(encoded_sentence).unsqueeze(0)
embeddings, _ = camembert(encoded_sentence)
# embeddings.detach()
# embeddings.size torch.Size([1, 10, 768])
#tensor([[[-0.0928, 0.0506, -0.0094, ..., -0.2388, 0.1177, -0.1302],
# [ 0.0662, 0.1030, -0.2355, ..., -0.4224, -0.0574, -0.2802],
# [-0.0729, 0.0547, 0.0192, ..., -0.1743, 0.0998, -0.2677],
# ...,
```
##### Extract contextual embedding features from all Camembert layers
```python
from transformers import CamembertConfig
# (Need to reload the model with new config)
config = CamembertConfig.from_pretrained("camembert/camembert-base-wikipedia-4gb", output_hidden_states=True)
camembert = CamembertModel.from_pretrained("camembert/camembert-base-wikipedia-4gb", config=config)
embeddings, _, all_layer_embeddings = camembert(encoded_sentence)
# all_layer_embeddings list of len(all_layer_embeddings) == 13 (input embedding layer + 12 self attention layers)
all_layer_embeddings[5]
# layer 5 contextual embedding : size torch.Size([1, 10, 768])
#tensor([[[-0.0059, -0.0227, 0.0065, ..., -0.0770, 0.0369, 0.0095],
# [ 0.2838, -0.1531, -0.3642, ..., -0.0027, -0.8502, -0.7914],
# [-0.0073, -0.0338, -0.0011, ..., 0.0533, -0.0250, -0.0061],
# ...,
```
## Authors
CamemBERT was trained and evaluated by Louis Martin\*, Benjamin Muller\*, Pedro Javier Ortiz Suárez\*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot.
## Citation
If you use our work, please cite:
```bibtex
@inproceedings{martin2020camembert,
title={CamemBERT: a Tasty French Language Model},
author={Martin, Louis and Muller, Benjamin and Su{\'a}rez, Pedro Javier Ortiz and Dupont, Yoann and Romary, Laurent and de la Clergerie, {\'E}ric Villemonte and Seddah, Djam{\'e} and Sagot, Beno{\^\i}t},
booktitle={Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics},
year={2020}
}
```
| {"language": "fr"} | almanach/camembert-base-legacy | null | [
"transformers",
"pytorch",
"camembert",
"fill-mask",
"fr",
"arxiv:1911.03894",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
null | transformers |
# CamemBERT: a Tasty French Language Model
## Introduction
[CamemBERT](https://arxiv.org/abs/1911.03894) is a state-of-the-art language model for French based on the RoBERTa model.
It is now available on Hugging Face in 6 different versions with varying numbers of parameters, amounts of pretraining data, and pretraining data source domains.
For further information or requests, please visit the [CamemBERT website](https://camembert-model.fr/).
## Pre-trained models
| Model | #params | Arch. | Training data |
|--------------------------------|--------------------------------|-------|-----------------------------------|
| `camembert-base` | 110M | Base | OSCAR (138 GB of text) |
| `camembert/camembert-large` | 335M | Large | CCNet (135 GB of text) |
| `camembert/camembert-base-ccnet` | 110M | Base | CCNet (135 GB of text) |
| `camembert/camembert-base-wikipedia-4gb` | 110M | Base | Wikipedia (4 GB of text) |
| `camembert/camembert-base-oscar-4gb` | 110M | Base | Subsample of OSCAR (4 GB of text) |
| `camembert/camembert-base-ccnet-4gb` | 110M | Base | Subsample of CCNet (4 GB of text) |
## How to use CamemBERT with HuggingFace
##### Load CamemBERT and its sub-word tokenizer :
```python
from transformers import CamembertModel, CamembertTokenizer
# You can replace "camembert-base" with any other model from the table, e.g. "camembert/camembert-large".
tokenizer = CamembertTokenizer.from_pretrained("camembert/camembert-large")
camembert = CamembertModel.from_pretrained("camembert/camembert-large")
camembert.eval() # disable dropout (or leave in train mode to finetune)
```
##### Filling masks using pipeline
```python
from transformers import pipeline
camembert_fill_mask = pipeline("fill-mask", model="camembert/camembert-large", tokenizer="camembert/camembert-large")
results = camembert_fill_mask("Le camembert est <mask> :)")
# results
#[{'sequence': '<s> Le camembert est bon :)</s>', 'score': 0.15560828149318695, 'token': 305},
#{'sequence': '<s> Le camembert est excellent :)</s>', 'score': 0.06821336597204208, 'token': 3497},
#{'sequence': '<s> Le camembert est délicieux :)</s>', 'score': 0.060438305139541626, 'token': 11661},
#{'sequence': '<s> Le camembert est ici :)</s>', 'score': 0.02023460529744625, 'token': 373},
#{'sequence': '<s> Le camembert est meilleur :)</s>', 'score': 0.01778135634958744, 'token': 876}]
```
##### Extract contextual embedding features from Camembert output
```python
import torch
# Tokenize in sub-words with SentencePiece
tokenized_sentence = tokenizer.tokenize("J'aime le camembert !")
# ['▁J', "'", 'aime', '▁le', '▁cam', 'ember', 't', '▁!']
# Convert tokens to vocabulary ids and add the special start and end tokens
encoded_sentence = tokenizer.encode(tokenized_sentence)
# [5, 133, 22, 1250, 16, 12034, 14324, 81, 76, 6]
# NB: Can be done in one step: tokenizer.encode("J'aime le camembert !")
# Feed tokens to Camembert as a torch tensor (batch dim 1)
encoded_sentence = torch.tensor(encoded_sentence).unsqueeze(0)
embeddings, _ = camembert(encoded_sentence)
# embeddings.detach()
# torch.Size([1, 10, 1024])
#tensor([[[-0.1284, 0.2643, 0.4374, ..., 0.1627, 0.1308, -0.2305],
# [ 0.4576, -0.6345, -0.2029, ..., -0.1359, -0.2290, -0.6318],
# [ 0.0381, 0.0429, 0.5111, ..., -0.1177, -0.1913, -0.1121],
# ...,
```
##### Extract contextual embedding features from all Camembert layers
```python
from transformers import CamembertConfig
# (Need to reload the model with new config)
config = CamembertConfig.from_pretrained("camembert/camembert-large", output_hidden_states=True)
camembert = CamembertModel.from_pretrained("camembert/camembert-large", config=config)
embeddings, _, all_layer_embeddings = camembert(encoded_sentence)
# all_layer_embeddings list of len(all_layer_embeddings) == 25 (input embedding layer + 24 self attention layers)
all_layer_embeddings[5]
# layer 5 contextual embedding : size torch.Size([1, 10, 1024])
#tensor([[[-0.0600, 0.0742, 0.0332, ..., -0.0525, -0.0637, -0.0287],
# [ 0.0950, 0.2840, 0.1985, ..., 0.2073, -0.2172, -0.6321],
# [ 0.1381, 0.1872, 0.1614, ..., -0.0339, -0.2530, -0.1182],
# ...,
```
## Authors
CamemBERT was trained and evaluated by Louis Martin\*, Benjamin Muller\*, Pedro Javier Ortiz Suárez\*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot.
## Citation
If you use our work, please cite:
```bibtex
@inproceedings{martin2020camembert,
title={CamemBERT: a Tasty French Language Model},
author={Martin, Louis and Muller, Benjamin and Su{\'a}rez, Pedro Javier Ortiz and Dupont, Yoann and Romary, Laurent and de la Clergerie, {\'E}ric Villemonte and Seddah, Djam{\'e} and Sagot, Beno{\^\i}t},
booktitle={Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics},
year={2020}
}
```
| {"language": "fr"} | almanach/camembert-large | null | [
"transformers",
"pytorch",
"camembert",
"fr",
"arxiv:1911.03894",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
feature-extraction | transformers | {} | camille/bert-base-pruned-voc-esw0.1-40000-en-de-cased | null | [
"transformers",
"pytorch",
"jax",
"bert",
"feature-extraction",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
feature-extraction | transformers | {} | camille/bert-base-pruned-voc-esw0.1-40000-en-fr-cased | null | [
"transformers",
"pytorch",
"jax",
"bert",
"feature-extraction",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
feature-extraction | transformers | {} | camille/bert-base-pruned-voc-esw0.3-40000-en-de-cased | null | [
"transformers",
"pytorch",
"jax",
"bert",
"feature-extraction",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
feature-extraction | transformers | {} | camille/bert-base-pruned-voc-esw0.3-40000-en-fr-cased | null | [
"transformers",
"pytorch",
"jax",
"bert",
"feature-extraction",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
feature-extraction | transformers | {} | camille/bert-base-pruned-voc-esw0.5-40000-en-de-cased | null | [
"transformers",
"pytorch",
"jax",
"bert",
"feature-extraction",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |