Commit b6d9472
David committed
1 Parent(s): 01b9197
Update README.md
README.md
CHANGED
@@ -27,8 +27,8 @@ The discriminator should therefore activate the logit corresponding to the fake
 ```python
 from transformers import ElectraForPreTraining, ElectraTokenizerFast
 
-discriminator = ElectraForPreTraining.from_pretrained("Recognai/
-tokenizer = ElectraTokenizerFast.from_pretrained("Recognai/
+discriminator = ElectraForPreTraining.from_pretrained("Recognai/selectra_medium")
+tokenizer = ElectraTokenizerFast.from_pretrained("Recognai/selectra_medium")
 
 sentence_with_fake_token = "Estamos desayunando pan rosa con tomate y aceite de oliva."
 
@@ -39,7 +39,7 @@ print("\t".join(tokenizer.tokenize(sentence_with_fake_token)))
 print("\t".join(map(lambda x: str(x)[:4], logits[1:-1])))
 """Output:
 Estamos desayun ##ando pan rosa con tomate y aceite de oliva .
-
+-2.2 -1.9 -6.4 -2.0 -0.6 -4.3 -3.2 -4.9 -5.5 -7.2 -4.5 -4.0
 """
 ```
 
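For context, the changed snippet can be read end to end roughly as below. The two removed `from_pretrained` lines are cut off in the rendered diff, so the previous model identifiers are not shown; the added lines point both calls at Recognai/selectra_medium. The tokenization and forward-pass lines between the two hunks are not part of this diff, so the `inputs`/`logits` glue in the sketch is an assumption, not the repository's exact code.

```python
# Minimal sketch of the README snippet after this commit. Only the lines that
# appear in the diff are from the repository; the glue code that builds
# `inputs` and `logits` is an assumption about the omitted lines.
from transformers import ElectraForPreTraining, ElectraTokenizerFast

discriminator = ElectraForPreTraining.from_pretrained("Recognai/selectra_medium")
tokenizer = ElectraTokenizerFast.from_pretrained("Recognai/selectra_medium")

# "pan rosa" replaces the original token, so the discriminator should flag it.
sentence_with_fake_token = "Estamos desayunando pan rosa con tomate y aceite de oliva."

# Assumed glue (not shown in the diff): encode the sentence and convert the
# per-token discriminator logits to a plain Python list.
inputs = tokenizer(sentence_with_fake_token, return_tensors="pt")
logits = discriminator(**inputs).logits[0].tolist()

# These two prints are context lines in the diff: one row of tokens and one
# row of logits truncated to four characters, skipping [CLS]/[SEP].
print("\t".join(tokenizer.tokenize(sentence_with_fake_token)))
print("\t".join(map(lambda x: str(x)[:4], logits[1:-1])))
```

Per the output added in this commit, the highest (least negative) logits fall on "pan" and "rosa", the replaced span the discriminator is expected to flag.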