Update README.md
Browse files
README.md
CHANGED
@@ -37,6 +37,19 @@ tokenizer = AutoTokenizer.from_pretrained(model_name)
|
|
37 |
|
38 |
text = "El president de la generalitat va dir "
|
39 |
inputs = tokenizer(text, return_tensors="pt")
|
40 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
41 |
|
42 |
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
|
|
|
37 |
|
38 |
text = "El president de la generalitat va dir "
|
39 |
inputs = tokenizer(text, return_tensors="pt")
|
40 |
+
|
41 |
+
outputs = model.generate(
|
42 |
+
**inputs,
|
43 |
+
do_sample=True,
|
44 |
+
max_length=150,
|
45 |
+
temperature=0.7,
|
46 |
+
top_p=0.8,
|
47 |
+
top_k=1000,
|
48 |
+
no_repeat_ngram_size=2,
|
49 |
+
num_return_sequences=1
|
50 |
+
)
|
51 |
+
|
52 |
+
# Decodificar y mostrar resultado
|
53 |
+
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
|
54 |
|
55 |
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
|