Update README.md
README.md CHANGED
@@ -106,30 +106,28 @@ Here is how to use this model:
 
 ```python
 import torch
-import
-from transformers import AutoTokenizer, AutoModelForCausalLM
+from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
 
-input_text = "
-model = "projecte-aina/aguila-7b"
-tokenizer = AutoTokenizer.from_pretrained(model)
-
-
+input_text = "El mercat del barri és fantàstic, hi pots trobar"
+
+model_id = "projecte-aina/aguila-7b"
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+generator = pipeline(
     "text-generation",
-    model=
+    model=model_id,
     tokenizer=tokenizer,
     torch_dtype=torch.bfloat16,
     trust_remote_code=True,
     device_map="auto",
 )
-generation =
+generation = generator(
     input_text,
-    max_length=200,
     do_sample=True,
     top_k=10,
     eos_token_id=tokenizer.eos_token_id,
 )
 
-print(f"Result: {generation['generated_text']}")
+print(f"Result: {generation[0]['generated_text']}")
 ```
 
 ## Limitations and bias
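Applied in full, the updated snippet reads as below. It is assembled directly from the added lines in the diff; only the comments and the English gloss of the Catalan prompt are added here for orientation (`AutoModelForCausalLM` is imported by the README but not used in the snippet itself):

```python
import torch
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM

# Catalan prompt: "The neighbourhood market is fantastic, there you can find"
input_text = "El mercat del barri és fantàstic, hi pots trobar"

model_id = "projecte-aina/aguila-7b"
tokenizer = AutoTokenizer.from_pretrained(model_id)

# trust_remote_code=True is required because the model repository ships its
# own modelling code; device_map="auto" places the weights on whatever
# devices are available, and bfloat16 halves their memory footprint.
generator = pipeline(
    "text-generation",
    model=model_id,
    tokenizer=tokenizer,
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
    device_map="auto",
)

# Sampled generation that stops at the EOS token rather than at the old
# fixed max_length=200 cutoff.
generation = generator(
    input_text,
    do_sample=True,
    top_k=10,
    eos_token_id=tokenizer.eos_token_id,
)

# The pipeline returns a list with one dict per generated sequence.
print(f"Result: {generation[0]['generated_text']}")
```

The main behavioural changes in this commit: the example now drives generation through the `pipeline` API end to end, decoding stops at the EOS token instead of a hard length cutoff, and the result is indexed as `generation[0]` because the pipeline returns a list of dicts.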