Update README.md
README.md
```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("Ellbendls/Qwen-2.5-3b-Quran")
model = AutoModelForCausalLM.from_pretrained("Ellbendls/Qwen-2.5-3b-Quran")

# Move the model to GPU
model.to("cuda")

# Define the input message
messages = [
    {
        "role": "user",
        "content": "Tafsirkan ayat ini اِهْدِنَا الصِّرَاطَ الْمُسْتَقِيْمَۙ"
    }
]

# Generate the prompt using the tokenizer
prompt = tokenizer.apply_chat_template(messages, tokenize=False,
                                       add_generation_prompt=True)

# Tokenize the prompt and move inputs to GPU
inputs = tokenizer(prompt, return_tensors='pt', padding=True,
                   truncation=True).to("cuda")

# Generate the output using the model
outputs = model.generate(**inputs, max_length=150,
                         num_return_sequences=1)

# Decode the output
text = tokenizer.decode(outputs[0], skip_special_tokens=True)

# Print the result
print(text.split("assistant")[1])
```
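The same flow can also be driven through the high-level `pipeline` helper, which applies the chat template, tokenizes, generates, and decodes in one call. This is a minimal sketch, not part of the README change, and it assumes a recent transformers release that accepts chat-style message lists and a CUDA GPU; the prompt is Indonesian for "Interpret this verse".

```python
from transformers import pipeline

# Sketch of an equivalent call using the text-generation pipeline
# (assumption: a recent transformers version with chat-message support
# and a CUDA GPU available as device 0).
generator = pipeline(
    "text-generation",
    model="Ellbendls/Qwen-2.5-3b-Quran",
    device=0,  # place the model on the first GPU
)

messages = [
    {"role": "user", "content": "Tafsirkan ayat ini اِهْدِنَا الصِّرَاطَ الْمُسْتَقِيْمَۙ"}
]

# Chat templating, tokenization, generation, and decoding happen inside the pipeline
result = generator(messages, max_new_tokens=150)
print(result[0]["generated_text"][-1]["content"])  # the assistant's reply
```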