Update README.md
README.md CHANGED
@@ -48,13 +48,25 @@ import torch
 model = "Solshine/Jais-590m-merged"
 messages = [{"role": "user", "content": "What is a large language model?"}]
 
-tokenizer = AutoTokenizer.from_pretrained(model)
-
+tokenizer = AutoTokenizer.from_pretrained(model, trust_remote_code=True)
+
+# Manually apply a basic chat template since it's not provided by the model
+def custom_chat_template(messages):
+    chat_prompt = ""
+    for message in messages:
+        role = message["role"]
+        content = message["content"]
+        chat_prompt += f"{role}: {content}\n"
+    return chat_prompt
+
+prompt = custom_chat_template(messages)
+
 pipeline = transformers.pipeline(
     "text-generation",
     model=model,
     torch_dtype=torch.float16,
     device_map="auto",
+    trust_remote_code=True,
 )
 
 outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
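For reference, here is how the updated snippet reads when assembled into one runnable script. This is a minimal sketch: the import block and the final print line are assumptions not shown in the diff (the hunk context confirms only `import torch`), and the loaded tokenizer goes unused because `transformers.pipeline` loads its own tokenizer unless one is passed explicitly.

```python
import torch
import transformers
from transformers import AutoTokenizer

model = "Solshine/Jais-590m-merged"
messages = [{"role": "user", "content": "What is a large language model?"}]

# trust_remote_code=True lets transformers run the custom model/tokenizer
# code shipped with the repo; the diff adds it in both places it is needed.
tokenizer = AutoTokenizer.from_pretrained(model, trust_remote_code=True)

# Manually apply a basic chat template since it's not provided by the model
def custom_chat_template(messages):
    chat_prompt = ""
    for message in messages:
        role = message["role"]
        content = message["content"]
        chat_prompt += f"{role}: {content}\n"
    return chat_prompt

# For the example messages above this yields:
# "user: What is a large language model?\n"
prompt = custom_chat_template(messages)

pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    torch_dtype=torch.float16,
    device_map="auto",
    trust_remote_code=True,
)

outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
print(outputs[0]["generated_text"])  # assumed output handling, not in the diff
```

Passing `tokenizer=tokenizer` to `transformers.pipeline` would reuse the tokenizer loaded above instead of having the pipeline load a second copy.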