AlphaRandy
committed on
Commit
•
d8f550e
1
Parent(s):
f473ed8
Update README.md
Browse files
README.md
CHANGED
@@ -42,7 +42,7 @@ In the Transformers library, one can use [chat templates](https://huggingface.co
|
|
42 |
```python
|
43 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
44 |
|
45 |
-
model_id = "AlphaRandy/
|
46 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
47 |
|
48 |
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")
|
@@ -72,7 +72,7 @@ Note `float16` precision only works on GPU devices
|
|
72 |
+ import torch
|
73 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
74 |
|
75 |
-
model_id = "AlphaRandy/
|
76 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
77 |
|
78 |
+ model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")
|
@@ -93,7 +93,7 @@ print(tokenizer.decode(outputs[0], skip_special_tokens=True))
|
|
93 |
+ import torch
|
94 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
95 |
|
96 |
-
model_id = "AlphaRandy/
|
97 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
98 |
|
99 |
+ model = AutoModelForCausalLM.from_pretrained(model_id, load_in_4bit=True, device_map="auto")
|
@@ -121,7 +121,7 @@ print(tokenizer.decode(outputs[0], skip_special_tokens=True))
|
|
121 |
+ import torch
|
122 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
123 |
|
124 |
-
model_id = "AlphaRandy/
|
125 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
126 |
|
127 |
+ model = AutoModelForCausalLM.from_pretrained(model_id, use_flash_attention_2=True, device_map="auto")
|
|
|
42 |
```python
|
43 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
44 |
|
45 |
+
model_id = "AlphaRandy/WhelanChatBot"
|
46 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
47 |
|
48 |
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")
|
|
|
72 |
+ import torch
|
73 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
74 |
|
75 |
+
model_id = "AlphaRandy/WhelanChatBot"
|
76 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
77 |
|
78 |
+ model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")
|
|
|
93 |
+ import torch
|
94 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
95 |
|
96 |
+
model_id = "AlphaRandy/WhelanChatBot"
|
97 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
98 |
|
99 |
+ model = AutoModelForCausalLM.from_pretrained(model_id, load_in_4bit=True, device_map="auto")
|
|
|
121 |
+ import torch
|
122 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
123 |
|
124 |
+
model_id = "AlphaRandy/WhelanChatBot"
|
125 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
126 |
|
127 |
+ model = AutoModelForCausalLM.from_pretrained(model_id, use_flash_attention_2=True, device_map="auto")
|