karimouda committed
Commit 7d08141 · 1 parent: 67a9aa7

Update README.md

Files changed (1)
  1. README.md +6 -6
README.md CHANGED
@@ -52,7 +52,7 @@ from transformers import pipeline
 
 pipe = pipeline(
     "text-generation",
-    model="silma-ai/SILMA-9B-Instruct-v0.8",
+    model="silma-ai/SILMA-9B-Instruct-v1.0",
     model_kwargs={"torch_dtype": torch.bfloat16},
     device="cuda", # replace with "mps" to run on a Mac device
 )
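For reference, a minimal runnable version of the updated pipeline snippet; the example message and `max_new_tokens` value are illustrative assumptions, not part of this diff:

```python
import torch
from transformers import pipeline

pipe = pipeline(
    "text-generation",
    model="silma-ai/SILMA-9B-Instruct-v1.0",
    model_kwargs={"torch_dtype": torch.bfloat16},
    device="cuda",  # replace with "mps" to run on a Mac device
)

# hypothetical chat prompt; the pipeline accepts a list of role/content messages
messages = [{"role": "user", "content": "Write a one-line greeting."}]
outputs = pipe(messages, max_new_tokens=128)

# the pipeline appends the assistant turn to the conversation
assistant_response = outputs[0]["generated_text"][-1]["content"]
print(assistant_response)
```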
@@ -75,7 +75,7 @@ print(assistant_response)
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
 
-model_id = "silma-ai/SILMA-9B-Instruct-v0.8"
+model_id = "silma-ai/SILMA-9B-Instruct-v1.0"
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 model = AutoModelForCausalLM.from_pretrained(
     model_id,
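A sketch of the direct `AutoModelForCausalLM` path this hunk belongs to; the chat-template call and generation settings are assumptions filled in around the lines shown in the hunk:

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "silma-ai/SILMA-9B-Instruct-v1.0"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # assumed dtype, matching the pipeline snippet
    device_map="auto",           # assumption: let accelerate place the weights
)

# hypothetical prompt, formatted with the model's chat template
messages = [{"role": "user", "content": "Write a one-line greeting."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

outputs = model.generate(input_ids, max_new_tokens=128)
print(tokenizer.decode(outputs[0]))
```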
@@ -133,7 +133,7 @@ print(tokenizer.decode(outputs[0]))
 # pip install bitsandbytes accelerate
 from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
 
-model_id = "silma-ai/SILMA-9B-Instruct-v0.8"
+model_id = "silma-ai/SILMA-9B-Instruct-v1.0"
 quantization_config = BitsAndBytesConfig(load_in_8bit=True)
 
 tokenizer = AutoTokenizer.from_pretrained(model_id)
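The 8-bit path only changes how the weights are loaded; a hedged completion of this snippet (the loading call mirrors the full-precision example above, and generation would proceed the same way):

```python
# pip install bitsandbytes accelerate
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig

model_id = "silma-ai/SILMA-9B-Instruct-v1.0"
quantization_config = BitsAndBytesConfig(load_in_8bit=True)

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=quantization_config,  # weights stored in 8-bit
)
```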
@@ -162,7 +162,7 @@ print(tokenizer.decode(outputs[0]))
 # pip install bitsandbytes accelerate
 from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
 
-model_id = "silma-ai/SILMA-9B-Instruct-v0.8"
+model_id = "silma-ai/SILMA-9B-Instruct-v1.0"
 quantization_config = BitsAndBytesConfig(load_in_4bit=True)
 
 tokenizer = AutoTokenizer.from_pretrained(model_id)
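The 4-bit variant is identical to the 8-bit snippet apart from the config flag; only this line differs:

```python
quantization_config = BitsAndBytesConfig(load_in_4bit=True)  # load weights in 4-bit instead of 8-bit
```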
@@ -204,7 +204,7 @@ import torch
 torch.set_float32_matmul_precision("high")
 
 # load the model + tokenizer
-model_id = "silma-ai/SILMA-9B-Instruct-v0.8"
+model_id = "silma-ai/SILMA-9B-Instruct-v1.0"
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 model = Gemma2ForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16)
 model.to("cuda")
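Given `set_float32_matmul_precision` and the explicit `Gemma2ForCausalLM` class, this hunk appears to come from a `torch.compile` section of the README; a sketch of the usual recipe, where the compile call and its flags are assumptions not shown in the diff:

```python
import torch
from transformers import AutoTokenizer, Gemma2ForCausalLM

torch.set_float32_matmul_precision("high")

# load the model + tokenizer
model_id = "silma-ai/SILMA-9B-Instruct-v1.0"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = Gemma2ForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16)
model.to("cuda")

# assumption: compile the forward pass; the first few calls are slow (warmup),
# subsequent generations run noticeably faster
model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True)
```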
@@ -259,7 +259,7 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
 import transformers
 import torch
 
-model_id = "silma-ai/SILMA-9B-Instruct-v0.8"
+model_id = "silma-ai/SILMA-9B-Instruct-v1.0"
 dtype = torch.bfloat16
 
 tokenizer = AutoTokenizer.from_pretrained(model_id)