Transformers
GGUF
llama
TheBloke committed on
Commit
2e702bf
1 Parent(s): 861096f

Upload README.md

Browse files
Files changed (1) hide show
  1. README.md +1 -1
README.md CHANGED
@@ -167,7 +167,7 @@ CT_METAL=1 pip install ctransformers>=0.2.24 --no-binary ctransformers
167
  from ctransformers import AutoModelForCausalLM
168
 
169
  # Set gpu_layers to the number of layers to offload to GPU. Set to 0 if no GPU acceleration is available on your system.
170
- llm = AutoModelForCausalLM.from_pretrained("None", model_file="airoboros-c34b-2.1.q4_K_M.gguf", model_type="llama", gpu_layers=50)
171
 
172
  print(llm("AI is going to"))
173
  ```
 
167
  from ctransformers import AutoModelForCausalLM
168
 
169
  # Set gpu_layers to the number of layers to offload to GPU. Set to 0 if no GPU acceleration is available on your system.
170
+ llm = AutoModelForCausalLM.from_pretrained("TheBloke/Airoboros-c34B-2.1-GGUF", model_file="airoboros-c34b-2.1.q4_K_M.gguf", model_type="llama", gpu_layers=50)
171
 
172
  print(llm("AI is going to"))
173
  ```