imranali291 committed
Commit 7677540 · verified · 1 Parent(s): 27a166f

Update README.md

Files changed (1): README.md +3 -0
README.md CHANGED
@@ -7,6 +7,9 @@ tags:
 # Streaming Inference
 ```python
 from unsloth import FastLanguageModel
+max_seq_length = 2048
+dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
+load_in_4bit = True
 model, tokenizer = FastLanguageModel.from_pretrained(
     model_name = "imranali291/gpt-base-prompt-generator",
     max_seq_length = max_seq_length,
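
The diff ends mid-snippet at the `from_pretrained` call. For context, below is a minimal sketch of the snippet with the three added lines applied and continued into streaming generation; the `FastLanguageModel.for_inference` call, the `TextStreamer`, and the example prompt are illustrative assumptions and are not part of this commit.

```python
from unsloth import FastLanguageModel
from transformers import TextStreamer

max_seq_length = 2048
dtype = None         # None auto-detects; float16 for Tesla T4/V100, bfloat16 for Ampere+
load_in_4bit = True  # load weights in 4-bit to reduce GPU memory use

# Load the model and tokenizer from the Hub (values above come from this commit).
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "imranali291/gpt-base-prompt-generator",
    max_seq_length = max_seq_length,
    dtype = dtype,
    load_in_4bit = load_in_4bit,
)

# Assumed continuation (not shown in the diff): switch to inference mode and stream tokens.
FastLanguageModel.for_inference(model)

inputs = tokenizer(
    ["Write a prompt for a travel blog post"],  # hypothetical example input
    return_tensors = "pt",
).to("cuda")

streamer = TextStreamer(tokenizer)  # prints tokens to stdout as they are generated
_ = model.generate(**inputs, streamer = streamer, max_new_tokens = 128)
```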