CineAI committed
Commit 69f2c85 · verified · 1 Parent(s): 645b047

Update README.md

Files changed (1): README.md (+23, −21)
README.md CHANGED
@@ -149,27 +149,29 @@ datasets:
 
 # How to use
 
-You can use it with a script
-
-model, tokenizer = FastLanguageModel.from_pretrained(
-    model_name="CineAI/Llama32-3B-CoT",
-    max_seq_length=max_length,
-    dtype=dtype,
-    load_in_4bit=load_in_4bit
-)
-
-FastLanguageModel.for_inference(model)
-
-inputs = tokenizer.apply_chat_template(
-    message,
-    tokenize = True,
-    add_generation_prompt = True, # Must add for generation
-    return_tensors = "pt",
-).to(device)
-
-text_streamer = TextStreamer(tokenizer, skip_prompt = True)
-_ = model.generate(input_ids = inputs, streamer = text_streamer, max_new_tokens = max_new_tokens,
-    use_cache = True, temperature = temperature, min_p = min_p)
+You can use it with a script
+
+```python
+model, tokenizer = FastLanguageModel.from_pretrained(
+    model_name="CineAI/Llama32-3B-CoT",
+    max_seq_length=max_length,
+    dtype=dtype,
+    load_in_4bit=load_in_4bit
+)
+
+FastLanguageModel.for_inference(model)
+
+inputs = tokenizer.apply_chat_template(
+    message,
+    tokenize = True,
+    add_generation_prompt = True, # Must add for generation
+    return_tensors = "pt",
+).to(device)
+
+text_streamer = TextStreamer(tokenizer, skip_prompt = True)
+_ = model.generate(input_ids = inputs, streamer = text_streamer, max_new_tokens = max_new_tokens,
+    use_cache = True, temperature = temperature, min_p = min_p)
+```
 
 # Uploaded model
 
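For reference, the snippet committed above relies on names the README defines elsewhere or leaves to the reader (`max_length`, `dtype`, `load_in_4bit`, `message`, `device`, `max_new_tokens`, `temperature`, `min_p`). A self-contained sketch is below; the concrete values are illustrative assumptions, not settings published with the model, and `do_sample=True` is added so the `temperature`/`min_p` sampling parameters actually take effect:

```python
# Self-contained version of the README snippet. Values marked "assumed"
# are placeholders, not settings published with CineAI/Llama32-3B-CoT.
from unsloth import FastLanguageModel
from transformers import TextStreamer

max_length = 2048     # assumed context length
dtype = None          # None lets unsloth auto-detect (e.g. bfloat16)
load_in_4bit = True   # assumed: load weights 4-bit quantized

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="CineAI/Llama32-3B-CoT",
    max_seq_length=max_length,
    dtype=dtype,
    load_in_4bit=load_in_4bit,
)
FastLanguageModel.for_inference(model)  # enable unsloth's fast inference path

# Chat-formatted input; the content here is a placeholder prompt.
message = [{"role": "user", "content": "Explain chain-of-thought prompting."}]
inputs = tokenizer.apply_chat_template(
    message,
    tokenize=True,
    add_generation_prompt=True,  # must add for generation
    return_tensors="pt",
).to(model.device)

# Stream tokens to stdout as they are generated.
text_streamer = TextStreamer(tokenizer, skip_prompt=True)
_ = model.generate(
    input_ids=inputs,
    streamer=text_streamer,
    max_new_tokens=512,   # assumed generation budget
    use_cache=True,
    do_sample=True,       # required for temperature/min_p to apply
    temperature=0.7,      # assumed sampling settings
    min_p=0.1,
)
```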