nroggendorff committed on
Commit bae8aa6 · verified · 1 Parent(s): 1dfc5eb

Delete index.html

Files changed (1)
  1. index.html +0 -43
index.html DELETED
@@ -1,43 +0,0 @@
-<h2>Usage</h2>
-
-<p>You can load models using the Hugging Face Transformers library:</p>
-
-<p style="background-color: gray">
-from transformers import pipeline
-
-pipe = pipeline("text-generation", model="nroggendorff/mayo")
-
-question = "What color is the sky?"
-conv = [{"role": "user", "content": question}]
-
-response = pipe(conv, max_new_tokens=32)[0]['generated_text'][-1]['content']
-print(response)
-</p>
-
-<p>To use models with quantization:</p>
-
-<p style="background-color: gray">
-from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
-import torch
-
-bnb_config = BitsAndBytesConfig(
-    load_in_4bit=True,
-    bnb_4bit_use_double_quant=True,
-    bnb_4bit_quant_type="nf4",
-    bnb_4bit_compute_dtype=torch.bfloat16
-)
-
-model_id = "nroggendorff/mayo"
-
-tokenizer = AutoTokenizer.from_pretrained(model_id)
-model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_config)
-
-question = "What color is the sky?"
-prompt = tokenizer.apply_chat_template([{"role": "user", "content": question}], tokenize=False)
-inputs = tokenizer(prompt, return_tensors="pt")
-
-outputs = model.generate(**inputs, max_new_tokens=32)
-
-generated_text = tokenizer.batch_decode(outputs)[0]
-print(generated_text)
-</p>
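
For reference, the first snippet from the deleted file, restated as a self-contained script. The model id and generation settings come from the diff above; the chat-style return format (a list of messages, with the reply last) assumes a recent transformers release:

```python
from transformers import pipeline

# Load the model from the deleted snippet as a text-generation pipeline.
pipe = pipeline("text-generation", model="nroggendorff/mayo")

question = "What color is the sky?"
conv = [{"role": "user", "content": question}]

# With a list-of-messages input, the pipeline returns the whole conversation;
# the last message holds the model's reply.
response = pipe(conv, max_new_tokens=32)[0]["generated_text"][-1]["content"]
print(response)
```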
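
Likewise, the 4-bit quantization snippet as a runnable sketch. The BitsAndBytesConfig values match the deleted file; `add_generation_prompt=True`, `skip_special_tokens=True`, and moving inputs to `model.device` are assumptions added here so the script produces a clean reply on whatever device the quantized weights load onto (requires bitsandbytes and a CUDA GPU):

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig

# 4-bit NF4 quantization with double quantization, as in the deleted file.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

model_id = "nroggendorff/mayo"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_config)

question = "What color is the sky?"
# add_generation_prompt=True is an assumption (not in the original snippet):
# it appends the assistant turn marker so the model starts a fresh reply.
prompt = tokenizer.apply_chat_template(
    [{"role": "user", "content": question}],
    tokenize=False,
    add_generation_prompt=True,
)
# Move inputs to the device the quantized model was placed on (assumption).
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
```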