Felladrin committed on
Commit
c28f330
·
1 Parent(s): 041f0c2

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +28 -0
README.md CHANGED
@@ -1,3 +1,31 @@
1
  ---
2
  license: apache-2.0
 
 
3
  ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
  license: apache-2.0
3
+ library_name: "transformers.js"
4
+ base_model: princeton-nlp/Sheared-Pythia-160m
5
  ---
6
+
7
+ INT8 ONNX version of [princeton-nlp/Sheared-Pythia-160m](https://huggingface.co/princeton-nlp/Sheared-Pythia-160m) to use with [Transformers.js](https://huggingface.co/docs/transformers.js).
8
+
9
+ ### Example usage
10
+ #### Pipeline API
11
+ ```js
12
+ import { pipeline } from '@xenova/transformers';
13
+
14
+ const generator = await pipeline('text-generation', 'Felladrin/onnx-Sheared-Pythia-160m');
15
+ const output = await generator('Once upon a time,', { add_special_tokens: true, max_new_tokens: 60, repetition_penalty: 1.2 });
16
+ console.log(output);
17
+ ```
18
+
19
+ #### Auto Classes
20
+ ```js
21
+ import { AutoModelForCausalLM, AutoTokenizer } from '@xenova/transformers';
22
+
23
+ const model_path = 'Felladrin/onnx-Sheared-Pythia-160m';
24
+ const model = await AutoModelForCausalLM.from_pretrained(model_path);
25
+ const tokenizer = await AutoTokenizer.from_pretrained(model_path);
26
+
27
+ const prompt = 'Once upon a time,';
28
+ const { input_ids } = tokenizer(prompt);
29
+ const tokens = await model.generate(input_ids, { max_new_tokens: 60, repetition_penalty: 1.2 });
30
+ console.log(tokenizer.decode(tokens[0], { skip_special_tokens: true }));
31
+ ```