sarahciston committed on
Commit
7aa7ed3
·
verified ·
1 Parent(s): 435f99e

try original example of tokenizer

Browse files
Files changed (1) hide show
  1. sketch.js +4 -3
sketch.js CHANGED
@@ -1,6 +1,6 @@
1
 
2
  // IMPORT LIBRARIES TOOLS
3
- import { pipeline, env, AutoTokenizer } from 'https://cdn.jsdelivr.net/npm/@xenova/[email protected]';
4
  // AutoTokenizer
5
 
6
  // skip local model check
@@ -20,14 +20,15 @@ async function textGenTask(input){
20
  // const = modelsList = ['Xenova/stablelm-2-zephyr-1_6b', 'Xenova/LaMini-Cerebras-256M', 'Xenova/TinyLlama-1.1B-Chat-v1.0']
21
 
22
  const pipe = await pipeline('text-generation', MODEL)
23
- let tokenizer = AutoTokenizer.from_pretrained(MODEL)
24
 
25
  const messages = [
26
  {"role": "system", "content": PREPROMPT},
27
  {"role": "user", "content": input}
28
  ]
29
 
30
- const prompt = tokenizer.apply_chat_template(messages, { tokenize: false });
 
31
 
32
  // run text through model, setting hyperparameters
33
  var out = await pipe(prompt, {
 
1
 
2
  // IMPORT LIBRARIES TOOLS
3
+ import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/[email protected]';
4
  // AutoTokenizer
5
 
6
  // skip local model check
 
20
  // const = modelsList = ['Xenova/stablelm-2-zephyr-1_6b', 'Xenova/LaMini-Cerebras-256M', 'Xenova/TinyLlama-1.1B-Chat-v1.0']
21
 
22
  const pipe = await pipeline('text-generation', MODEL)
23
+ // let tokenizer = AutoTokenizer.from_pretrained(MODEL)
24
 
25
  const messages = [
26
  {"role": "system", "content": PREPROMPT},
27
  {"role": "user", "content": input}
28
  ]
29
 
30
+ // const prompt = tokenizer.apply_chat_template(messages, { tokenize: false });
31
+ const prompt = pipe.tokenizer.apply_chat_template(messages, { tokenize: false });
32
 
33
  // run text through model, setting hyperparameters
34
  var out = await pipe(prompt, {