Spaces:
Running
Running
new model old format chat_template
Browse files
sketch.js
CHANGED
@@ -1,6 +1,6 @@
|
|
1 |
|
2 |
// IMPORT LIBRARIES TOOLS
|
3 |
-
import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/[email protected]';
|
4 |
// AutoTokenizer
|
5 |
|
6 |
// skip local model check
|
@@ -16,19 +16,19 @@ let PREPROMPT = `You're a friendly pirate. Please complete the phrase and fill i
|
|
16 |
async function textGenTask(input){
|
17 |
console.log('text-gen task initiated')
|
18 |
|
19 |
-
let MODEL = 'Xenova/
|
20 |
-
// const = modelsList = ['Xenova/
|
21 |
|
22 |
const pipe = await pipeline('text-generation', MODEL)
|
23 |
-
|
24 |
|
25 |
const messages = [
|
26 |
{"role": "system", "content": PREPROMPT},
|
27 |
{"role": "user", "content": input}
|
28 |
]
|
29 |
|
30 |
-
|
31 |
-
const prompt = pipe.tokenizer.apply_chat_template(messages, { tokenize: false });
|
32 |
|
33 |
// run text through model, setting hyperparameters
|
34 |
var out = await pipe(prompt, {
|
|
|
1 |
|
2 |
// IMPORT LIBRARIES TOOLS
|
3 |
+
import { pipeline, env, AutoTokenizer } from 'https://cdn.jsdelivr.net/npm/@xenova/[email protected]';
|
4 |
// AutoTokenizer
|
5 |
|
6 |
// skip local model check
|
|
|
16 |
async function textGenTask(input){
|
17 |
console.log('text-gen task initiated')
|
18 |
|
19 |
+
let MODEL = 'Xenova/bloomz-560m'
|
20 |
+
// const modelsList = ['Xenova/LaMini-Cerebras-256M', 'Xenova/TinyLlama-1.1B-Chat-v1.0']
|
21 |
|
22 |
const pipe = await pipeline('text-generation', MODEL)
|
23 |
+
let tokenizer = await AutoTokenizer.from_pretrained(MODEL)
|
24 |
|
25 |
const messages = [
|
26 |
{"role": "system", "content": PREPROMPT},
|
27 |
{"role": "user", "content": input}
|
28 |
]
|
29 |
|
30 |
+
const prompt = tokenizer.apply_chat_template(messages, { tokenize: false });
|
31 |
+
// const prompt = pipe.tokenizer.apply_chat_template(messages, { tokenize: false });
|
32 |
|
33 |
// run text through model, setting hyperparameters
|
34 |
var out = await pipe(prompt, {
|