Spaces:
Running
Running
Try with a different model
Browse files
sketch.js
CHANGED
@@ -16,7 +16,7 @@ let PREPROMPT = `You're a friendly pirate. Please complete the phrase and fill i
|
|
16 |
async function textGenTask(input){
|
17 |
console.log('text-gen task initiated')
|
18 |
|
19 |
-
let MODEL = 'Xenova/
|
20 |
// const modelsList = ['Xenova/stablelm-2-zephyr-1_6b', 'Xenova/LaMini-Cerebras-256M', 'Xenova/TinyLlama-1.1B-Chat-v1.0']
|
21 |
|
22 |
const pipe = await pipeline('text-generation', MODEL)
|
@@ -28,14 +28,12 @@ async function textGenTask(input){
|
|
28 |
// ]
|
29 |
|
30 |
const messages = [
|
31 |
-
{ "role": "system", "content": "You are
|
32 |
{ "role": "user", "content": input },
|
33 |
]
|
34 |
|
35 |
const prompt = pipe.tokenizer.apply_chat_template(messages, {
|
36 |
-
tokenize: false
|
37 |
-
add_generation_prompt: false,
|
38 |
-
});
|
39 |
|
40 |
// run text through model, setting hyperparameters
|
41 |
var out = await pipe(input, {
|
|
|
16 |
async function textGenTask(input){
|
17 |
console.log('text-gen task initiated')
|
18 |
|
19 |
+
let MODEL = 'Xenova/TinyLlama-1.1B-Chat-v1.0'
|
20 |
// const modelsList = ['Xenova/stablelm-2-zephyr-1_6b', 'Xenova/LaMini-Cerebras-256M', 'Xenova/TinyLlama-1.1B-Chat-v1.0']
|
21 |
|
22 |
const pipe = await pipeline('text-generation', MODEL)
|
|
|
28 |
// ]
|
29 |
|
30 |
const messages = [
|
31 |
+
{ "role": "system", "content": "You are pirate." },
|
32 |
{ "role": "user", "content": input },
|
33 |
]
|
34 |
|
35 |
const prompt = pipe.tokenizer.apply_chat_template(messages, {
|
36 |
+
tokenize: false });
|
|
|
|
|
37 |
|
38 |
// run text through model, setting hyperparameters
|
39 |
var out = await pipe(input, {
|