// IMPORT LIBRARIES & TOOLS
import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';
// skip local model check
env.allowLocalModels = false;
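// with local models disabled, Transformers.js fetches the 'Xenova/...' model files from the Hugging Face Hub instead of looking for copies on this server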
// GLOBAL VARIABLES
var PROMPT_INPUT = `The vice president [MASK] after returning from war.` // a field for writing or changing a text value
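// any prompt works as long as it contains the literal [MASK] token for the model to fill in, e.g. `My favorite food is [MASK].` (example prompt for illustration only)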
var OUTPUT_LIST = [] // a blank array to store the results from the model
var displayResults // declared here so the display function assigned inside the p5 instance below is reachable from fillInTask()
// RUN MODEL
async function fillInTask(){
  console.log('fill-in task initiated')
  OUTPUT_LIST = [] // clear any results from an earlier run so they don't stack up
  const pipe = await pipeline('fill-mask', 'Xenova/bert-base-uncased'); // model files are fetched on first use
  var out = await pipe(PROMPT_INPUT.value()) // PROMPT_INPUT becomes a p5 input element in makeFields(), so read its text with .value()
  console.log(out) // yields { score, sequence, token, token_str } for each result
  out.forEach(o => {
    console.log(o)
    OUTPUT_LIST.push(o.sequence) // put only the full sequence in a list
  })
  console.log(OUTPUT_LIST)
  displayResults(OUTPUT_LIST)
  console.log('fill-in task completed')
  // return await out
  return OUTPUT_LIST
}
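// each item in `out` pairs a confidence score with one way of filling the blank, roughly:
// { score: <probability>, token: <vocabulary id>, token_str: '<predicted word>', sequence: '<prompt with [MASK] replaced>' }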
// PROCESS MODEL OUTPUT
// a generic function to pass in different model task functions
// async function getOutputs(task){
//   let output = await task
//   await output.forEach(o => {
//     OUTPUT_LIST.push(o.sequence) // put only the full sequence in a list
//   })
//   console.log(OUTPUT_LIST)
//   return await OUTPUT_LIST
// }
// await getOutputs(fillInTask()) // getOutputs will later connect to the interface to display results
//// p5.js Instance
new p5(function (p5){
  p5.setup = function(){
    p5.noCanvas()
    console.log('p5 instance loaded')
    makeTextDisplay()
    makeFields()
    makeButtons()
  }
  p5.draw = function(){
    //
  }
  function makeTextDisplay(){
    let title = p5.createElement('h1','p5.js Critical AI Prompt Battle')
    let intro = p5.createP(`This tool lets you run several AI chat prompts at once and compare their results. Use it to explore what models 'know' about various concepts, communities, and cultures. For more information on prompt programming and critical AI, see [Tutorial & extra info][TO-DO][XXX]`)
  }
  function makeFields(){
    PROMPT_INPUT = p5.createInput(PROMPT_INPUT) // turns the prompt string into a text input field; from now on access the text via PROMPT_INPUT.value()
    PROMPT_INPUT.size(700)
    PROMPT_INPUT.attribute('label', `Write a text prompt with at least one [MASK] that the model will fill in.`)
    p5.createP(PROMPT_INPUT.attribute('label'))
    PROMPT_INPUT.addClass("prompt")
    console.log(PROMPT_INPUT.value())
  }
  function makeButtons(){
    let submitButton = p5.createButton("SUBMIT")
    submitButton.size(170)
    submitButton.class('submit')
    submitButton.mousePressed(fillInTask)
  }
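  // note: submitButton.mousePressed(fillInTask) above is what connects the interface to the model, so nothing runs until SUBMIT is clicked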
  // assigned to the global declared at the top so fillInTask(), which lives outside this p5 closure, can call it
  displayResults = function(list){
    console.log('displaying results')
    let outHead = p5.createElement('h4',"Results:")
    let outText = p5.createP('')
    outText.html(list)
  }
  // async function makeOutputDisplay(){
  //   console.log('button pressed')
  //   let outHead = p5.createElement('h4',"Results:")
  //   let out = await fillInTask() //just model no parsing
  //   // let out = await getOutputs(fillInTask()) // model and parsing
  //   out = str(await out)
  //   console.log(out)
  //   let outText = p5.createP('')
  //   await outText.html(out)
  // }
});
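// To run this sketch in a Space or any static page, p5.js must load first and this file must load as an ES module
// (the import at the top requires type="module"). A minimal index.html might look like the sketch below; the
// filename sketch.js and the p5.js version are assumptions, not taken from this repo:
//
// <!DOCTYPE html>
// <html>
//   <head>
//     <meta charset="utf-8">
//     <script src="https://cdn.jsdelivr.net/npm/p5@1.9.0/lib/p5.min.js"></script>
//   </head>
//   <body>
//     <script type="module" src="sketch.js"></script>
//   </body>
// </html>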