// IMPORT LIBRARIES & TOOLS
import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/[email protected]';

// skip the check for locally hosted models and download them from the Hugging Face Hub instead
env.allowLocalModels = false;

// GLOBAL VARIABLES
var PROMPT_INPUT = `The vice president [MASK] after returning from war.` // a field for writing or changing a text value
var OUTPUT_LIST = [] // a blank array to store the results from the model


// RUN MODEL
async function fillInTask(){
  const pipe = await pipeline('fill-mask', 'Xenova/bert-base-uncased');
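  // the first call downloads the model weights from the Hugging Face Hub (later runs are
  // usually served from the browser's cache); a BERT 'fill-mask' pipeline expects the
  // literal token [MASK] somewhere in the prompt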
  
  // PROMPT_INPUT starts out as a plain string, but once makeFields() runs it becomes a
  // p5 input element, so read the current text with .value() when that method is available
  let promptText = typeof PROMPT_INPUT === 'string' ? PROMPT_INPUT : PROMPT_INPUT.value()

  var out = await pipe(promptText);

  console.log(out) // yields { score, sequence, token, token_str } for each result
  
  // await out.forEach(o => {
  //   console.log(o) // yields { score, sequence, token, token_str } for each result
  //   OUTPUT_LIST.push(o.sequence) // put only the full sequence in a list
  // })
  
  // console.log(OUTPUT_LIST)

  return out
}
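
// A quick way to sanity-check the model on its own (illustrative sketch): fillInTask() should
// resolve to an array of result objects shaped like
// { score: <number>, token: <id>, token_str: '<predicted word>', sequence: '<full sentence>' },
// so e.g. fillInTask().then(results => console.log(results[0].sequence)) logs one completed sentence.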

// PROCESS MODEL OUTPUT
// a generic helper that awaits any model task function and collects its results
async function getOutputs(task){
  let output = await task

  OUTPUT_LIST = [] // clear results from any previous run so only the current prompt is shown

  output.forEach(o => {
    OUTPUT_LIST.push(o.sequence) // put only the full sequence in a list
  })

  console.log(OUTPUT_LIST)

  return OUTPUT_LIST
}

// await getOutputs(fillInTask()) // getOutputs will later connect to the interface to display results



//// p5.js Instance
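// The interface code below uses p5's instance mode: the whole sketch is wrapped in
// new p5(function (p5){ ... }) and p5 functions are called as methods of that instance
// (p5.createInput, p5.createButton, ...), which keeps p5 out of the global namespace
// used by the module code above.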

new p5(function (p5){
  p5.setup = function(){
      p5.noCanvas()
      console.log('p5 instance loaded')
      makeDisplayText()
      makeFields()
      makeButtons()
    }

  p5.draw = function(){
      // no animation loop needed; this sketch only builds DOM elements
  }

  function makeDisplayText(){
    let title = p5.createElement('h1','p5.js Critical AI Prompt Battle')
    let intro = p5.createP(`This tool lets you run several AI chat prompts at once and compare their results. Use it to explore what models 'know' about various concepts, communities, and cultures. For more information on prompt programming and critical AI, see [Tutorial & extra info][TO-DO][XXX]`)
  }

  function makeFields(){
    PROMPT_INPUT = p5.createInput(PROMPT_INPUT) // turns the string into an input; now access the text via PROMPT_INPUT.value()
    PROMPT_INPUT.size(700)
    PROMPT_INPUT.attribute('label', `Write a text prompt with at least one [MASK] that the model will fill in.`)
    p5.createP(PROMPT_INPUT.attribute('label'))
    PROMPT_INPUT.addClass("prompt")
  }
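
  // After makeFields() runs, PROMPT_INPUT refers to the p5 input element rather than the
  // original string, which is why fillInTask() reads the prompt text with PROMPT_INPUT.value().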

  function makeButtons(){
    let submitButton = p5.createButton("SUBMIT")
    submitButton.size(170)
    submitButton.class('submit')
    submitButton.mousePressed(makeResultsText)
  }

  async function makeResultsText(){
    console.log('button pressed')
    let resultsText = p5.createElement('h4',"Results:")

    // let out = await fillInTask() // model only, no parsing
    let out = await getOutputs(fillInTask()) // model and parsing
    console.log(out)

    resultsText.html(out.join('<br>')) // show each predicted sequence on its own line
  }

});
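
// NOTE (assumption about the surrounding project, not shown in this file): since this sketch
// uses an ES module import from a CDN, it should be loaded in index.html with
// <script type="module" src="..."></script>, and the p5.js library needs to be included there too.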