Sarah Ciston committed on
Commit
f9569e7
·
1 Parent(s): 8160912

try new param syntax w inference

Browse files
Files changed (2) hide show
  1. README.md +1 -0
  2. sketch.js +28 -13
README.md CHANGED
@@ -12,6 +12,7 @@ hf_oauth_scopes:
12
  - inference-api
13
  models:
14
  # - gpt-3.5-turbo
 
15
  - Xenova/distilgpt2
16
  # - HuggingFaceH4/zephyr-7b-gemma-v0.1
17
  # - HuggingFaceH4/zephyr-7b-beta
 
12
  - inference-api
13
  models:
14
  # - gpt-3.5-turbo
15
+ - bigscience/bloom-560m
16
  - Xenova/distilgpt2
17
  # - HuggingFaceH4/zephyr-7b-gemma-v0.1
18
  # - HuggingFaceH4/zephyr-7b-beta
sketch.js CHANGED
@@ -4,7 +4,7 @@
4
  // import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';
5
 
6
  /// AUTHORIZATION
7
-
8
  import { oauthLoginUrl, oauthHandleRedirectIfPresent } from 'https://esm.sh/@huggingface/hub';
9
 
10
  const oauthResult = await oauthHandleRedirectIfPresent();
@@ -16,11 +16,11 @@ if (!oauthResult) {
16
 
17
  // You can use oauthResult.accessToken, oauthResult.accessTokenExpiresAt and oauthResult.userInfo
18
  // console.log(oauthResult);
19
- const HFAUTH = oauthResult.accessToken
20
- console.log(HFAUTH)
21
 
22
- import { HfInference } from 'https://esm.sh/@huggingface/inference';
23
- const inference = new HfInference(HFAUTH);
24
 
25
  // PIPELINE MODELS
26
  // models('Xenova/gpt2', 'Xenova/gpt-3.5-turbo', 'mistralai/Mistral-7B-Instruct-v0.2', 'Xenova/llama-68m', 'meta-llama/Meta-Llama-3-8B', 'Xenova/bloom-560m', 'Xenova/distilgpt2')
@@ -179,17 +179,30 @@ async function runModel(PREPROMPT, PROMPT){
179
  // let MODELNAME = "HuggingFaceH4/zephyr-7b-beta"
180
  // let MODELNAME = "openai-community/gpt2"
181
  // let MODELNAME = 'mistral_inference'
182
- let MODELNAME = 'Xenova/distilgpt2'
 
183
 
184
- let out = await inference.textGeneration(INPUT, {
 
185
  model: MODELNAME,
186
- max_new_tokens: 128
187
- })
 
 
 
 
 
 
 
 
 
 
188
 
189
  // text-generation-inference
190
  // Uncaught (in promise) Error: HfApiJson(Deserialize(Error("unknown variant `transformers.js`, expected one of `text-generation-inference`, `transformers`, `allennlp`, `flair`, `espnet`, `asteroid`, `speechbrain`, `timm`, `sentence-transformers`, `spacy`, `sklearn`, `stanza`, `adapter-transformers`, `fasttext`, `fairseq`, `pyannote-audio`, `doctr`, `nemo`, `fastai`, `k2`, `diffusers`, `paddlenlp`, `mindspore`, `open_clip`, `span-marker`, `bertopic`, `peft`, `setfit`", line: 1, column: 397)))
191
 
192
  // let out = await inference.textGeneration({
 
193
  // model: MODELNAME,
194
  // messages: [{
195
  // role: "system",
@@ -203,10 +216,12 @@ async function runModel(PREPROMPT, PROMPT){
203
 
204
  console.log(out)
205
 
 
 
206
  // modelResult = await out.messages[0].content
207
 
208
- var modelResult = await out.choices[0].message.content
209
- // var modelResult = await out[0].generated_text
210
  console.log(modelResult);
211
 
212
  return modelResult
@@ -247,7 +262,7 @@ async function runModel(PREPROMPT, PROMPT){
247
  // // let MODELNAME = 'mistralai/Mistral-Nemo-Instruct-2407'
248
  // let MODELNAME = "HuggingFaceH4/zephyr-7b-beta"
249
 
250
- // // HFAUTH
251
 
252
  // // 'meta-llama/Meta-Llama-3-70B-Instruct'
253
  // // 'openai-community/gpt2'
@@ -259,7 +274,7 @@ async function runModel(PREPROMPT, PROMPT){
259
  // // pipeline/transformers version
260
  // let pipe = await pipeline('text-generation', {
261
  // model: MODELNAME,
262
- // accessToken: HFAUTH
263
  // });
264
  // // seems to work with default model distilgpt2 ugh
265
 
 
4
  // import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';
5
 
6
  /// AUTHORIZATION
7
+ import { textGeneration } from 'https://esm.sh/@huggingface/inference';
8
  import { oauthLoginUrl, oauthHandleRedirectIfPresent } from 'https://esm.sh/@huggingface/hub';
9
 
10
  const oauthResult = await oauthHandleRedirectIfPresent();
 
16
 
17
  // You can use oauthResult.accessToken, oauthResult.accessTokenExpiresAt and oauthResult.userInfo
18
  // console.log(oauthResult);
19
+ const HF_TOKEN = oauthResult.accessToken
20
+ console.log(HF_TOKEN)
21
 
22
+ // import { HfInference } from 'https://esm.sh/@huggingface/inference';
23
+ // const inference = new HfInference(HF_TOKEN);
24
 
25
  // PIPELINE MODELS
26
  // models('Xenova/gpt2', 'Xenova/gpt-3.5-turbo', 'mistralai/Mistral-7B-Instruct-v0.2', 'Xenova/llama-68m', 'meta-llama/Meta-Llama-3-8B', 'Xenova/bloom-560m', 'Xenova/distilgpt2')
 
179
  // let MODELNAME = "HuggingFaceH4/zephyr-7b-beta"
180
  // let MODELNAME = "openai-community/gpt2"
181
  // let MODELNAME = 'mistral_inference'
182
+ // let MODELNAME = 'Xenova/distilgpt2'
183
+ let MODELNAME = 'bigscience/bloom-560m'
184
 
185
+ let out = await textGeneration({
186
+ accessToken: HF_TOKEN,
187
  model: MODELNAME,
188
+ inputs: INPUT,
189
+ parameters: {
190
+ max_new_tokens: 128
191
+ }
192
+ });
193
+
194
+ // let out = await inference.textGeneration(INPUT, {
195
+ // model: MODELNAME,
196
+ // max_new_tokens: 128
197
+ // })
198
+
199
+ // let out = await inference.textGeneration(INPUT, 'bigscience/bloom-560m')
200
 
201
  // text-generation-inference
202
  // Uncaught (in promise) Error: HfApiJson(Deserialize(Error("unknown variant `transformers.js`, expected one of `text-generation-inference`, `transformers`, `allennlp`, `flair`, `espnet`, `asteroid`, `speechbrain`, `timm`, `sentence-transformers`, `spacy`, `sklearn`, `stanza`, `adapter-transformers`, `fasttext`, `fairseq`, `pyannote-audio`, `doctr`, `nemo`, `fastai`, `k2`, `diffusers`, `paddlenlp`, `mindspore`, `open_clip`, `span-marker`, `bertopic`, `peft`, `setfit`", line: 1, column: 397)))
203
 
204
  // let out = await inference.textGeneration({
205
+ // accessToken: HF_TOKEN,
206
  // model: MODELNAME,
207
  // messages: [{
208
  // role: "system",
 
216
 
217
  console.log(out)
218
 
219
+ console.log(out.token.text, out.generated_text)
220
+
221
  // modelResult = await out.messages[0].content
222
 
223
+ // var modelResult = await out.choices[0].message.content
224
+ var modelResult = await out[0].generated_text
225
  console.log(modelResult);
226
 
227
  return modelResult
 
262
  // // let MODELNAME = 'mistralai/Mistral-Nemo-Instruct-2407'
263
  // let MODELNAME = "HuggingFaceH4/zephyr-7b-beta"
264
 
265
+ // // HF_TOKEN
266
 
267
  // // 'meta-llama/Meta-Llama-3-70B-Instruct'
268
  // // 'openai-community/gpt2'
 
274
  // // pipeline/transformers version
275
  // let pipe = await pipeline('text-generation', {
276
  // model: MODELNAME,
277
+ // accessToken: HF_TOKEN
278
  // });
279
  // // seems to work with default model distilgpt2 ugh
280