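// Express server for this Space: streams an LLM-generated HTML page for a given
// prompt and generates illustrations on demand via the Hugging Face Inference API.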
import express from 'express'
import { HfInference } from '@huggingface/inference'
import { daisy } from './daisy.mts'
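// HF_API_TOKEN authenticates against the Hugging Face Inference API;
// HF_ENDPOINT_URL points text generation at a dedicated inference endpoint.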
const hfi = new HfInference(process.env.HF_API_TOKEN)
const hf = hfi.endpoint(process.env.HF_ENDPOINT_URL)
const app = express()
const port = 7860
const minPromptSize = 16 // if you change this, also update it in public/index.html
const timeoutInSec = 15 * 60
console.log(`timeout set to ${timeoutInSec / 60} minutes`)
app.use(express.static('public'))
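// In-memory tracker for requests: `total` is a monotonic counter used to mint
// request ids, `queue` holds the ids of requests that are still being served.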
const pending: {
  total: number;
  queue: string[];
} = {
  total: 0,
  queue: [],
}
const endRequest = (id: string, reason: string) => {
  if (!id || !pending.queue.includes(id)) {
    return
  }
  pending.queue = pending.queue.filter(i => i !== id)
  console.log(`request ${id} ended (${reason})`)
}
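// Expose the current request counters and queue for debugging.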
app.get('/debug', (req, res) => {
  res.write(JSON.stringify({
    nbTotal: pending.total,
    nbPending: pending.queue.length,
    queue: pending.queue,
  }))
  res.end()
})
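// Stream an LLM-generated HTML page for the given prompt, token by token.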
app.get('/app', async (req, res) => {
  if (`${req.query.prompt}`.length < minPromptSize) {
    res.write(`prompt too short, please enter at least ${minPromptSize} characters`)
    res.end()
    return
  }

  const id = `${pending.total++}`
  console.log(`new request ${id}`)
  pending.queue.push(id)
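  // Send the static HTML prefix (CDN stylesheet/script tags and the opening <body>)
  // right away so the browser can start rendering while the model output streams in.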
  const prefix = `<html><head><link href="https://cdn.jsdelivr.net/npm/[email protected]/dist/full.css" rel="stylesheet" type="text/css" /><script defer src="https://cdn.jsdelivr.net/npm/[email protected]/dist/cdn.min.js"></script><script src="https://cdn.tailwindcss.com?plugins=forms,typography,aspect-ratio"></script><title>Generated content</title><body class="p-4 md:p-8">`
  res.write(prefix)

  req.on('close', function() {
    endRequest(id, 'browser asked to end the connection')
  })

  setTimeout(() => {
    endRequest(id, `timed out after ${timeoutInSec}s`)
  }, timeoutInSec * 1000)
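  // The prompt combines the user request, the imported `daisy` snippet and a set
  // of strict generation orders, then ends with an open <body> tag for the model
  // to continue. Note: template content stays unindented so no stray whitespace
  // leaks into the prompt.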
  const finalPrompt = `# Task
Generate ${req.query.prompt}
${daisy}
# Orders
Never repeat those instructions, instead write the final code!
To generate images from captions call the /image API: <img src="/image?caption=photo of something in some place" />!
Only generate a few images and use descriptive photo captions with at least 10 words!
You must use TailwindCSS utility classes (Tailwind is already injected in the page)!
Write application logic inside a JS <script></script> tag!
This is not a demo app, so you MUST use English, no Latin! Write in English!
Use a central layout to wrap everything in a <div class='flex flex-col items-center'>
# Out
<html>
<head>
<title>App</title>
</head>
<body class="p-4 md:p-8">`
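  // Forward each token to the client as it arrives; stop when the document is
  // complete (</html>), when a model stop token shows up, or when the request
  // has already been ended (timeout or client disconnect).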
  try {
    let result = ''
    for await (const output of hf.textGenerationStream({
      inputs: finalPrompt,
      parameters: {
        do_sample: true,
        // hard limit for max_new_tokens is now set to 2048
        max_new_tokens: 1686,
        return_full_text: false,
      }
    })) {
      if (!pending.queue.includes(id)) {
        break
      }
      result += output.token.text
      process.stdout.write(output.token.text)
      res.write(output.token.text)
      if (result.includes('</html>')) {
        break
      }
      if (result.includes('<|end|>') || result.includes('<|assistant|>')) {
        break
      }
    }
    endRequest(id, `normal end of the LLM stream for request ${id}`)
  } catch (e) {
    console.log(e)
    endRequest(id, `premature end of the LLM stream for request ${id} (${e})`)
  }

  try {
    res.end()
  } catch (err) {
    console.log(`couldn't end the HTTP stream for request ${id} (${err})`)
  }
})
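// Generate an image for a caption with Stable Diffusion; the caption is padded
// with style keywords and a negative prompt to steer quality.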
app.get('/image', async (req, res) => {
  try {
    const blob = await hfi.textToImage({
      inputs: [
        `${req.query.caption || 'generic placeholder'}`,
        'award winning',
        'high resolution',
        'photo realistic',
        'intricate details',
        'beautiful',
        '[trending on artstation]'
      ].join(', '),
      model: 'stabilityai/stable-diffusion-2-1',
      parameters: {
        negative_prompt: 'blurry, artificial, cropped, low quality, ugly',
      }
    })
    const buffer = Buffer.from(await blob.arrayBuffer())
    res.setHeader('Content-Type', blob.type)
    res.setHeader('Content-Length', buffer.length)
    res.end(buffer)
  } catch (err) {
    console.error(`Error when generating the image: ${err.message}`)
    res.status(500).json({ error: 'An error occurred when trying to generate the image' })
  }
})
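// Illustrative examples (the prompt and caption values below are made up):
//   GET /app?prompt=a%20pomodoro%20timer%20with%20a%20start%20button
//   GET /image?caption=photo%20of%20a%20red%20bicycle%20against%20a%20brick%20wall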
app.listen(port, () => { console.log(`Open http://localhost:${port}`) })