// src/app/server/stableDiffusion.ts
"use server"
import { StableDiffusionParams } from "@/types"
import { serverHuggingfaceApiKey, serverHuggingfaceInferenceApiFileType, serverHuggingfaceInferenceApiModel, serverHuggingfaceInferenceApiModelRefinerModel, serverHuggingfaceInferenceApiModelTrigger } from "./config"
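
// For reference, a plausible shape for StableDiffusionParams, inferred from the
// destructuring below (the actual definition lives in "@/types"):
//
//   interface StableDiffusionParams {
//     prompt: string
//     negativePrompt: string
//     guidanceScale: number
//     seed: number
//     width: number
//     height: number
//     numInferenceSteps: number
//     hfApiKey: string // optional override, falls back to the server-side key
//   }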

export async function stableDiffusion({
  prompt,
  negativePrompt,
  guidanceScale,
  seed,
  width,
  height,
  numInferenceSteps,
  hfApiKey,
}: StableDiffusionParams) {
  // kill switch kept around for planned maintenance windows
  // throw new Error("Planned maintenance")

  if (!prompt) {
    const error = `cannot call the rendering API without a prompt, aborting...`
    console.error(error)
    throw new Error(error)
  }

  // a user-provided key takes precedence over the server-side key
  const huggingfaceApiKey = hfApiKey || serverHuggingfaceApiKey
  const huggingfaceInferenceApiModel = serverHuggingfaceInferenceApiModel
  const huggingfaceInferenceApiModelRefinerModel = serverHuggingfaceInferenceApiModelRefinerModel
  const huggingfaceInferenceApiModelTrigger = serverHuggingfaceInferenceApiModelTrigger
  const huggingfaceInferenceApiFileType = serverHuggingfaceInferenceApiFileType

  try {
    if (!huggingfaceApiKey) {
      throw new Error(`invalid huggingfaceApiKey, you need to configure your HF_API_TOKEN`)
    }
    if (!huggingfaceInferenceApiModel) {
      throw new Error(`invalid huggingfaceInferenceApiModel, you need to configure your HF_INFERENCE_API_BASE_MODEL`)
    }
    if (!huggingfaceInferenceApiModelRefinerModel) {
      throw new Error(`invalid huggingfaceInferenceApiModelRefinerModel, you need to configure your HF_INFERENCE_API_REFINER_MODEL`)
    }

    const baseModelUrl = `https://api-inference.huggingface.co/models/${huggingfaceInferenceApiModel}`

    // prepend the model's trigger word (if any) to the user prompt,
    // eg. trigger "pixel-art" + prompt "a cat" -> "pixel-art, a cat"
    const positivePrompt = [
      huggingfaceInferenceApiModelTrigger || "",
      prompt,
    ].filter(x => x).join(", ")

    const res = await fetch(baseModelUrl, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        Accept: huggingfaceInferenceApiFileType,
        Authorization: `Bearer ${huggingfaceApiKey}`,
      },
      body: JSON.stringify({
        inputs: positivePrompt,
        parameters: {
          negative_prompt: negativePrompt,
          num_inference_steps: numInferenceSteps,
          guidance_scale: guidanceScale,
          width,
          height,
          // note: `seed` is accepted by this function but not forwarded to the API
        },
        // the Inference API reads caching flags from `options`, not from the top level of the body
        options: {
          use_cache: false, // withCache,
        },
      }),
      cache: "no-store",
      // we could also rely on the Next.js data cache instead (see https://vercel.com/blog/vercel-cache-api-nextjs-cache)
      // next: { revalidate: 1 }
    })

    if (!res.ok) {
      const content = await res.text()
      console.error(content)
      // this will activate the closest `error.js` Error Boundary
      throw new Error(`the base model request failed (status ${res.status})`)
    }

    // encode the raw image bytes as a data URI so the client can use it directly
    const buffer = await res.arrayBuffer()
    const contentType = res.headers.get('content-type')
    let assetUrl = `data:${contentType};base64,${Buffer.from(buffer).toString('base64')}`

    // optional refinement pass: feed the generated image back through a
    // second (image-to-image) model; any failure here is non-blocking
    try {
      const refinerModelUrl = `https://api-inference.huggingface.co/models/${huggingfaceInferenceApiModelRefinerModel}`

      const refinerRes = await fetch(refinerModelUrl, {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
          Authorization: `Bearer ${huggingfaceApiKey}`,
        },
        body: JSON.stringify({
          // the refiner consumes the base64-encoded image produced by the base model
          inputs: Buffer.from(buffer).toString('base64'),
          parameters: {
            prompt: positivePrompt,
            num_inference_steps: numInferenceSteps,
            guidance_scale: guidanceScale,
            width,
            height,
          },
          // the Inference API reads caching flags from `options`, not from the top level of the body
          // (`wait_for_model: true` could also be set here to block until the refiner is loaded)
          options: {
            use_cache: false, // withCache,
          },
        }),
        cache: "no-store",
        // we could also rely on the Next.js data cache instead (see https://vercel.com/blog/vercel-cache-api-nextjs-cache)
        // next: { revalidate: 1 }
      })

      if (!refinerRes.ok) {
        const content = await refinerRes.json()
        // if (content?.error?.includes("currently loading")) {
        //   console.log("refiner isn't ready yet")
        // }
        throw new Error(content?.error || `the refiner request failed (status ${refinerRes.status})`)
      }

      const refinedBuffer = await refinerRes.arrayBuffer()
      const refinedContentType = refinerRes.headers.get('content-type')
      assetUrl = `data:${refinedContentType};base64,${Buffer.from(refinedBuffer).toString('base64')}`
    } catch (err) {
      // best-effort: if refinement fails we keep the unrefined base image
      console.log(`Refiner step failed, but this is not a blocker. Error details: ${err}`)
    }

    return assetUrl
  } catch (err) {
    console.error(err)
    // return an empty string so callers can detect failure without crashing the page
    return ""
  }
}
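
// Minimal usage sketch (hypothetical caller, not part of this module), assuming
// typical SDXL-style values; adjust to whatever the UI actually sends:
//
//   const assetUrl = await stableDiffusion({
//     prompt: "watercolor lighthouse at dusk",
//     negativePrompt: "blurry, low quality",
//     guidanceScale: 7.5,
//     seed: 42,
//     width: 1024,
//     height: 1024,
//     numInferenceSteps: 25,
//     hfApiKey: "", // empty -> falls back to the server-side HF_API_TOKEN
//   })
//   // assetUrl is a `data:image/...;base64,...` URI, or "" if the base call failed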