Spaces:
Running
on
CPU Upgrade
Running
on
CPU Upgrade
Commit
·
12506f0
1
Parent(s):
c8231d6
VS -> VC
Browse files
- src/config.mts +1 -1
- src/index.mts +1 -1
- src/production/generateActor.mts +1 -1
- src/production/generateAudio.mts +1 -1
- src/production/generateAudioLegacy.mts +1 -1
- src/production/generateVideo.mts +1 -1
- src/production/generateVoice.mts +1 -1
- src/production/interpolateVideo.mts +1 -1
- src/production/interpolateVideoLegacy.mts +1 -1
- src/production/upscaleVideo.mts +1 -1
- src/tests/submitVideo.mts +1 -1
src/config.mts
CHANGED
@@ -1,6 +1,6 @@
|
|
1 |
import path from "node:path"
|
2 |
|
3 |
-
export const storagePath = `${process.env.VS_STORAGE_PATH || './sandbox'}`
|
4 |
|
5 |
export const tasksDirPath = path.join(storagePath, "tasks")
|
6 |
export const pendingTasksDirFilePath = path.join(tasksDirPath, "pending")
|
|
|
1 |
import path from "node:path"
|
2 |
|
3 |
+
export const storagePath = `${process.env.VC_STORAGE_PATH || './sandbox'}`
|
4 |
|
5 |
export const tasksDirPath = path.join(storagePath, "tasks")
|
6 |
export const pendingTasksDirFilePath = path.join(tasksDirPath, "pending")
|
src/index.mts
CHANGED
@@ -23,7 +23,7 @@ app.post("/", async (req, res) => {
|
|
23 |
const request = req.body as VideoSequenceRequest
|
24 |
|
25 |
const token = `${request.token || ""}`
|
26 |
-
if (token !== process.env.VS_SECRET_ACCESS_TOKEN) {
|
27 |
console.log("couldn't find access token in the query")
|
28 |
res.status(401)
|
29 |
res.write(JSON.stringify({ error: "invalid token" }))
|
|
|
23 |
const request = req.body as VideoSequenceRequest
|
24 |
|
25 |
const token = `${request.token || ""}`
|
26 |
+
if (token !== process.env.VC_SECRET_ACCESS_TOKEN) {
|
27 |
console.log("couldn't find access token in the query")
|
28 |
res.status(401)
|
29 |
res.write(JSON.stringify({ error: "invalid token" }))
|
src/production/generateActor.mts
CHANGED
@@ -4,7 +4,7 @@ import tmpDir from "temp-dir"
|
|
4 |
|
5 |
import { HfInference } from "@huggingface/inference"
|
6 |
|
7 |
-
const hf = new HfInference(process.env.VS_HF_API_TOKEN)
|
8 |
|
9 |
export const generateActor = async (prompt: string, fileName: string, seed: number) => {
|
10 |
const positivePrompt = [
|
|
|
4 |
|
5 |
import { HfInference } from "@huggingface/inference"
|
6 |
|
7 |
+
const hf = new HfInference(process.env.VC_HF_API_TOKEN)
|
8 |
|
9 |
export const generateActor = async (prompt: string, fileName: string, seed: number) => {
|
10 |
const positivePrompt = [
|
src/production/generateAudio.mts
CHANGED
@@ -8,7 +8,7 @@ import { downloadFileToTmp } from "../utils/downloadFileToTmp.mts"
|
|
8 |
import { moveFileFromTmpToPending } from "../utils/moveFileFromTmpToPending.mts"
|
9 |
|
10 |
const instances: string[] = [
|
11 |
-
process.env.VS_AUDIO_GENERATION_SPACE_API_URL
|
12 |
]
|
13 |
|
14 |
// TODO we should use an inference endpoint instead
|
|
|
8 |
import { moveFileFromTmpToPending } from "../utils/moveFileFromTmpToPending.mts"
|
9 |
|
10 |
const instances: string[] = [
|
11 |
+
process.env.VC_AUDIO_GENERATION_SPACE_API_URL
|
12 |
]
|
13 |
|
14 |
// TODO we should use an inference endpoint instead
|
src/production/generateAudioLegacy.mts
CHANGED
@@ -3,7 +3,7 @@ import { client } from '@gradio/client'
|
|
3 |
import { generateSeed } from "../utils/generateSeed.mts"
|
4 |
|
5 |
const instances: string[] = [
|
6 |
-
process.env.VS_AUDIO_GENERATION_SPACE_API_URL
|
7 |
]
|
8 |
|
9 |
export const generateAudio = async (prompt: string, options?: {
|
|
|
3 |
import { generateSeed } from "../utils/generateSeed.mts"
|
4 |
|
5 |
const instances: string[] = [
|
6 |
+
process.env.VC_AUDIO_GENERATION_SPACE_API_URL
|
7 |
]
|
8 |
|
9 |
export const generateAudio = async (prompt: string, options?: {
|
src/production/generateVideo.mts
CHANGED
@@ -4,7 +4,7 @@ import { client } from "@gradio/client"
|
|
4 |
import { generateSeed } from "../utils/generateSeed.mts"
|
5 |
|
6 |
const instances: string[] = [
|
7 |
-
process.env.VS_VIDEO_GENERATION_SPACE_API_URL
|
8 |
]
|
9 |
|
10 |
export const generateVideo = async (prompt: string, options?: {
|
|
|
4 |
import { generateSeed } from "../utils/generateSeed.mts"
|
5 |
|
6 |
const instances: string[] = [
|
7 |
+
process.env.VC_VIDEO_GENERATION_SPACE_API_URL
|
8 |
]
|
9 |
|
10 |
export const generateVideo = async (prompt: string, options?: {
|
src/production/generateVoice.mts
CHANGED
@@ -3,7 +3,7 @@ import puppeteer from "puppeteer"
|
|
3 |
import { downloadFileToTmp } from "../utils/downloadFileToTmp.mts"
|
4 |
|
5 |
const instances: string[] = [
|
6 |
-
process.env.VS_VOICE_GENERATION_SPACE_API_URL
|
7 |
]
|
8 |
|
9 |
// TODO we should use an inference endpoint instead
|
|
|
3 |
import { downloadFileToTmp } from "../utils/downloadFileToTmp.mts"
|
4 |
|
5 |
const instances: string[] = [
|
6 |
+
process.env.VC_VOICE_GENERATION_SPACE_API_URL
|
7 |
]
|
8 |
|
9 |
// TODO we should use an inference endpoint instead
|
src/production/interpolateVideo.mts
CHANGED
@@ -9,7 +9,7 @@ import { pendingFilesDirFilePath } from "../config.mts"
|
|
9 |
import { moveFileFromTmpToPending } from "../utils/moveFileFromTmpToPending.mts"
|
10 |
|
11 |
const instances: string[] = [
|
12 |
-
process.env.VS_VIDEO_INTERPOLATION_SPACE_API_URL
|
13 |
]
|
14 |
|
15 |
// TODO we should use an inference endpoint instead
|
|
|
9 |
import { moveFileFromTmpToPending } from "../utils/moveFileFromTmpToPending.mts"
|
10 |
|
11 |
const instances: string[] = [
|
12 |
+
process.env.VC_VIDEO_INTERPOLATION_SPACE_API_URL
|
13 |
]
|
14 |
|
15 |
// TODO we should use an inference endpoint instead
|
src/production/interpolateVideoLegacy.mts
CHANGED
@@ -8,7 +8,7 @@ import tmpDir from "temp-dir"
|
|
8 |
import { downloadFileToTmp } from '../utils/downloadFileToTmp.mts'
|
9 |
|
10 |
const instances: string[] = [
|
11 |
-
process.env.VS_VIDEO_INTERPOLATION_SPACE_API_URL
|
12 |
]
|
13 |
|
14 |
export const interpolateVideo = async (fileName: string, steps: number, fps: number) => {
|
|
|
8 |
import { downloadFileToTmp } from '../utils/downloadFileToTmp.mts'
|
9 |
|
10 |
const instances: string[] = [
|
11 |
+
process.env.VC_VIDEO_INTERPOLATION_SPACE_API_URL
|
12 |
]
|
13 |
|
14 |
export const interpolateVideo = async (fileName: string, steps: number, fps: number) => {
|
src/production/upscaleVideo.mts
CHANGED
@@ -9,7 +9,7 @@ import { pendingFilesDirFilePath } from '../config.mts'
|
|
9 |
import { moveFileFromTmpToPending } from "../utils/moveFileFromTmpToPending.mts"
|
10 |
|
11 |
const instances: string[] = [
|
12 |
-
process.env.VS_VIDEO_UPSCALE_SPACE_API_URL
|
13 |
]
|
14 |
|
15 |
// TODO we should use an inference endpoint instead (or a space which bakes generation + upscale at the same time)
|
|
|
9 |
import { moveFileFromTmpToPending } from "../utils/moveFileFromTmpToPending.mts"
|
10 |
|
11 |
const instances: string[] = [
|
12 |
+
process.env.VC_VIDEO_UPSCALE_SPACE_API_URL
|
13 |
]
|
14 |
|
15 |
// TODO we should use an inference endpoint instead (or a space which bakes generation + upscale at the same time)
|
src/tests/submitVideo.mts
CHANGED
@@ -8,7 +8,7 @@ const response = await fetch(`${server}/`, {
|
|
8 |
"Content-Type": "application/json"
|
9 |
},
|
10 |
body: JSON.stringify({
|
11 |
-
token: process.env.VS_SECRET_ACCESS_TOKEN,
|
12 |
sequence: {
|
13 |
id: videoId,
|
14 |
},
|
|
|
8 |
"Content-Type": "application/json"
|
9 |
},
|
10 |
body: JSON.stringify({
|
11 |
+
token: process.env.VC_SECRET_ACCESS_TOKEN,
|
12 |
sequence: {
|
13 |
id: videoId,
|
14 |
},
|