jbilcke-hf (HF staff) committed
Commit 8e3d721
Parent: 5ffd931
src/app/api/v1/edit/dialogues/processShot.ts ADDED
@@ -0,0 +1,49 @@
+
+ import { ClapProject, ClapSegment, getClapAssetSourceType } from "@aitube/clap"
+ import { getSpeechBackgroundAudioPrompt } from "@aitube/engine"
+
+ import { startOfSegment1IsWithinSegment2 } from "@/lib/utils/startOfSegment1IsWithinSegment2"
+ import { generateSpeechWithParlerTTS } from "@/app/api/generators/speech/generateVoiceWithParlerTTS"
+
+ export async function processShot({
+   shotSegment,
+   clap
+ }: {
+   shotSegment: ClapSegment
+   clap: ClapProject
+ }): Promise<void> {
+
+   const shotSegments: ClapSegment[] = clap.segments.filter(s =>
+     startOfSegment1IsWithinSegment2(s, shotSegment)
+   )
+
+   const shotDialogueSegments: ClapSegment[] = shotSegments.filter(s =>
+     s.category === "dialogue"
+   )
+
+   let shotDialogueSegment: ClapSegment | undefined = shotDialogueSegments.at(0)
+
+   console.log(`[api/generate/dialogues] processShot: shot [${shotSegment.startTimeInMs}:${shotSegment.endTimeInMs}] has ${shotSegments.length} segments (${shotDialogueSegments.length} dialogues)`)
+
+   if (shotDialogueSegment && !shotDialogueSegment.assetUrl) {
+     // console.log(`[api/generate/dialogues] generating audio..`)
+
+     try {
+       // this generates a mp3
+       shotDialogueSegment.assetUrl = await generateSpeechWithParlerTTS({
+         text: shotDialogueSegment.prompt,
+         audioId: getSpeechBackgroundAudioPrompt(shotSegments, clap.entityIndex, ["high quality", "crisp", "detailed"]),
+         debug: true,
+       })
+       shotDialogueSegment.assetSourceType = getClapAssetSourceType(shotDialogueSegment.assetUrl)
+
+     } catch (err) {
+       console.log(`[api/generate/dialogues] processShot: failed to generate audio: ${err}`)
+       throw err
+     }
+
+     console.log(`[api/generate/dialogues] processShot: generated dialogue audio: ${shotDialogueSegment?.assetUrl?.slice?.(0, 50)}...`)
+   } else {
+     console.log(`[api/generate/dialogues] processShot: there is already a dialogue audio: ${shotDialogueSegment?.assetUrl?.slice?.(0, 50)}...`)
+   }
+ }
src/app/api/v1/edit/dialogues/route.ts CHANGED
@@ -1,11 +1,10 @@
  import { NextResponse, NextRequest } from "next/server"

- import { ClapProject, ClapSegment, getClapAssetSourceType, parseClap, serializeClap } from "@aitube/clap"
+ import { ClapProject, ClapSegment, parseClap, serializeClap } from "@aitube/clap"

- import { startOfSegment1IsWithinSegment2 } from "@/lib/utils/startOfSegment1IsWithinSegment2"
  import { getToken } from "@/app/api/auth/getToken"
- import { generateSpeechWithParlerTTS } from "@/app/api/generators/speech/generateVoiceWithParlerTTS"
- import { getSpeechBackgroundAudioPrompt } from "@aitube/engine"
+
+ import { processShot } from "./processShot"

  // a helper to generate speech for a Clap
  export async function POST(req: NextRequest) {
@@ -27,43 +26,13 @@ export async function POST(req: NextRequest) {
      throw new Error(`Error, this endpoint being synchronous, it is designed for short stories only (max 32 shots).`)
    }

-
-   for (const shotSegment of shotsSegments) {
-
-     const shotSegments: ClapSegment[] = clap.segments.filter(s =>
-       startOfSegment1IsWithinSegment2(s, shotSegment)
-     )
-
-     const shotDialogueSegments: ClapSegment[] = shotSegments.filter(s =>
-       s.category === "dialogue"
-     )
-
-     let shotDialogueSegment: ClapSegment | undefined = shotDialogueSegments.at(0)
-
-     console.log(`[api/generate/dialogues] shot [${shotSegment.startTimeInMs}:${shotSegment.endTimeInMs}] has ${shotSegments.length} segments (${shotDialogueSegments.length} dialogues)`)
-
-     if (shotDialogueSegment && !shotDialogueSegment.assetUrl) {
-       // console.log(`[api/generate/dialogues] generating audio..`)
-
-       try {
-         // this generates a mp3
-         shotDialogueSegment.assetUrl = await generateSpeechWithParlerTTS({
-           text: shotDialogueSegment.prompt,
-           audioId: getSpeechBackgroundAudioPrompt(shotSegments, clap.entityIndex, ["high quality", "crisp", "detailed"]),
-           debug: true,
-         })
-         shotDialogueSegment.assetSourceType = getClapAssetSourceType(shotDialogueSegment.assetUrl)
-
-       } catch (err) {
-         console.log(`[api/generate/dialogues] failed to generate audio: ${err}`)
-         throw err
-       }
-
-       console.log(`[api/generate/dialogues] generated dialogue audio: ${shotDialogueSegment?.assetUrl?.slice?.(0, 50)}...`)
-     } else {
-       console.log(`[api/generate/dialogues] there is already a dialogue audio: ${shotDialogueSegment?.assetUrl?.slice?.(0, 50)}...`)
-     }
-   }
+   // we process the shots in parallel (this will increase the queue size in the Gradio spaces)
+   await Promise.all(shotsSegments.map(shotSegment =>
+     processShot({
+       shotSegment,
+       clap
+     })
+   ))

    // console.log(`[api/generate/dialogues] returning the clap augmented with dialogues`)

src/app/api/v1/edit/storyboards/processShot.ts ADDED
@@ -0,0 +1,76 @@
+ import { ClapProject, ClapSegment, getClapAssetSourceType, newSegment, parseClap, serializeClap } from "@aitube/clap"
+ import { getVideoPrompt } from "@aitube/engine"
+
+ import { startOfSegment1IsWithinSegment2 } from "@/lib/utils/startOfSegment1IsWithinSegment2"
+
+ import { getPositivePrompt } from "@/app/api/utils/imagePrompts"
+ import { generateStoryboard } from "./generateStoryboard"
+
+ export async function processShot({
+   shotSegment,
+   clap
+ }: {
+   shotSegment: ClapSegment
+   clap: ClapProject
+ }): Promise<void> {
+
+   const shotSegments: ClapSegment[] = clap.segments.filter(s =>
+     startOfSegment1IsWithinSegment2(s, shotSegment)
+   )
+
+   const shotStoryboardSegments: ClapSegment[] = shotSegments.filter(s =>
+     s.category === "storyboard"
+   )
+
+   let shotStoryboardSegment: ClapSegment | undefined = shotStoryboardSegments.at(0)
+
+   // TASK 1: GENERATE MISSING STORYBOARD SEGMENT
+   if (!shotStoryboardSegment) {
+     shotStoryboardSegment = newSegment({
+       track: 1,
+       startTimeInMs: shotSegment.startTimeInMs,
+       endTimeInMs: shotSegment.endTimeInMs,
+       assetDurationInMs: shotSegment.assetDurationInMs,
+       category: "storyboard",
+       prompt: "",
+       assetUrl: "",
+       outputType: "image"
+     })
+
+     if (shotStoryboardSegment) {
+       clap.segments.push(shotStoryboardSegment)
+     }
+
+     console.log(`[api/generate/storyboards] processShot: generated storyboard segment [${shotSegment.startTimeInMs}:${shotSegment.endTimeInMs}]`)
+   }
+   if (!shotStoryboardSegment) { throw new Error(`failed to generate a newSegment`) }
+
+   // TASK 2: GENERATE MISSING STORYBOARD PROMPT
+   if (!shotStoryboardSegment?.prompt) {
+     // storyboard is missing, let's generate it
+     shotStoryboardSegment.prompt = getVideoPrompt(shotSegments, clap.entityIndex, ["high quality", "crisp", "detailed"])
+     console.log(`[api/generate/storyboards] processShot: generating storyboard prompt: ${shotStoryboardSegment.prompt}`)
+   }
+
+   // TASK 3: GENERATE MISSING STORYBOARD BITMAP
+   if (!shotStoryboardSegment.assetUrl) {
+     // console.log(`[api/generate/storyboards] generating image..`)
+
+     try {
+       shotStoryboardSegment.assetUrl = await generateStoryboard({
+         prompt: getPositivePrompt(shotStoryboardSegment.prompt),
+         width: clap.meta.width,
+         height: clap.meta.height,
+       })
+       shotStoryboardSegment.assetSourceType = getClapAssetSourceType(shotStoryboardSegment.assetUrl)
+     } catch (err) {
+       console.log(`[api/generate/storyboards] processShot: failed to generate an image: ${err}`)
+       throw err
+     }
+
+     console.log(`[api/generate/storyboards] processShot: generated storyboard image: ${shotStoryboardSegment?.assetUrl?.slice?.(0, 50)}...`)
+   } else {
+     console.log(`[api/generate/storyboards] processShot: there is already a storyboard image: ${shotStoryboardSegment?.assetUrl?.slice?.(0, 50)}...`)
+   }
+
+ }
src/app/api/v1/edit/storyboards/route.ts CHANGED
@@ -1,13 +1,10 @@
  import { NextResponse, NextRequest } from "next/server"

- import { ClapProject, ClapSegment, getClapAssetSourceType, newSegment, parseClap, serializeClap } from "@aitube/clap"
- import { getVideoPrompt } from "@aitube/engine"
+ import { ClapProject, ClapSegment, parseClap, serializeClap } from "@aitube/clap"

- import { startOfSegment1IsWithinSegment2 } from "@/lib/utils/startOfSegment1IsWithinSegment2"
  import { getToken } from "@/app/api/auth/getToken"

- import { getPositivePrompt } from "@/app/api/utils/imagePrompts"
- import { generateStoryboard } from "./generateStoryboard"
+ import { processShot } from "./processShot"

  // a helper to generate storyboards for a Clap
  // this is mostly used by external apps such as the Stories Factory
@@ -35,63 +32,13 @@ export async function POST(req: NextRequest) {
      throw new Error(`Error, this endpoint being synchronous, it is designed for short stories only (max 32 shots).`)
    }

-   for (const shotSegment of shotsSegments) {
-
-     const shotSegments: ClapSegment[] = clap.segments.filter(s =>
-       startOfSegment1IsWithinSegment2(s, shotSegment)
-     )
-
-     const shotStoryboardSegments: ClapSegment[] = shotSegments.filter(s =>
-       s.category === "storyboard"
-     )
-
-     let shotStoryboardSegment: ClapSegment | undefined = shotStoryboardSegments.at(0)
-
-     console.log(`[api/generate/storyboards] shot [${shotSegment.startTimeInMs}:${shotSegment.endTimeInMs}] has ${shotSegments.length} segments (${shotStoryboardSegments.length} storyboards)`)
-
-     // TASK 1: GENERATE MISSING STORYBOARD SEGMENT
-     if (!shotStoryboardSegment) {
-       shotStoryboardSegment = newSegment({
-         track: 1,
-         startTimeInMs: shotSegment.startTimeInMs,
-         endTimeInMs: shotSegment.endTimeInMs,
-         assetDurationInMs: shotSegment.assetDurationInMs,
-         category: "storyboard",
-         prompt: "",
-         assetUrl: "",
-         outputType: "image"
-       })
-       console.log(`[api/generate/storyboards] generated storyboard segment [${shotSegment.startTimeInMs}:${shotSegment.endTimeInMs}]`)
-     }
-
-     // TASK 2: GENERATE MISSING STORYBOARD PROMPT
-     if (shotStoryboardSegment && !shotStoryboardSegment?.prompt) {
-       // storyboard is missing, let's generate it
-       shotStoryboardSegment.prompt = getVideoPrompt(shotSegments, clap.entityIndex, ["high quality", "crisp", "detailed"])
-       console.log(`[api/generate/storyboards] generating storyboard prompt: ${shotStoryboardSegment.prompt}`)
-     }
-
-     // TASK 3: GENERATE MISSING STORYBOARD BITMAP
-     if (shotStoryboardSegment && !shotStoryboardSegment.assetUrl) {
-       // console.log(`[api/generate/storyboards] generating image..`)
-
-       try {
-         shotStoryboardSegment.assetUrl = await generateStoryboard({
-           prompt: getPositivePrompt(shotStoryboardSegment.prompt),
-           width: clap.meta.width,
-           height: clap.meta.height,
-         })
-         shotStoryboardSegment.assetSourceType = getClapAssetSourceType(shotStoryboardSegment.assetUrl)
-       } catch (err) {
-         console.log(`[api/generate/storyboards] failed to generate an image: ${err}`)
-         throw err
-       }
-
-       console.log(`[api/generate/storyboards] generated storyboard image: ${shotStoryboardSegment?.assetUrl?.slice?.(0, 50)}...`)
-     } else {
-       console.log(`[api/generate/storyboards] there is already a storyboard image: ${shotStoryboardSegment?.assetUrl?.slice?.(0, 50)}...`)
-     }
-   }
+   // we process the shots in parallel (this will increase the queue size in the Gradio spaces)
+   await Promise.all(shotsSegments.map(shotSegment =>
+     processShot({
+       shotSegment,
+       clap
+     })
+   ))

    // console.log(`[api/generate/storyboards] returning the clap augmented with storyboards`)

src/app/api/v1/edit/videos/processShot.ts ADDED
@@ -0,0 +1,82 @@
+ import { NextResponse, NextRequest } from "next/server"
+
+ import { ClapProject, ClapSegment, getClapAssetSourceType, newSegment, parseClap, serializeClap } from "@aitube/clap"
+ import { getVideoPrompt } from "@aitube/engine"
+
+ import { startOfSegment1IsWithinSegment2 } from "@/lib/utils/startOfSegment1IsWithinSegment2"
+ import { getToken } from "@/app/api/auth/getToken"
+ import { getPositivePrompt } from "@/app/api/utils/imagePrompts"
+
+ import { generateVideo } from "./generateVideo"
+
+ export async function processShot({
+   shotSegment,
+   clap
+ }: {
+   shotSegment: ClapSegment
+   clap: ClapProject
+ }): Promise<void> {
+   const shotSegments: ClapSegment[] = clap.segments.filter(s =>
+     startOfSegment1IsWithinSegment2(s, shotSegment)
+   )
+
+   const shotVideoSegments: ClapSegment[] = shotSegments.filter(s =>
+     s.category === "video"
+   )
+
+   let shotVideoSegment: ClapSegment | undefined = shotVideoSegments.at(0)
+
+   console.log(`[api/generate/videos] processShot: shot [${shotSegment.startTimeInMs}:${shotSegment.endTimeInMs}] has ${shotSegments.length} segments (${shotVideoSegments.length} videos)`)
+
+   // TASK 1: GENERATE MISSING VIDEO SEGMENT
+   if (!shotVideoSegment) {
+     shotVideoSegment = newSegment({
+       track: 1,
+       startTimeInMs: shotSegment.startTimeInMs,
+       endTimeInMs: shotSegment.endTimeInMs,
+       assetDurationInMs: shotSegment.assetDurationInMs,
+       category: "video",
+       prompt: "",
+       assetUrl: "",
+       outputType: "video"
+     })
+
+     if (shotVideoSegment) {
+       clap.segments.push(shotVideoSegment)
+     }
+
+     console.log(`[api/generate/videos] processShot: generated video segment [${shotSegment.startTimeInMs}:${shotSegment.endTimeInMs}]`)
+   }
+
+   if (!shotVideoSegment) {
+     throw new Error(`failed to generate a new segment`)
+   }
+
+   // TASK 2: GENERATE MISSING VIDEO PROMPT
+   if (!shotVideoSegment?.prompt) {
+     // video is missing, let's generate it
+     shotVideoSegment.prompt = getVideoPrompt(shotSegments, clap.entityIndex, ["high quality", "crisp", "detailed"])
+     console.log(`[api/generate/videos] processShot: generating video prompt: ${shotVideoSegment.prompt}`)
+   }
+
+   // TASK 3: GENERATE MISSING VIDEO FILE
+   if (!shotVideoSegment.assetUrl) {
+     console.log(`[api/generate/videos] processShot: generating video file..`)
+
+     try {
+       shotVideoSegment.assetUrl = await generateVideo({
+         prompt: getPositivePrompt(shotVideoSegment.prompt),
+         width: clap.meta.width,
+         height: clap.meta.height,
+       })
+       shotVideoSegment.assetSourceType = getClapAssetSourceType(shotVideoSegment.assetUrl)
+     } catch (err) {
+       console.log(`[api/generate/videos] processShot: failed to generate a video file: ${err}`)
+       throw err
+     }
+
+     console.log(`[api/generate/videos] processShot: generated video files: ${shotVideoSegment?.assetUrl?.slice?.(0, 50)}...`)
+   } else {
+     console.log(`[api/generate/videos] processShot: there is already a video file: ${shotVideoSegment?.assetUrl?.slice?.(0, 50)}...`)
+   }
+ }
src/app/api/v1/edit/videos/route.ts CHANGED
@@ -1,13 +1,10 @@
  import { NextResponse, NextRequest } from "next/server"

- import { ClapProject, ClapSegment, getClapAssetSourceType, newSegment, parseClap, serializeClap } from "@aitube/clap"
- import { getVideoPrompt } from "@aitube/engine"
+ import { ClapProject, ClapSegment, parseClap, serializeClap } from "@aitube/clap"

- import { startOfSegment1IsWithinSegment2 } from "@/lib/utils/startOfSegment1IsWithinSegment2"
  import { getToken } from "@/app/api/auth/getToken"
- import { getPositivePrompt } from "@/app/api/utils/imagePrompts"

- import { generateVideo } from "./generateVideo"
+ import { processShot } from "./processShot"


  // a helper to generate videos for a Clap
@@ -36,63 +33,13 @@ export async function POST(req: NextRequest) {
      throw new Error(`Error, this endpoint being synchronous, it is designed for short stories only (max 32 shots).`)
    }

-   for (const shotSegment of shotsSegments) {
-
-     const shotSegments: ClapSegment[] = clap.segments.filter(s =>
-       startOfSegment1IsWithinSegment2(s, shotSegment)
-     )
-
-     const shotVideoSegments: ClapSegment[] = shotSegments.filter(s =>
-       s.category === "video"
-     )
-
-     let shotVideoSegment: ClapSegment | undefined = shotVideoSegments.at(0)
-
-     console.log(`[api/generate/videos] shot [${shotSegment.startTimeInMs}:${shotSegment.endTimeInMs}] has ${shotSegments.length} segments (${shotVideoSegments.length} videos)`)
-
-     // TASK 1: GENERATE MISSING VIDEO SEGMENT
-     if (!shotVideoSegment) {
-       shotVideoSegment = newSegment({
-         track: 1,
-         startTimeInMs: shotSegment.startTimeInMs,
-         endTimeInMs: shotSegment.endTimeInMs,
-         assetDurationInMs: shotSegment.assetDurationInMs,
-         category: "video",
-         prompt: "",
-         assetUrl: "",
-         outputType: "video"
-       })
-       console.log(`[api/generate/videos] generated video segment [${shotSegment.startTimeInMs}:${shotSegment.endTimeInMs}]`)
-     }
-
-     // TASK 2: GENERATE MISSING VIDEO PROMPT
-     if (shotVideoSegment && !shotVideoSegment?.prompt) {
-       // video is missing, let's generate it
-       shotVideoSegment.prompt = getVideoPrompt(shotSegments, clap.entityIndex, ["high quality", "crisp", "detailed"])
-       console.log(`[api/generate/videos] generating video prompt: ${shotVideoSegment.prompt}`)
-     }
-
-     // TASK 3: GENERATE MISSING VIDEO FILE
-     if (shotVideoSegment && !shotVideoSegment.assetUrl) {
-       console.log(`[api/generate/videos] generating video file..`)
-
-       try {
-         shotVideoSegment.assetUrl = await generateVideo({
-           prompt: getPositivePrompt(shotVideoSegment.prompt),
-           width: clap.meta.width,
-           height: clap.meta.height,
-         })
-         shotVideoSegment.assetSourceType = getClapAssetSourceType(shotVideoSegment.assetUrl)
-       } catch (err) {
-         console.log(`[api/generate/videos] failed to generate a video file: ${err}`)
-         throw err
-       }
-
-       console.log(`[api/generate/videos] generated video files: ${shotVideoSegment?.assetUrl?.slice?.(0, 50)}...`)
-     } else {
-       console.log(`[api/generate/videos] there is already a video file: ${shotVideoSegment?.assetUrl?.slice?.(0, 50)}...`)
-     }
-   }
+   // we process the shots in parallel (this will increase the queue size in the Gradio spaces)
+   await Promise.all(shotsSegments.map(shotSegment =>
+     processShot({
+       shotSegment,
+       clap
+     })
+   ))

    console.log(`[api/generate/videos] returning the clap augmented with videos`)
