Commit 03644bc
Parent(s): e35eddb

investigating..
Files changed:
- src/app/api/resolvers/video/route.ts (+7 -1)
- src/app/api/v1/edit/videos/processShot.ts (+1 -1)
- src/app/api/v1/render/animatediff-lightning/cluster.ts (+1 -1)
- src/app/api/v1/render/route.ts (+0 -1)
- src/app/latent/search/page.tsx (+3 -2)
- src/app/page.tsx (+7 -3)
- src/components/interface/latent-engine/core/engine.tsx (+3 -2)
- src/components/interface/latent-engine/core/generateClapFromPrompt.ts (+124 -0)
- src/components/interface/latent-engine/core/generators/fetchLatentClap.ts (+2 -2)
- src/components/interface/latent-engine/core/samples.ts (+28 -0)
- src/components/interface/latent-engine/core/useLatentEngine.ts (+56 -12)
src/app/api/resolvers/video/route.ts
CHANGED
@@ -39,6 +39,7 @@ export async function GET(req: NextRequest) {
     return NextResponse.json({ error: `access denied ${err}` }, { status: 400 });
   }
   */
+  console.log("[API] /api/resolvers/video")
 
   let prompt = ""
   try {
@@ -71,8 +72,13 @@ export async function GET(req: NextRequest) {
   prompt = getPositivePrompt(prompt)
   const negativePrompt = getNegativePrompt()
 
-  // console.log("calling await newRender")
 
+  console.log("calling await newRender with", {
+    prompt,
+    negativePrompt,
+  })
+
+  throw new Error("no! use render()!")
   let render = await newRender({
     prompt,
     negativePrompt,
src/app/api/v1/edit/videos/processShot.ts
CHANGED
@@ -17,7 +17,6 @@ import { getVideoPrompt } from "@aitube/engine"
 import { getPositivePrompt } from "@/app/api/utils/imagePrompts"
 
 import { render } from "@/app/api/v1/render/animatediff-lcm-svd"
-// import { render } from "@/app/api/v1/render/animatediff-lightning"
 import { extractFirstFrame } from "@/app/api/utils/extractFirstFrame"
 
 export async function processShot({
@@ -33,6 +32,7 @@ export async function processShot({
   mode: ClapCompletionMode
   turbo: boolean
 }): Promise<void> {
+  // console.log(`processShot()`)
   const shotSegments: ClapSegment[] = filterSegments(
     ClapSegmentFilteringMode.BOTH,
     shotSegment,
src/app/api/v1/render/animatediff-lightning/cluster.ts
CHANGED
@@ -3,7 +3,7 @@ import { ClusterMachine } from "../../types"
 
 
 // video generation requires A100s so we need to be parcimonous here,
-// we shouldn't burn too many GPUs
+// we shouldn't burn too many GPUs - 3 seems like a good number
 export const nbClusterMachines = 2
 // make sure the machines are running!!
 
src/app/api/v1/render/route.ts
CHANGED
@@ -5,7 +5,6 @@ import { getValidNumber } from "@aitube/clap"
 import { throwIfInvalidToken } from "@/app/api/v1/auth/throwIfInvalidToken"
 import { getContentType } from "@/lib/data/getContentType"
 
-// import { render } from "./animatediff-lcm-svd"
 import { render } from "./animatediff-lightning"
 
 export async function POST(req: NextRequest, res: NextResponse) {
src/app/latent/search/page.tsx
CHANGED
@@ -1,11 +1,12 @@
 import { encode, decode } from 'js-base64'
-import { clapToDataUri
+import { clapToDataUri } from "@aitube/clap"
 
 import { LatentQueryProps } from "@/types/general"
 
 import { Main } from "../../main"
 import { getNewMediaInfo } from "../../api/generators/search/getNewMediaInfo"
 import { getToken } from "../../api/v1/auth/getToken"
+import { generateClapFromPrompt } from '@/components/interface/latent-engine/core/generateClapFromPrompt'
 
 // https://jmswrnr.com/blog/protecting-next-js-api-routes-query-parameters
 
@@ -28,7 +29,7 @@ export default async function LatentSearchPage({
   const latentMedia = getNewMediaInfo()
 
   latentMedia.clapUrl = await clapToDataUri(
-
+    generateClapFromPrompt({
       showIntroPoweredByEngine: false,
       showIntroDisclaimerAboutAI: false
     })
src/app/page.tsx
CHANGED
@@ -83,9 +83,13 @@ export default async function Page({ searchParams: { v: videoId } }: AppQueryProps
   })
   return (
     <div className="flex flex-col items-center justify-center h-screen v-screen bg-stone-900">
-      <div className="
-
-
+      <div className="
+        flex flex-col items-center justify-center text-center
+        w-3/4 h-full
+
+      ">
+        <h1 className="text-stone-200 text-6xl font-thin">Say goodbye to static videos.</h1>
+        <p className="mt-12 text-stone-400 text-xl font-thin">Beta planned for Winter 2024. Follow <a href="x.com/@flngr" className="font-normal font-mono text-stone-400 hover:text-stone-300 hover:underline hover:underline-offset-2" target="_blank">@flngr</a> for updates.</p>
       </div>
     </div>
   )
src/components/interface/latent-engine/core/engine.tsx
CHANGED
@@ -2,6 +2,7 @@
 
 import React, { MouseEventHandler, useEffect, useRef, useState } from "react"
 import { useLocalStorage } from "usehooks-ts"
+import { ClapProject } from "@aitube/clap"
 
 import { cn } from "@/lib/utils/cn"
 import { MediaInfo } from "@/types/general"
@@ -13,8 +14,8 @@ import { ContentLayer } from "../components/content-layer"
 import { localStorageKeys } from "@/app/state/localStorageKeys"
 import { defaultSettings } from "@/app/state/defaultSettings"
 import { useStore } from "@/app/state/useStore"
-import { ClapProject, generateClapFromSimpleStory, serializeClap } from "@aitube/clap"
 import { theSimps } from "@/app/latent/samples"
+import { generateClapFromPrompt } from "./generateClapFromPrompt"
 
 function LatentEngine({
   media,
@@ -86,7 +87,7 @@ function LatentEngine({
 
     // TODO Julian work on the chunk mechanism
 
-    const mockClap: ClapProject =
+    const mockClap: ClapProject = generateClapFromPrompt({
       story: theSimps,
       showIntroPoweredByEngine: false,
       showIntroDisclaimerAboutAI: false
src/components/interface/latent-engine/core/generateClapFromPrompt.ts
ADDED
@@ -0,0 +1,124 @@
+import { ClapOutputType, ClapProject, ClapSegmentCategory, newClap, newSegment } from "@aitube/clap"
+
+import { defaultSegmentDurationInMs, demoStory } from "./samples"
+
+export function generateClapFromPrompt({
+  story = demoStory,
+  showIntroPoweredByEngine = false,
+  showIntroDisclaimerAboutAI = false,
+}: {
+  story?: string[]
+  showIntroPoweredByEngine?: boolean
+  showIntroDisclaimerAboutAI?: boolean
+} = {
+  story: demoStory,
+  showIntroPoweredByEngine: false,
+  showIntroDisclaimerAboutAI: false,
+}): ClapProject {
+
+  const clap = newClap({
+    meta: {
+      title: "Interactive Demo",
+      isInteractive: true,
+      isLoop: true,
+      description: story,
+      synopsis: story,
+    }
+  })
+
+  let startTimeInMs = 0
+  let endTimeInMs = defaultSegmentDurationInMs
+
+  if (showIntroPoweredByEngine) {
+    clap.segments.push(newSegment({
+      startTimeInMs,
+      endTimeInMs,
+      category: ClapSegmentCategory.INTERFACE,
+      prompt: "<BUILTIN:POWERED_BY_ENGINE>",
+      label: "disclaimer",
+      outputType: ClapOutputType.INTERFACE,
+    }))
+    startTimeInMs += defaultSegmentDurationInMs
+    endTimeInMs += defaultSegmentDurationInMs
+  }
+
+  if (showIntroDisclaimerAboutAI) {
+    clap.segments.push(newSegment({
+      startTimeInMs,
+      endTimeInMs,
+      category: ClapSegmentCategory.INTERFACE,
+      prompt: "<BUILTIN:DISCLAIMER_ABOUT_AI>",
+      label: "disclaimer",
+      outputType: ClapOutputType.INTERFACE,
+    }))
+    startTimeInMs += defaultSegmentDurationInMs
+    endTimeInMs += defaultSegmentDurationInMs
+  }
+
+  /*
+  clap.segments.push(
+    newSegment({
+      // id: string
+      // track: number
+      startTimeInMs,
+      endTimeInMs,
+      category: ClapSegmentCategory.INTERFACE,
+      // entityId: string
+      // sceneId: string
+      prompt: "a hello world",
+      label: "hello world",
+      outputType: ClapOutputType.INTERFACE,
+      // renderId: string
+      // status: ClapSegmentStatus
+      // assetUrl: string
+      // assetDurationInMs: number
+      // createdBy: ClapAuthor
+      // editedBy: ClapAuthor
+      // outputGain: number
+      // seed: number
+    })
+  )
+  startTimeInMs += defaultSegmentDurationInMs
+  endTimeInMs += defaultSegmentDurationInMs
+  */
+
+
+
+  for (let prompt of story) {
+
+    clap.segments.push(newSegment({
+      track: 0,
+      startTimeInMs,
+      endTimeInMs,
+      category: ClapSegmentCategory.VIDEO,
+      prompt: "",
+      label: "video",
+      outputType: ClapOutputType.VIDEO,
+    }))
+    clap.segments.push(newSegment({
+      track: 1,
+      startTimeInMs,
+      endTimeInMs,
+      category: ClapSegmentCategory.GENERIC,
+      prompt,
+      label: prompt,
+      outputType: ClapOutputType.TEXT,
+    }))
+    clap.segments.push(newSegment({
+      track: 2,
+      startTimeInMs,
+      endTimeInMs,
+      category: ClapSegmentCategory.CAMERA,
+      prompt: "medium-shot",
+      label: "medium-shot",
+      outputType: ClapOutputType.TEXT,
+    }))
+
+    startTimeInMs += defaultSegmentDurationInMs
+    endTimeInMs += defaultSegmentDurationInMs
+  }
+
+  clap.meta.durationInMs = endTimeInMs
+
+  return clap
+}
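
Note: for context on how the new helper is wired up elsewhere in this commit, src/app/latent/search/page.tsx passes its result straight to clapToDataUri, and engine.tsx uses it to build mockClap. A minimal sketch of that call shape (the fishDemoStory override is purely illustrative; only the options shown in the diff are assumed to exist):

import { clapToDataUri } from "@aitube/clap"

import { generateClapFromPrompt } from "@/components/interface/latent-engine/core/generateClapFromPrompt"
import { fishDemoStory } from "@/components/interface/latent-engine/core/samples"

// build an interactive, looping demo project from a list of prompts
// (omitting `story` falls back to demoStory from ./samples)
const clap = generateClapFromPrompt({
  story: fishDemoStory,
  showIntroPoweredByEngine: false,
  showIntroDisclaimerAboutAI: false,
})

// same composition as in src/app/latent/search/page.tsx: serialize
// the project into a data: URI for the player (inside an async context)
const clapUrl = await clapToDataUri(clap)
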
src/components/interface/latent-engine/core/generators/fetchLatentClap.ts
CHANGED
@@ -4,7 +4,7 @@ export async function fetchLatentClap(prompt: string): Promise<ClapProject> {
 
   const requestUri = `/api/resolvers/clap?p=${encodeURIComponent(prompt)}`
 
-  console.log(`fetchLatentClap: calling ${requestUri}`)
+  // console.log(`fetchLatentClap: calling ${requestUri}`)
 
   const res = await fetch(requestUri)
 
@@ -12,7 +12,7 @@ export async function fetchLatentClap(prompt: string): Promise<ClapProject> {
 
   const clap = await parseClap(blob)
 
-  console.log(`fetchLatentClap: received = `, clap)
+  // console.log(`fetchLatentClap: received = `, clap)
 
   return clap
 }
src/components/interface/latent-engine/core/samples.ts
ADDED
@@ -0,0 +1,28 @@
+export const defaultSegmentDurationInMs = 2500 // 2584
+
+export const fishDemoStory = [
+  "Siamese fighting fish, bokeh, underwater, coral, lively, bubbles, translucency, perfect",
+
+  // this one is magnificient!
+  "princess parrot fish, bokeh, underwater, coral, lively, bubbles, translucency, perfect",
+
+  "pacific ocean perch, bokeh, underwater, coral, lively, bubbles, translucency, perfect",
+
+  "Queen angelfish, bokeh, underwater, coral, lively, bubbles, translucency, perfect",
+
+  "sea turtle, bokeh, underwater, coral, lively, bubbles, translucency, perfect",
+
+  "hippocampus, bokeh, underwater, coral, lively, bubbles, translucency, perfect",
+]
+
+export const demoStory = [
+  ...fishDemoStory,
+
+  // "portrait of one man news anchor, 60yo, thin, fit, american, mustache, beard, wearing a suit, medium-shot, central park, outside, serious, bokeh, perfect",
+
+  // "screenshot from Call of Duty, FPS game, nextgen, videogame screenshot, unreal engine, raytracing, perfect",
+
+  // "screenshot from a flight simulator, nextgen, videogame screenshot, unreal engine, raytracing, perfect",
+  // "screenshot from fallout3, fallout4, western, wasteland, 3rd person RPG, nextgen, videogame screenshot, unreal engine, raytracing, perfect",
+  // "portrait of single influencer woman, 30yo, thin, fit, american, wearing a red tshirt, medium-shot, central park, outside, serious, bokeh, perfect",
+]
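
Note: a timing detail worth flagging (an observation about the code above, not something the commit itself states): generateClapFromPrompt initializes endTimeInMs one slot ahead and advances it once more per prompt before assigning clap.meta.durationInMs, so the reported duration includes one trailing empty slot. A quick sanity check:

import { defaultSegmentDurationInMs, fishDemoStory } from "./samples"

// endTimeInMs starts at 2500 and is bumped by 2500 per prompt,
// so n prompts yield durationInMs = (n + 1) * 2500
const expectedDurationInMs = (fishDemoStory.length + 1) * defaultSegmentDurationInMs
console.log(expectedDurationInMs) // 6 prompts -> 17500 ms
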
src/components/interface/latent-engine/core/useLatentEngine.ts
CHANGED
@@ -356,21 +356,36 @@ export const useLatentEngine = create<LatentEngineStore>((set, get) => ({
     let toPlay: HTMLVideoElement[] = []
     let toPreload: HTMLVideoElement[] = []
 
+    console.log("clap: ", clap)
+
+    ;(window as any).debugJuju = clap;
+
     for (let i = 0; i < videosSortedByStartAt.length; i++) {
       const video = videosSortedByStartAt[i]
 
       const segmentStartAt = getSegmentStartAt(video)
       const segmentEndAt = getSegmentEndAt(video)
 
+      console.log(`segs: `, {
+        segmentStartAt,
+        segmentEndAt
+      })
       // this segment has been spent, it should be discared
       if (segmentEndAt < positionInMs) {
+        console.log("A")
         toRecycle.push(video)
       } else if (segmentStartAt < positionInMs) {
+        console.log("B")
         toPlay.push(video)
         video.play()
         setZIndexDepthId(video, 10)
       } else {
+        console.log("C")
         toPreload.push(video)
+
+        // Julian: I'm adding that to see if it's the problem
+        //toRecycle.push(video)
+
         video.pause()
         setZIndexDepthId(video, 0)
       }
@@ -382,22 +397,29 @@ export const useLatentEngine = create<LatentEngineStore>((set, get) => ({
     // to grab the max number of segments
     const maxBufferDurationInMs = positionInMs + (videoDurationInMs * 4)
 
+
 
-
+    const prefilterSegmentsForPerformanceReasons: ClapSegment[] = clap.segments.filter(s =>
+      s.startTimeInMs >= positionInMs &&
+      s.startTimeInMs < maxBufferDurationInMs
+    )
+
+
+    console.log(`runVideoSimulationLoop: `, {
       positionInMs,
       videoModelDurationInSec,
      videoDurationInMs,
       "(videoDurationInMs * 4)": (videoDurationInMs * 4),
       maxBufferDurationInMs,
-      segments: clap.segments
+      segments: clap.segments,
+      prefilterSegmentsForPerformanceReasons,
+      videosSortedByStartAt,
+      toPlay,
+      toPreload,
+      toRecycle
     })
-
-    const prefilterSegmentsForPerformanceReasons: ClapSegment[] = clap.segments.filter(s =>
-      s.startTimeInMs >= positionInMs &&
-      s.startTimeInMs < maxBufferDurationInMs
-    )
 
-    console.log(`prefilterSegmentsForPerformanceReasons: `, prefilterSegmentsForPerformanceReasons)
+    // console.log(`prefilterSegmentsForPerformanceReasons: `, prefilterSegmentsForPerformanceReasons)
 
     // this tells us how much time is left
     let remainingTimeInMs = Math.max(0, clap.meta.durationInMs - positionInMs)
@@ -414,7 +436,7 @@ export const useLatentEngine = create<LatentEngineStore>((set, get) => ({
     let bufferAheadOfCurrentPositionInMs = positionInMs
 
     for (let i = 0; i < toRecycle.length; i++) {
-      console.log(`got a spent video to recycle`)
+      // console.log(`got a spent video to recycle`)
 
       // we select the segments in the current shot
 
@@ -515,7 +537,7 @@ export const useLatentEngine = create<LatentEngineStore>((set, get) => ({
 
     try {
       if (get().isPlaying) {
-
+        console.log(`runSimulationLoop: rendering UI layer..`)
 
         // note: for now we only display one panel at a time,
         // later we can try to see if we should handle more
@@ -577,8 +599,12 @@ export const useLatentEngine = create<LatentEngineStore>((set, get) => ({
     // list to put into the buffer the one that should be displayed
     runRenderingLoop: () => {
       const {
+        clap,
         isLoaded,
         isPlaying,
+        isInteractive,
+        isLive,
+        isLoop,
         renderingIntervalId,
         renderingIntervalDelayInMs,
         renderingLastRenderAt,
@@ -597,12 +623,30 @@ export const useLatentEngine = create<LatentEngineStore>((set, get) => ({
         document.querySelectorAll('.video-buffer')
       ) as HTMLVideoElement[]
 
+      // ------------ TIMELINE CURSOR PROGRESSION CYCLE -------------
+      // the following implements the mechanism of moving the cursor
+      // within the timeline
+
      const newRenderingLastRenderAt = performance.now()
       const elapsedInMs = newRenderingLastRenderAt - renderingLastRenderAt
 
-
-
+      let newPositionInMs = positionInMs
+      // this is were we decide what to do based on the current mode
+      if (isInteractive) {
+        console.log("interactive mode: nothing to do")
+
+      } else {
+
+        newPositionInMs += elapsedInMs
 
+        if (isLoop) {
+          if (newPositionInMs > 5000) {
+            console.log("end of loop detected! going back")
+            newPositionInMs = 0
+          }
+        }
+      }
+
       clearInterval(renderingIntervalId)
 
       set({
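
Note: to summarize the new timeline-cursor cycle added to runRenderingLoop, here is the same decision logic factored into a standalone function. This is an illustrative sketch only, and the 5000 ms loop bound is the hardcoded value from the diff (presumably a temporary debug limit), not a real project duration:

// sketch: the cursor-progression rules from runRenderingLoop as a pure function
function nextPositionInMs({
  positionInMs,
  elapsedInMs,
  isInteractive,
  isLoop,
}: {
  positionInMs: number
  elapsedInMs: number
  isInteractive: boolean
  isLoop: boolean
}): number {
  // interactive mode: the cursor does not advance on its own
  if (isInteractive) {
    return positionInMs
  }

  // normal playback: advance by the wall-clock time since the last render
  let newPositionInMs = positionInMs + elapsedInMs

  // looping playback: wrap back to the start once past the (debug) bound
  if (isLoop && newPositionInMs > 5000) {
    newPositionInMs = 0
  }
  return newPositionInMs
}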