File size: 3,067 Bytes
1185ec1
f42b4a1
1185ec1
0d218b1
 
ac7030c
1185ec1
 
f24ad59
3d4392e
1185ec1
 
 
 
 
e40bd21
 
1185ec1
 
 
e40bd21
1185ec1
 
ac7030c
1185ec1
 
 
 
 
 
 
 
 
e40bd21
1185ec1
 
 
0d218b1
1185ec1
b3c7e0f
 
1185ec1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ac7030c
1185ec1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
import { v4 as uuidv4 } from "uuid"
import { Credentials } from "@/lib/huggingface/hub/src"

import { ClapProject, parseClap } from "@aitube/clap"

import { ChannelInfo, MediaInfo, VideoRequest } from "@/types/general"
import { defaultVideoModel } from "@/app/config"

import { parseVideoModelName } from "../../parsers/parseVideoModelName"
import { computeOrientationProjectionWidthHeight } from "../../utils/computeOrientationProjectionWidthHeight"

import { downloadFileAsBlob } from "./downloadFileAsBlob"

/**
 * Download a .clap project file from a (possibly private) Hugging Face
 * dataset repo and derive the records needed to queue a video generation.
 *
 * @param path - path of the .clap file inside the channel's dataset repo
 * @param channel - channel whose dataset hosts the file; also supplies
 *   fallback model/style/voice/music settings
 * @param credentials - Hugging Face credentials; `accessToken` is used so
 *   private repos can be read
 * @returns the parsed ClapProject plus the derived VideoRequest and MediaInfo
 */
export async function downloadClapProject({
  path,
  channel,
  credentials,
}: {
  path: string
  channel: ChannelInfo
  credentials: Credentials
}): Promise<{
  videoRequest: VideoRequest
  videoInfo: MediaInfo
  clapProject: ClapProject
}> {
  // we recover the repo from the channel info
  const repo = `datasets/${channel.datasetUser}/${channel.datasetName}`

  // we download the clap file (which might be in a private repo)
  const clapString = await downloadFileAsBlob({
    repo,
    path,
    apiKey: credentials.accessToken,
    expectedMimeType: "application/gzip"
  })

  const clapProject: ClapProject = await parseClap(clapString)

  // keep the project's own id when it has one, so re-imports stay stable
  const id = clapProject.meta.id || uuidv4()

  // single timestamp so both derived records agree on updatedAt
  // (previously each record called new Date() separately and could differ)
  const updatedAt = new Date().toISOString()

  const videoRequest: VideoRequest = {
    id,
    label: clapProject.meta.title || "Untitled",
    description: clapProject.meta.description || "",
    prompt: "", // there is no prompt - instead we use segments
    model: parseVideoModelName(clapProject.meta.defaultVideoModel, channel.model),
    style: channel.style,
    lora: channel.lora,
    voice: channel.voice,
    music: channel.music,
    thumbnailUrl: "",
    clapUrl: `https://huggingface.co/${repo}/resolve/main/${path}`,
    updatedAt,
    tags: channel.tags,
    channel,
    duration: 0, // will be computed automatically
    // NOTE(review): this call passes lora: "" while the videoInfo below
    // passes videoRequest.lora — confirm the asymmetry is intentional
    ...computeOrientationProjectionWidthHeight({
      lora: "",
      orientation: clapProject.meta.orientation,
      // projection, // <- will be extrapolated from the LoRA for now
    }),
  }

  const videoInfo: MediaInfo = {
    id,
    status: "submitted",
    label: videoRequest.label || "",
    description: videoRequest.description || "",
    prompt: videoRequest.prompt || "",
    model: videoRequest.model || defaultVideoModel,
    style: videoRequest.style || "",
    lora: videoRequest.lora || "",
    voice: videoRequest.voice || "",
    music: videoRequest.music || "",
    thumbnailUrl: videoRequest.thumbnailUrl || "", // will be generated in async
    clapUrl: videoRequest.clapUrl || "",
    assetUrl: "", // will be generated in async
    assetUrlHd: "",
    numberOfViews: 0,
    numberOfLikes: 0,
    numberOfDislikes: 0,
    updatedAt,
    tags: videoRequest.tags,
    channel,
    duration: videoRequest.duration || 0,
    ...computeOrientationProjectionWidthHeight({
      lora: videoRequest.lora,
      orientation: videoRequest.orientation,
      // projection, // <- will be extrapolated from the LoRA for now
    }),
  }

  return {
    videoRequest,
    videoInfo,
    clapProject
  }
}