Update app.py
app.py CHANGED
@@ -87,12 +87,13 @@ def generate_image_from_text(prompt, seed=0):
     ).images[0]
     return image
 
-
+import torch
+
 def infer(image, prompt, steps=50, cfg_scale=7.5, eta=1.0, seed=123, video_length=2, fs=8):
     translated_prompt = translate_prompt(prompt)
     print(f"Translated prompt: {translated_prompt}")
     resolution = (576, 1024)
-    save_fs = fs  # fs …
+    save_fs = torch.tensor(fs)  # convert fs to a tensor
     seed_everything(seed)
     transform = transforms.Compose([
         transforms.Resize(min(resolution), antialias=True),
@@ -105,7 +106,7 @@ def infer(image, prompt, steps=50, cfg_scale=7.5, eta=1.0, seed=123, video_lengt
     steps = 60
     batch_size = 1
     channels = model.model.diffusion_model.out_channels
-    frames = int(video_length * …
+    frames = int(video_length * fs)
     h, w = resolution[0] // 8, resolution[1] // 8
     noise_shape = [batch_size, channels, frames, h, w]
     with torch.no_grad(), torch.cuda.amp.autocast():
@@ -121,9 +122,10 @@ def infer(image, prompt, steps=50, cfg_scale=7.5, eta=1.0, seed=123, video_lengt
     cond = {"c_crossattn": [imtext_cond], "c_concat": [img_tensor_repeat], "fs": save_fs}
     batch_samples = batch_ddim_sampling(model, cond, noise_shape, n_samples=1, ddim_steps=steps, ddim_eta=eta, cfg_scale=cfg_scale)
     video_path = './output.mp4'
-    save_videos(batch_samples, './', filenames=['output'], fps=…
+    save_videos(batch_samples, './', filenames=['output'], fps=fs)
     return video_path
 
+
 css = """
 .tab-nav {
     border-bottom: 2px solid #ddd;
@@ -158,7 +160,7 @@ css = """
 /* per-tab colors */
 .tab-nav button:nth-child(1) { border-top: 3px solid #ff6b6b; }
 .tab-nav button:nth-child(2) { border-top: 3px solid #4ecdc4; }
-.tab-nav button:nth-child(3) { border-top: 3px solid #…
+.tab-nav button:nth-child(3) { border-top: 3px solid #f7b731; }
 """
 
 def infer_t2v(prompt, seed=123, steps=50, cfg_scale=7.5, eta=1.0, fs=8, video_length=2):
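Taken together, the commit imports torch, converts fs to a tensor before it is stored in the conditioning dict, derives the frame count from video_length * fs, passes fps=fs to save_videos, and fills in the third tab's accent color. Below is a minimal, standalone sketch of the shape arithmetic behind the patched infer; it assumes only torch, uses channels = 4 as a stand-in for model.model.diffusion_model.out_channels, and its variable names simply mirror the diff rather than any library API.

import torch

# Values mirroring the diff's defaults (illustrative only)
video_length = 2                # seconds of video requested
fs = 8                          # frame stride / frames per second from the UI
resolution = (576, 1024)
batch_size = 1
channels = 4                    # assumed here; the app reads it from the model

frames = int(video_length * fs)                  # 2 * 8 = 16 latent frames
h, w = resolution[0] // 8, resolution[1] // 8    # VAE downsamples by 8: 72 x 128
noise_shape = [batch_size, channels, frames, h, w]
print(noise_shape)                               # [1, 4, 16, 72, 128]

# The core fix in the commit: "fs" is wrapped in a tensor before it goes
# into the cond dict, presumably because the model embeds it as conditioning.
save_fs = torch.tensor(fs)
cond = {"fs": save_fs}          # the real cond also carries c_crossattn / c_concat

With these defaults, a 2-second request at fs=8 samples 16 frames at a 72x128 latent, and because save_videos is called with fps=fs, the exported MP4 plays back at the same rate used to compute the frame count.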