Update app.py
app.py CHANGED
@@ -5,6 +5,7 @@ import sys
 import argparse
 import random
 import time
+import numpy as np
 from omegaconf import OmegaConf
 import torch
 import torchvision
@@ -21,22 +22,15 @@ from funcs import (
     save_videos
 )
 from transformers import pipeline
+from diffusers import DiffusionPipeline
 
-
-
-
-torch.cuda.empty_cache()
-torch.cuda.set_per_process_memory_fraction(0.7)
+# define constants
+MAX_SEED = np.iinfo(np.int32).max
+MAX_IMAGE_SIZE = 2048
 
+# DynamiCrafter model setup
 def download_model():
-
-    filename_list = ['model.ckpt']
-    if not os.path.exists('./checkpoints/dynamicrafter_1024_v1/'):
-        os.makedirs('./checkpoints/dynamicrafter_1024_v1/')
-    for filename in filename_list:
-        local_file = os.path.join('./checkpoints/dynamicrafter_1024_v1/', filename)
-        if not os.path.exists(local_file):
-            hf_hub_download(repo_id=REPO_ID, filename=filename, local_dir='./checkpoints/dynamicrafter_1024_v1/', force_download=True)
+    # ... (keep existing code)
 
 download_model()
 ckpt_path='checkpoints/dynamicrafter_1024_v1/model.ckpt'
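Note: applied literally, this hunk leaves download_model() with only a comment for a body, which Python rejects as an empty function even though download_model() is still called below. The "# ... (keep existing code)" placeholder suggests the original body, shown in the removed lines above, is meant to remain. A minimal sketch of that body restored; REPO_ID and the hf_hub_download import are assumed to be defined elsewhere in app.py, and the repo id shown here is an assumption:

import os
from huggingface_hub import hf_hub_download

REPO_ID = "Doubiiu/DynamiCrafter_1024"  # assumed value; app.py defines REPO_ID elsewhere

def download_model():
    # Fetch the DynamiCrafter 1024 checkpoint into ./checkpoints/ if it is missing.
    filename_list = ['model.ckpt']
    if not os.path.exists('./checkpoints/dynamicrafter_1024_v1/'):
        os.makedirs('./checkpoints/dynamicrafter_1024_v1/')
    for filename in filename_list:
        local_file = os.path.join('./checkpoints/dynamicrafter_1024_v1/', filename)
        if not os.path.exists(local_file):
            hf_hub_download(repo_id=REPO_ID, filename=filename,
                            local_dir='./checkpoints/dynamicrafter_1024_v1/',
                            force_download=True)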
@@ -57,7 +51,7 @@ translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
 device = "cpu"  # load on CPU initially
 dtype = torch.float32  # use float32
 
-pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=dtype
+pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=dtype)
 
 # optimize memory usage
 if torch.cuda.is_available():
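The body of the `if torch.cuda.is_available():` branch falls outside this hunk, so it is not shown. Purely as a hedged sketch of what such a branch typically does for a model as large as FLUX.1-dev, not the file's actual code, using only standard torch / diffusers calls (enable_model_cpu_offload() requires accelerate):

if torch.cuda.is_available():
    # Cap this process's share of GPU memory (the commit removes a similar
    # module-level torch.cuda.set_per_process_memory_fraction(0.7) call).
    torch.cuda.set_per_process_memory_fraction(0.7)
    # Keep weights on the CPU and move each submodule to the GPU only while
    # it runs, trading speed for a much smaller peak VRAM footprint.
    pipe.enable_model_cpu_offload()
else:
    pipe = pipe.to(device)  # stay on CPU in float32, as loaded above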
@@ -85,6 +79,7 @@ def infer_t2i(prompt, seed=42, randomize_seed=False, width=1024, height=1024, gu
 
     torch.cuda.empty_cache()
     return image, seed
+
 
 @spaces.GPU(duration=300)
 def infer(image, prompt, steps=50, cfg_scale=7.5, eta=1.0, fs=3, seed=123, video_length=2):
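The new MAX_SEED and MAX_IMAGE_SIZE constants follow the common Gradio text-to-image demo pattern, where MAX_SEED bounds seed re-rolling and MAX_IMAGE_SIZE bounds the width/height controls. A sketch of that usage matching infer_t2i's seed / randomize_seed / width / height parameters; the helper names below are hypothetical and illustrative only, not the file's actual implementation:

import random
import numpy as np

MAX_SEED = np.iinfo(np.int32).max  # largest 32-bit seed value
MAX_IMAGE_SIZE = 2048              # upper bound for width/height

def resolve_seed(seed: int, randomize_seed: bool) -> int:
    # Re-roll the seed when the caller asks for a random one.
    return random.randint(0, MAX_SEED) if randomize_seed else seed

def clamp_size(width: int, height: int) -> tuple[int, int]:
    # Keep requested dimensions within the advertised maximum.
    return min(width, MAX_IMAGE_SIZE), min(height, MAX_IMAGE_SIZE)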