Update app.py
app.py
CHANGED
@@ -22,6 +22,10 @@ from funcs import (
 )
 from transformers import pipeline
 
+import numpy as np
+from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler
+from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast
+
 def download_model():
     REPO_ID = 'Doubiiu/DynamiCrafter_1024'
     filename_list = ['model.ckpt']
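Note on the new imports: only numpy and DiffusionPipeline are actually referenced later in this diff; FlowMatchEulerDiscreteScheduler and the CLIP/T5 classes are imported but unused in the visible hunks. FLUX.1-dev already defaults to a flow-match Euler scheduler, which may be why no explicit wiring appears. If the import were to be used, a minimal sketch (assuming the pipe defined in the next hunk) would be:

# Hypothetical, not part of this commit: attach the scheduler explicitly
# via the standard diffusers from_config() pattern.
pipe.scheduler = FlowMatchEulerDiscreteScheduler.from_config(pipe.scheduler.config)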
@@ -47,6 +51,30 @@ model = model.cuda()
 # Initialize the translation model
 translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
 
+# FLUX model setup
+dtype = torch.bfloat16
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
+pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16).to(device)
+
+MAX_SEED = np.iinfo(np.int32).max
+MAX_IMAGE_SIZE = 2048
+
+@spaces.GPU(duration=190)
+def infer_t2i(prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=5.0, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
+    if randomize_seed:
+        seed = random.randint(0, MAX_SEED)
+    generator = torch.Generator().manual_seed(seed)
+    image = pipe(
+        prompt=prompt,
+        width=width,
+        height=height,
+        num_inference_steps=num_inference_steps,
+        generator=generator,
+        guidance_scale=guidance_scale
+    ).images[0]
+    return image, seed
+
 @spaces.GPU(duration=300)
 def infer(image, prompt, steps=50, cfg_scale=7.5, eta=1.0, fs=3, seed=123, video_length=2):
     # Detect Korean input and translate
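The body of infer is cut off by the hunk, but the surrounding context (the opus-mt-ko-en pipeline above plus the "Detect Korean input and translate" comment) suggests a step along these lines; maybe_translate is a hypothetical name, not code from this commit:

import re

def maybe_translate(prompt):
    # Hypothetical sketch: if the prompt contains Hangul syllables,
    # run it through the ko->en pipeline; otherwise pass it through unchanged.
    if re.search(r'[\uac00-\ud7a3]', prompt):
        return translator(prompt)[0]['translation_text']
    return prompt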
@@ -182,4 +210,28 @@ with gr.Blocks(analytics_enabled=False, css=css) as dynamicrafter_iface:
             fn = infer
         )
 
+
+    with gr.Tab(label='T2I'):
+        with gr.Column():
+            with gr.Row():
+                t2i_input_text = gr.Text(label='Prompt')
+            with gr.Row():
+                t2i_seed = gr.Slider(label='Seed', minimum=0, maximum=MAX_SEED, step=1, value=42)
+                t2i_randomize_seed = gr.Checkbox(label='Randomize seed', value=False)
+            with gr.Row():
+                t2i_width = gr.Slider(label='Width', minimum=256, maximum=MAX_IMAGE_SIZE, step=64, value=1024)
+                t2i_height = gr.Slider(label='Height', minimum=256, maximum=MAX_IMAGE_SIZE, step=64, value=1024)
+            with gr.Row():
+                t2i_guidance_scale = gr.Slider(label='Guidance Scale', minimum=1.0, maximum=20.0, step=0.1, value=5.0)
+                t2i_num_inference_steps = gr.Slider(label='Inference Steps', minimum=1, maximum=100, step=1, value=28)
+            t2i_generate_btn = gr.Button("Generate")
+            t2i_output_image = gr.Image(label="Generated Image", elem_id="t2i_output_img")
+            t2i_output_seed = gr.Number(label="Used Seed", elem_id="t2i_output_seed")
+
+        t2i_generate_btn.click(
+            fn=infer_t2i,
+            inputs=[t2i_input_text, t2i_seed, t2i_randomize_seed, t2i_width, t2i_height, t2i_guidance_scale, t2i_num_inference_steps],
+            outputs=[t2i_output_image, t2i_output_seed]
+        )
+
 dynamicrafter_iface.queue(max_size=12).launch(show_api=True)
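One caveat: infer_t2i calls random.randint, and none of the hunks add import random, so presumably random is already imported in the unchanged top of app.py. Assuming that holds, the new endpoint wired to the Generate button can be exercised outside the UI roughly like this:

# Rough sanity check, assuming the module-level pipe and imports above.
# pipe(...).images[0] is a PIL image, so .save() works directly.
image, used_seed = infer_t2i("a watercolor fox in falling snow", randomize_seed=True)
image.save("t2i_test.png")
print("used seed:", used_seed)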