Update app.py
app.py CHANGED

@@ -11,6 +11,7 @@ pipe_14b = Cosmos2TextToImagePipeline.from_pretrained(model_14b_id,
         revision="refs/pr/1",
         torch_dtype=torch.bfloat16
 )
+
 pipe_2b = Cosmos2TextToImagePipeline.from_pretrained(model_2b_id,
         revision="refs/pr/2",
         text_encoder=pipe_14b.text_encoder,
@@ -35,7 +36,7 @@ scheduler = EDMEulerScheduler(
 )
 
 pipe_14b.scheduler = scheduler
-
+pipe_2b.scheduler = scheduler
 
 @spaces.GPU(duration=75)
 def generate_image(prompt,
@@ -48,13 +49,12 @@ def generate_image(prompt,
 
     if model_choice == "14B":
         pipe = pipe_14b
-
+        pipe_2b.to("cpu")
         torch.cuda.empty_cache()
 
-
     else:
         pipe = pipe_2b
-
+        pipe_14b.to("cpu")
         torch.cuda.empty_cache()
 
     if randomize_seed:
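The diff follows a common pattern for serving two large pipelines from one ZeroGPU Space: keep only the selected pipeline active, move the other to the CPU, then clear the CUDA cache so the active checkpoint fits in GPU memory. Below is a minimal sketch of that offload pattern, assuming two already-loaded diffusers pipelines; the select_pipeline helper name is hypothetical and not part of app.py.

import torch

def select_pipeline(model_choice, pipe_14b, pipe_2b):
    # Hypothetical helper illustrating the offload pattern above:
    # keep only the chosen pipeline for the GPU, park the other on the CPU,
    # then release cached CUDA memory before generating.
    if model_choice == "14B":
        active, idle = pipe_14b, pipe_2b
    else:
        active, idle = pipe_2b, pipe_14b
    idle.to("cpu")
    torch.cuda.empty_cache()
    return active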