Update app.py
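Moves the FLUX.1-dev pipelines, both VAEs, and the seeded torch.Generator calls from CUDA to CPU; the @spaces.GPU(duration=70) decorator on generate_image is left in place.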
app.py CHANGED
@@ -19,12 +19,12 @@ with open('loras.json', 'r') as f:
 
 # Initialize the base model
 dtype = torch.bfloat16
-device = "cuda" if torch.cuda.is_available() else "cpu"
+device = "cpu"
 base_model = "black-forest-labs/FLUX.1-dev"
 
-taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
-good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype).to(device)
-pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=taef1).to(device)
+taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to("cpu")
+good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype).to("cpu")
+pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=taef1).to("cpu")
 pipe_i2i = AutoPipelineForImage2Image.from_pretrained(base_model,
     vae=good_vae,
     transformer=pipe.transformer,
@@ -33,7 +33,7 @@ pipe_i2i = AutoPipelineForImage2Image.from_pretrained(base_model,
     text_encoder_2=pipe.text_encoder_2,
     tokenizer_2=pipe.tokenizer_2,
     torch_dtype=dtype
-)
+).to("cpu")
 
 MAX_SEED = 2**32-1
 
@@ -80,8 +80,8 @@ def update_selection(evt: gr.SelectData, width, height):
 
 @spaces.GPU(duration=70)
 def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, progress):
-    pipe.to("cuda")
-    generator = torch.Generator(device="cuda").manual_seed(seed)
+    pipe.to("cpu")
+    generator = torch.Generator(device="cpu").manual_seed(seed)
     with calculateDuration("Generating image"):
         # Generate image
         for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
@@ -98,8 +98,8 @@ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, progress):
             yield img
 
 def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps, cfg_scale, width, height, lora_scale, seed):
-    generator = torch.Generator(device="cuda").manual_seed(seed)
-    pipe_i2i.to("cuda")
+    generator = torch.Generator(device="cpu").manual_seed(seed)
+    pipe_i2i.to("cpu")
     image_input = load_image(image_input_path)
     final_image = pipe_i2i(
         prompt=prompt_mash,
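For reference, a device-agnostic version of the same setup is sketched below. This is not the commit's code: it assumes the upstream imports (torch, diffusers) and only the kwargs visible in the hunks above (the lines elided from the diff are omitted here too), and it selects the device once so a single assignment moves the whole app between GPU and CPU.

import torch
from diffusers import (
    AutoencoderKL,
    AutoencoderTiny,
    AutoPipelineForImage2Image,
    DiffusionPipeline,
)

# Pick the device once; every .to(...) call below reuses it, so switching
# the whole app between GPU and CPU is a one-line change.
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.bfloat16

base_model = "black-forest-labs/FLUX.1-dev"

taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype).to(device)
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=taef1).to(device)

# Share components already loaded by `pipe` instead of loading them twice.
pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
    base_model,
    vae=good_vae,
    transformer=pipe.transformer,
    text_encoder_2=pipe.text_encoder_2,
    tokenizer_2=pipe.tokenizer_2,
    torch_dtype=dtype,
).to(device)

# Seeded generation then works unchanged on either device.
generator = torch.Generator(device=device).manual_seed(0)

On a CPU-only Space this pattern lands everything on CPU automatically, since torch.cuda.is_available() returns False, so no per-call .to("cpu") edits are needed.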