JiantaoLin
committed on
Commit
·
5d625d7
1
Parent(s):
dfb9c8a
new
Browse files- app.py +1 -1
- pipeline/kiss3d_wrapper.py +1 -0
app.py
CHANGED
@@ -153,7 +153,7 @@ def text_to_detailed(prompt, seed=None):
|
|
153 |
def text_to_image(prompt, seed=None, strength=1.0,lora_scale=1.0, num_inference_steps=20, redux_hparam=None, init_image=None, **kwargs):
|
154 |
# subprocess.run("rm -rf /data-nvme/zerogpu-offload/*", env={}, shell=True)
|
155 |
# print(f"Before text_to_image: {torch.cuda.memory_allocated() / 1024**3} GB")
|
156 |
-
k3d_wrapper.flux_pipeline.enable_xformers_memory_efficient_attention()
|
157 |
k3d_wrapper.renew_uuid()
|
158 |
init_image = None
|
159 |
# if init_image_path is not None:
|
|
|
153 |
def text_to_image(prompt, seed=None, strength=1.0,lora_scale=1.0, num_inference_steps=20, redux_hparam=None, init_image=None, **kwargs):
|
154 |
# subprocess.run("rm -rf /data-nvme/zerogpu-offload/*", env={}, shell=True)
|
155 |
# print(f"Before text_to_image: {torch.cuda.memory_allocated() / 1024**3} GB")
|
156 |
+
# k3d_wrapper.flux_pipeline.enable_xformers_memory_efficient_attention()
|
157 |
k3d_wrapper.renew_uuid()
|
158 |
init_image = None
|
159 |
# if init_image_path is not None:
|
pipeline/kiss3d_wrapper.py
CHANGED
@@ -75,6 +75,7 @@ def init_wrapper_from_config(config_path):
|
|
75 |
else:
|
76 |
flux_pipe = FluxImg2ImgPipeline.from_pretrained(flux_base_model_pth, torch_dtype=dtype_[flux_dtype], token=access_token)
|
77 |
# flux_pipe.enable_vae_slicing()
|
|
|
78 |
|
79 |
# flux_pipe.enable_sequential_cpu_offload()
|
80 |
# load flux model and controlnet
|
|
|
75 |
else:
|
76 |
flux_pipe = FluxImg2ImgPipeline.from_pretrained(flux_base_model_pth, torch_dtype=dtype_[flux_dtype], token=access_token)
|
77 |
# flux_pipe.enable_vae_slicing()
|
78 |
+
flux_pipe.enable_vae_tiling()
|
79 |
|
80 |
# flux_pipe.enable_sequential_cpu_offload()
|
81 |
# load flux model and controlnet
|