Requirements updates - testing

- app.py +2 -1
- requirements.txt +13 -14
- src/condition.py +1 -0
- utils/ai_generator.py +2 -0
- utils/ai_generator_diffusers_flux.py +8 -3
- utils/constants.py +7 -4
- utils/version_info.py +4 -1
app.py
CHANGED
@@ -7,6 +7,7 @@ from pathlib import Path
 import atexit
 import random
 import spaces
+
 # Import constants
 import utils.constants as constants
 
@@ -160,7 +161,7 @@ def get_model_and_lora(model_textbox):
     default_model = model_textbox
     return default_model, []
 
-
+#@spaces.GPU(duration=256)
 def generate_input_image_click(map_option, prompt_textbox_value, negative_prompt_textbox_value, model_textbox_value, use_conditioned_image=False, strength=0.5, image_format="16:9", scale_factor=3):
     # Get the model and LoRA weights
     model, lora_weights = get_model_and_lora(model_textbox_value)
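The `@spaces.GPU` decorator (commented out here, enabled in `utils/ai_generator_diffusers_flux.py` below) comes from the `spaces` package on Hugging Face ZeroGPU Spaces: it requests a GPU for the duration of each decorated call. A minimal sketch of the pattern — the function body is hypothetical, for illustration only:

import spaces
import torch

@spaces.GPU(duration=140)  # hold a ZeroGPU device for up to 140 s per call
def run_on_gpu(prompt: str) -> str:
    # hypothetical body; the real functions load a diffusers pipeline here
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return f"ran on {device}: {prompt}"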
requirements.txt
CHANGED
@@ -1,10 +1,10 @@
 accelerate
 invisible_watermark
 # Updated versions 2.4.0+cu118
-torch==2.4.0 --index-url https://download.pytorch.org/whl/cu118
-torchvision
-torchaudio
-xformers==0.0.27.post2 --index-url https://download.pytorch.org/whl/cu118
+torch==2.4.0 --index-url https://download.pytorch.org/whl/cu118/torch-2.4.0%2Bcu118-cp310-cp310-linux_x86_64.whl#sha256=80f75f98282dfcca50a013ce14ee6a4385680e1c15cb0e9b376612442137ead5
+torchvision --index-url https://download.pytorch.org/whl/cu118
+torchaudio --index-url https://download.pytorch.org/whl/cu118
+xformers==0.0.27.post2 --index-url https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl#sha256=b3cdeeb9eae4547805ab8c3c645ac2fa9c6da85b46c039d9befa117e9f6f22fe
 
 # Other dependencies
 Haishoku
@@ -12,14 +12,12 @@ pybind11>=2.12
 huggingface_hub
 # git+https://github.com/huggingface/[email protected]#egg=transformers
 transformers==4.48.1
-gradio==5.12.0
 gradio[oauth]
 Pillow
-numpy
+numpy
 requests
-# git+https://github.com/huggingface/diffusers
-diffusers
-accelerate
+# git+https://github.com/huggingface/diffusers
+diffusers
 peft
 opencv-python
 open3d
@@ -27,12 +25,13 @@ protobuf
 safetensors
 sentencepiece
 git+https://github.com/asomoza/image_gen_aux.git
-#
-#
+#git+https://github.com/huggingface/optimum.git
+#git+https://github.com/triton-lang/triton.git -not windows supported --disable in environment variable
 tiktoken
-pilmoji[requests]==2.0.4
-emoji==2.2.0
+#pilmoji[requests]==2.0.4
+#emoji==2.2.0
 pycairo
 cairocffi
 pangocffi
-pangocairocffi
+pangocairocffi
+tensorflow
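A note on the torch/xformers lines: `--index-url` normally points pip at a package index root, while these lines point it at a specific wheel file. If the intent is to pin one exact wheel, the conventional requirements.txt forms are a PEP 508 direct reference, or a version pin resolved against the CUDA index — a sketch of standard pip syntax, not a tested substitute for the lines above:

# Direct-reference pin to a single wheel (PEP 508):
torch @ https://download.pytorch.org/whl/cu118/torch-2.4.0%2Bcu118-cp310-cp310-linux_x86_64.whl

# Or pin versions and let pip resolve them from the CUDA 11.8 index:
--extra-index-url https://download.pytorch.org/whl/cu118
torch==2.4.0+cu118
xformers==0.0.27.post2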
src/condition.py
CHANGED
@@ -13,6 +13,7 @@ condition_dict = {
     "deblurring": 7,
     "fill": 9,
 }
+
 class Condition(object):
     def __init__(
         self,
utils/ai_generator.py
CHANGED
@@ -3,6 +3,7 @@
 import os
 import time
 from turtle import width # Added for implementing delays
+import spaces
 import torch
 import random
 from utils.ai_generator_diffusers_flux import generate_ai_image_local
@@ -14,6 +15,7 @@ from PIL import Image
 from tempfile import NamedTemporaryFile
 import utils.constants as constants
 
+
 def generate_image_from_text(text, model_name="flax-community/dalle-mini", image_width=768, image_height=512):
     # Initialize the InferenceClient
     client = InferenceClient()
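For context, `generate_image_from_text` wraps the `huggingface_hub` `InferenceClient` shown in the hunk above. A self-contained sketch of that remote text-to-image call — the prompt and output path are illustrative, while the model name and sizes mirror the defaults in the diff:

from huggingface_hub import InferenceClient

client = InferenceClient()
image = client.text_to_image(
    "a hexagonal mosaic landscape",      # illustrative prompt
    model="flax-community/dalle-mini",   # default model from the diff
    width=768,                           # image_width default
    height=512,                          # image_height default
)
image.save("generated.png")              # text_to_image returns a PIL.Image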
utils/ai_generator_diffusers_flux.py
CHANGED
@@ -1,5 +1,6 @@
 # utils/ai_generator_diffusers_flux.py
 import os
+import spaces
 import torch
 from diffusers import FluxPipeline,FluxImg2ImgPipeline,FluxControlPipeline
 import accelerate
@@ -26,6 +27,7 @@ from utils.color_utils import detect_color_format
 import utils.misc as misc
 from pathlib import Path
 import warnings
+
 warnings.filterwarnings("ignore", message=".*Torch was not compiled with flash attention.*")
 #print(torch.__version__) # Ensure it's 2.0 or newer
 #print(torch.cuda.is_available()) # Ensure CUDA is available
@@ -35,7 +37,7 @@ PIPELINE_CLASSES = {
     "FluxImg2ImgPipeline": FluxImg2ImgPipeline
 }
 
-
+@spaces.GPU(duration=140)
 def generate_image_from_text(
     text,
     model_name="black-forest-labs/FLUX.1-dev",
@@ -95,6 +97,7 @@ def generate_image_from_text(
         pipe.unload_lora_weights()
     return image
 
+@spaces.GPU(duration=140)
 def generate_image_lowmem(
     text,
     neg_prompt=None,
@@ -112,13 +115,14 @@ def generate_image_lowmem(
     additional_parameters=None
 ):
     # Retrieve the pipeline class from the mapping
+    print(f"device:{device}\nmodel_name:{model_name}\nlora_weights:{lora_weights}\n")
+    print(f"\n {get_torch_info()}\n")
     pipeline_class = PIPELINE_CLASSES.get(pipeline_name)
     if not pipeline_class:
         raise ValueError(f"Unsupported pipeline type '{pipeline_name}'. "
                          f"Available options: {list(PIPELINE_CLASSES.keys())}")
     device = "cuda" if torch.cuda.is_available() else "cpu"
-
-    print(f"\n {get_torch_info()}\n")
+
     # Disable gradient calculations
     with torch.no_grad():
         # Initialize the pipeline inside the context manager
@@ -343,6 +347,7 @@ def generate_ai_image_local (
     return None
 
 # does not work
+#@spaces.GPU(duration=256)
 def merge_LoRA_weights(model="black-forest-labs/FLUX.1-dev",
                        lora_weights="Borcherding/FLUX.1-dev-LoRA-FractalLand-v0.1"):
 
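One caveat in the `generate_image_lowmem` hunk: the new debug print references `device` several lines before the `device = "cuda" if ...` assignment, which would raise a NameError at call time unless `device` is also defined at module scope. A sketch of the safe ordering, using only names that appear in the hunk:

# assignment first, then the two debug prints this commit adds
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"device:{device}\nmodel_name:{model_name}\nlora_weights:{lora_weights}\n")
print(f"\n {get_torch_info()}\n")
pipeline_class = PIPELINE_CLASSES.get(pipeline_name)
if not pipeline_class:
    raise ValueError(f"Unsupported pipeline type '{pipeline_name}'. "
                     f"Available options: {list(PIPELINE_CLASSES.keys())}")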
utils/constants.py
CHANGED
@@ -5,11 +5,14 @@ os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:256,expandable_segmen
 IS_SHARED_SPACE = "Surn/HexaGrid" in os.environ.get('SPACE_ID', '')
 
 # Set the temporary folder location
-os.environ['TEMP'] = r'e:\\TMP'
-os.environ['TMPDIR'] = r'e:\\TMP'
-os.environ['XDG_CACHE_HOME'] = r'E:\\cache'
+#os.environ['TEMP'] = r'e:\\TMP'
+#os.environ['TMPDIR'] = r'e:\\TMP'
+#os.environ['XDG_CACHE_HOME'] = r'E:\\cache'
 os.environ['USE_FLASH_ATTENTION'] = '1'
-os.environ['XFORMERS_FORCE_DISABLE_TRITON']= '1'
+#os.environ['XFORMERS_FORCE_DISABLE_TRITON']= '1'
+os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
+os.environ["PYTORCH_NVML_BASED_CUDA_CHECK"] = "1"
+os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
 
 # constants.py contains all the constants used in the project
 os.environ["HF_TOKEN"] = ""
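The new CUDA variables only take effect if they are set before the CUDA runtime initializes, i.e. before the first CUDA call in torch; importing `utils.constants` ahead of any GPU work (as app.py does with its early `import utils.constants as constants`) preserves that ordering. A minimal standalone sketch of the mechanism:

import os

# must run before torch initializes CUDA
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"   # number devices by PCI bus, matching nvidia-smi
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"   # expose at most the first four GPUs

import torch
print(torch.cuda.device_count())  # counts only the devices listed above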
utils/version_info.py
CHANGED
@@ -2,6 +2,7 @@
 
 import subprocess
 import os
+import spaces
 import torch
 import sys
 import gradio as gr
@@ -45,7 +46,7 @@ def get_diffusers_version():
         return diffusers.__version__
     except Exception:
         return "<none>"
-
+#@spaces.GPU()
 def get_torch_info():
     try:
         return [torch.__version__, f"CUDA Version:{torch.version.cuda}", f"Available:{torch.cuda.is_available()}", f"flash attention enabled: {torch.backends.cuda.flash_sdp_enabled()}", f"Capabilities: {torch.cuda.get_device_capability(0)}", f"Device Name: {torch.cuda.get_device_name(0)}"]
@@ -78,4 +79,6 @@ def versions_html():
     gradio: {gr.__version__}
      • 
     {toggle_dark_link}
+    <br>
+    Full GPU Info:{get_torch_info()}
     """
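Since `get_torch_info()` returns a list, the new `Full GPU Info:{get_torch_info()}` line renders the list's Python repr (brackets and quotes included) in the HTML. If plain text is preferred, one alternative — illustrative, not part of the commit — is to join the entries:

gpu_info = "<br>".join(str(item) for item in get_torch_info())
html_fragment = f"Full GPU Info:<br>{gpu_info}"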