Surn committed
Commit 8534154 · 1 Parent(s): 527fcc4

Get GPU Count, update Cuda version, remove xformers cuda

README.md CHANGED
@@ -5,7 +5,7 @@ colorFrom: yellow
 colorTo: purple
 sdk: gradio
 python_version: 3.10.13
-sdk_version: 5.12.0
+sdk_version: 5.15.0
 app_file: app.py
 pinned: false
 short_description: Transform Your Images into Mesmerizing Hexagon Grids
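The `sdk_version` bump from 5.12.0 to 5.15.0 only takes effect once the Space rebuilds. A minimal sanity check, assuming nothing beyond the stock `gradio` package, is to print the installed version and compare it with the pinned value:

```python
# Hypothetical check, not part of this commit: confirm the rebuilt Space
# environment provides the Gradio release pinned in the README front matter.
import gradio

print(gradio.__version__)  # expected to read 5.15.0 after the rebuild
```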
requirements.txt CHANGED
@@ -1,10 +1,15 @@
 accelerate
 invisible_watermark
-# Updated versions 2.4.0+cu118
-torch==2.4.0 --index-url https://download.pytorch.org/whl/cu118/torch-2.4.0%2Bcu118-cp310-cp310-linux_x86_64.whl#sha256=80f75f98282dfcca50a013ce14ee6a4385680e1c15cb0e9b376612442137ead5
-torchvision --index-url https://download.pytorch.org/whl/cu118
-torchaudio --index-url https://download.pytorch.org/whl/cu118
-xformers==0.0.27.post2 --index-url https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl#sha256=b3cdeeb9eae4547805ab8c3c645ac2fa9c6da85b46c039d9befa117e9f6f22fe
+# Updated versions 2.4.0+cu124
+#torch==2.4.0 --index-url https://download.pytorch.org/whl/cu124/torch-2.4.0%2Bcu124-cp310-cp310-linux_x86_64.whl#sha256=2cb28155635e3d3d0be198e3f3e7457a1d7b99e8c2eedc73fe22fab574d11a4c
+#torchvision --index-url https://download.pytorch.org/whl/cu124
+#torchaudio --index-url https://download.pytorch.org/whl/cu124
+#xformers --index-url https://download.pytorch.org/whl/cu124
+# ==0.0.27.post2 --index-url https://download.pytorch.org/whl/cu118/xformers-0.0.27.post2%2Bcu118-cp310-cp310-manylinux2014_x86_64.whl#sha256=b3cdeeb9eae4547805ab8c3c645ac2fa9c6da85b46c039d9befa117e9f6f22fe
+
+#generic Torch versions
+torch
+xformers
 
 # Other dependencies
 Haishoku
@@ -13,11 +18,11 @@ huggingface_hub
 # git+https://github.com/huggingface/[email protected]#egg=transformers
 transformers==4.48.1
 gradio[oauth]
-Pillow>=11.1.0
+Pillow>=11.0.0
 numpy
 requests
-# git+https://github.com/huggingface/diffusers
-diffusers[torch]
+git+https://github.com/huggingface/diffusers
+#diffusers[torch]
 peft
 opencv-python
 open3d
@@ -34,6 +39,6 @@ pycairo
 cairocffi
 pangocffi
 pangocairocffi
-tensorflow
+#tensorflow
 cairosvg
 python-dotenv
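Dropping the cu118 wheel pins in favor of unpinned `torch` and `xformers` leaves the CUDA flavor up to whichever wheels the resolver selects at build time. A short, hypothetical post-install check (not part of the repository) can confirm a CUDA-enabled build actually landed:

```python
# Hypothetical post-install check, not part of the repository: verify that the
# unpinned torch / xformers requirements resolved to a CUDA-enabled build.
import torch
import xformers

print("torch:", torch.__version__)           # e.g. 2.x.y+cu124 for a CUDA wheel
print("compiled CUDA:", torch.version.cuda)  # None indicates a CPU-only build
print("CUDA available:", torch.cuda.is_available())
print("xformers:", xformers.__version__)
```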
utils/ai_generator_diffusers_flux.py CHANGED
@@ -3,7 +3,7 @@ import os
 import utils.constants as constants
 import spaces
 import torch
-from diffusers import FluxPipeline,FluxImg2ImgPipeline,FluxControlPipeline
+from diffusers import FluxPipeline,FluxImg2ImgPipeline,FluxControlPipeline, DiffusionPipeline
 import accelerate
 import transformers
 import safetensors
@@ -128,7 +128,7 @@ def generate_image_lowmem(
 # Initialize the pipeline inside the context manager
 pipe = pipeline_class.from_pretrained(
     model_name,
-    torch_dtype=torch.bfloat16 if device == "cuda" else torch.bfloat32
+    torch_dtype=torch.bfloat16 if device == "cuda" else torch.bfloat32
 ).to(device)
 # Optionally, don't use CPU offload if not necessary
 pipe.enable_model_cpu_offload()
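One caveat with the dtype line kept above: `torch.bfloat32` is not a real PyTorch dtype, so the fallback branch would raise `AttributeError` the first time the pipeline is built on a non-CUDA device. A minimal sketch of the usual pattern, with `black-forest-labs/FLUX.1-dev` standing in as an assumed example model id, looks like this:

```python
# Hedged sketch, not the Space's actual code path: pick bfloat16 on CUDA and
# float32 on CPU; torch.bfloat32 does not exist and would raise AttributeError.
import torch
from diffusers import DiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.bfloat16 if device == "cuda" else torch.float32

pipe = DiffusionPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",  # assumed example model id
    torch_dtype=dtype,
).to(device)
```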
utils/constants.py CHANGED
@@ -14,7 +14,7 @@ os.environ['USE_FLASH_ATTENTION'] = '1'
 #os.environ['XFORMERS_FORCE_DISABLE_TORCHSCRIPT']= '1'
 os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
 os.environ["PYTORCH_NVML_BASED_CUDA_CHECK"] = "1"
-os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
+os.environ["CUDA_VISIBLE_DEVICES"] = "0"
 
 IS_SHARED_SPACE = "Surn/HexaGrid" in os.environ.get('SPACE_ID', '')
 
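Hardcoding `CUDA_VISIBLE_DEVICES = "0"` pins the Space to a single GPU even on multi-GPU hardware. If the goal (per the commit title) is to react to the detected GPU count, a hedged alternative, not used in this commit, is to enumerate devices with `nvidia-smi` before any CUDA context is created and expose all of them:

```python
# Hedged sketch, not part of this commit: build CUDA_VISIBLE_DEVICES from the
# GPUs nvidia-smi reports instead of hardcoding "0". Must run before anything
# initializes CUDA, otherwise the setting is ignored.
import os
import subprocess

def detect_gpu_count() -> int:
    try:
        out = subprocess.run(
            ["nvidia-smi", "-L"], capture_output=True, text=True, check=True
        ).stdout
        # "nvidia-smi -L" prints one "GPU <index>: <name> ..." line per device.
        return sum(1 for line in out.splitlines() if line.startswith("GPU "))
    except (OSError, subprocess.CalledProcessError):
        return 0  # no NVIDIA driver or no visible GPUs

gpu_count = detect_gpu_count()
if gpu_count:
    os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(str(i) for i in range(gpu_count))
```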
utils/version_info.py CHANGED
@@ -49,7 +49,7 @@ def get_diffusers_version():
 #@spaces.GPU()
 def get_torch_info():
     try:
-        return [torch.__version__, f"CUDA Version:{torch.version.cuda}", f"Available:{torch.cuda.is_available()}", f"flash attention enabled: {torch.backends.cuda.flash_sdp_enabled()}", f"Capabilities: {torch.cuda.get_device_capability(0)}", f"Device Name: {torch.cuda.get_device_name(0)}"]
+        return [torch.__version__, f"CUDA Version:{torch.version.cuda}", f"Available:{torch.cuda.is_available()}", f"flash attention enabled: {torch.backends.cuda.flash_sdp_enabled()}", f"Capabilities: {torch.cuda.get_device_capability(0)}", f"Device Name: {torch.cuda.get_device_name(0)}", f"Device Count: {torch.cuda.device_count()}"]
     except Exception:
         return "<none>"
 
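With the added element, `get_torch_info()` now reports the CUDA device count alongside the existing fields. A hypothetical usage sketch (the rendering below is an assumption, not code from the repository):

```python
# Hypothetical usage: print the diagnostics list returned by get_torch_info(),
# one entry per line; the function returns "<none>" if CUDA introspection fails.
from utils.version_info import get_torch_info

info = get_torch_info()
if isinstance(info, list):
    print("\n".join(str(item) for item in info))  # last entry: "Device Count: N"
else:
    print(info)  # "<none>"
```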