1inkusFace committed on
Commit
9a7a252
·
verified ·
1 Parent(s): ddd7baa

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -2
app.py CHANGED
@@ -1,4 +1,6 @@
1
- import spaces
 
 
2
  import os
3
 
4
  os.putenv('PYTORCH_NVML_BASED_CUDA_CHECK','1')
@@ -11,6 +13,8 @@ os.environ['PYTORCH_CUDA_ALLOC_CONF'] = ','.join(alloc_conf_parts)
11
  os.environ["SAFETENSORS_FAST_GPU"] = "1"
12
  os.putenv('HF_HUB_ENABLE_HF_TRANSFER','1')
13
 
 
 
14
  import gradio as gr
15
  import numpy as np
16
  import random
@@ -93,7 +97,7 @@ pipe = StableDiffusion3Pipeline.from_pretrained(
93
  transformer=None,
94
  tokenizer_3=T5TokenizerFast.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", add_prefix_space=False, use_fast=True, subfolder="tokenizer_3", token=True),
95
  #torch_dtype=torch.bfloat16,
96
- #use_safetensors=False,
97
  )
98
  text_encoder=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder', token=True).to(torch.device("cuda:0"), dtype=torch.bfloat16)
99
  text_encoder_2=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True).to(torch.device("cuda:0"), dtype=torch.bfloat16)
 
1
+ import subprocess
2
+ subprocess.run(['sh', './spaces.sh'])
3
+
4
  import os
5
 
6
  os.putenv('PYTORCH_NVML_BASED_CUDA_CHECK','1')
 
13
  os.environ["SAFETENSORS_FAST_GPU"] = "1"
14
  os.putenv('HF_HUB_ENABLE_HF_TRANSFER','1')
15
 
16
+ import spaces
17
+
18
  import gradio as gr
19
  import numpy as np
20
  import random
 
97
  transformer=None,
98
  tokenizer_3=T5TokenizerFast.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", add_prefix_space=False, use_fast=True, subfolder="tokenizer_3", token=True),
99
  #torch_dtype=torch.bfloat16,
100
+ use_safetensors=True,
101
  )
102
  text_encoder=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder', token=True).to(torch.device("cuda:0"), dtype=torch.bfloat16)
103
  text_encoder_2=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True).to(torch.device("cuda:0"), dtype=torch.bfloat16)