man08man committed on
Commit
19f9a8b
Β·
verified Β·
1 Parent(s): c861875

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -21
app.py CHANGED
@@ -1,33 +1,24 @@
1
- # Add these imports FIRST
2
- import sys
3
- import warnings
4
- warnings.filterwarnings("ignore", message=".*cached_download.*")
5
-
6
- # Workaround for huggingface_hub compatibility
7
- from huggingface_hub import cached_download as hf_hub_download
8
- sys.modules['huggingface_hub'].cached_download = hf_hub_download
9
-
10
- # Now import other libraries
11
- from diffusers import StableDiffusionPipeline
12
- import torch
13
- import random
14
  from diffusers import StableDiffusionPipeline
15
  import torch
16
  import random
17
 
18
  def generate_image(prompt):
19
- # Use updated model loading syntax
20
  pipe = StableDiffusionPipeline.from_pretrained(
21
  "stabilityai/stable-diffusion-2-1",
22
- torch_dtype=torch.float16, # Add this for better performance
23
- use_auth_token="YOUR_HF_TOKEN", # Replace with your token
24
- safety_checker=None # Optional: disable safety filter
25
  )
26
 
27
- # Generate random seed
28
  seed = random.randint(0, 1000000)
29
  generator = torch.Generator().manual_seed(seed)
30
 
31
- # Generate image
32
- image = pipe(prompt, generator=generator).images[0]
33
- return image
 
 
 
 
 
 
1
+ # Replace ALL code with this updated version
 
 
 
 
 
 
 
 
 
 
 
 
2
  from diffusers import StableDiffusionPipeline
3
  import torch
4
  import random
5
 
6
def generate_image(prompt):
    """Generate one Stable Diffusion 2.1 image for *prompt*.

    Uses a random seed on every call, so repeated calls with the same
    prompt produce different images.

    Args:
        prompt: Text description of the image to generate.

    Returns:
        A PIL.Image produced by the pipeline (first image of the batch).
    """
    # Load the pipeline once and cache it on the function object:
    # from_pretrained re-reads multi-GB weights on every call otherwise,
    # which makes each generation take minutes instead of seconds.
    pipe = getattr(generate_image, "_pipe", None)
    if pipe is None:
        pipe = StableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1",
            torch_dtype=torch.float16,
            use_safetensors=True,
            variant="fp16",
        )
        # fp16 weights generally need an accelerator; many CPU ops do not
        # support half precision. NOTE(review): confirm the deployment
        # target has a GPU — on CPU-only hosts consider float32 instead.
        if torch.cuda.is_available():
            pipe = pipe.to("cuda")
        generate_image._pipe = pipe

    # Fresh random seed per call; a seeded Generator keeps the run
    # reproducible if the seed is ever logged/reused.
    seed = random.randint(0, 1000000)
    generator = torch.Generator().manual_seed(seed)

    image = pipe(
        prompt,
        generator=generator,
        num_inference_steps=25,
    ).images[0]
    return image
23
+
24
+ # Rest of your Gradio code remains the same