Muhammad Anas Akhtar committed (verified)
Commit a7e6201 · 1 Parent(s): 62a64ba

Update app.py

Files changed (1): app.py (+5 −21)
app.py CHANGED
@@ -1,34 +1,18 @@
 import gradio as gr
 import torch
-#from diffusers import StableDiffusionPipeline
-
-
 from diffusers import DiffusionPipeline
 
-
-
-
+# Define the image generation function
 def image_generation(prompt):
     # Check if GPU is available
     device = "cuda" if torch.cuda.is_available() else "cpu"
 
-    # Load the Stable Diffusion 3 pipeline
+    # Load the Stable Diffusion 2.1 pipeline (as you're using DiffusionPipeline now)
     pipeline = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1",
-                                                 torch_dtype=torch.float16 if device == "cuda" else torch.float32,
-                                                 text_encoder_3=None,
-                                                 tokenizer_3=None)
+                                                 torch_dtype=torch.float16 if device == "cuda" else torch.float32,
+                                                 text_encoder_3=None,
+                                                 tokenizer_3=None)
 
-    #pipeline = StableDiffusionPipeline.from_pretrained(
-    #    "stabilityai/stable-diffusion-3-medium-diffusers",
-    #    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
-    #    use_auth_token=HUGGINGFACE_TOKEN,  # Use the Hugging Face token for authentication
-    #    text_encoder_3=None,
-    #    tokenizer_3=None
-    #    )
-
-    # Enable efficient model execution
-    pipeline.enable_model_cpu_offload()
-
     # Generate an image based on the prompt
     image = pipeline(
         prompt=prompt,
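
For reference, below is a minimal, self-contained sketch of how the updated app.py could be completed end to end. Everything beyond the lines shown in the diff is an assumption: the generation parameters (num_inference_steps, guidance_scale), the .images[0] access, and the gr.Interface wiring are illustrative, and the SD3-only text_encoder_3/tokenizer_3 arguments are dropped in this sketch because Stable Diffusion 2.1 does not use them.

# Minimal sketch of a complete app.py after this commit (assumed continuation, not the author's exact code)
import gradio as gr
import torch
from diffusers import DiffusionPipeline

def image_generation(prompt):
    # Check if GPU is available and pick a matching dtype
    device = "cuda" if torch.cuda.is_available() else "cpu"

    # Load the Stable Diffusion 2.1 pipeline and move it to the selected device
    pipeline = DiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-2-1",
        torch_dtype=torch.float16 if device == "cuda" else torch.float32,
    ).to(device)

    # Generate a single image from the text prompt
    # (step count and guidance scale are assumed values for illustration)
    image = pipeline(
        prompt=prompt,
        num_inference_steps=30,
        guidance_scale=7.5,
    ).images[0]

    return image

# Assumed Gradio wiring: a simple text-to-image interface
demo = gr.Interface(
    fn=image_generation,
    inputs=gr.Textbox(label="Prompt"),
    outputs=gr.Image(label="Generated image"),
    title="Stable Diffusion 2.1 Text-to-Image",
)

if __name__ == "__main__":
    demo.launch()

Note that, as in the diff, the sketch reloads the pipeline on every call; caching it at module level would avoid repeated downloads and re-initialization, but the per-call load is kept here to mirror the structure of the committed function.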