amos1088 committed on
Commit de93c44 · 1 Parent(s): 6fc2fae

test gradio

Files changed (1)
  1. app.py +20 -8
app.py CHANGED
@@ -17,21 +17,34 @@ login(token=token)
 
 # Model IDs for the base Stable Diffusion model and ControlNet variant
 model_id = "stabilityai/stable-diffusion-3.5-large-turbo"
-controlnet_id = "lllyasviel/control_v11p_sd15_inpaint"  # Make sure this ControlNet is compatible
+controlnet_id = "lllyasviel/control_v11p_sd15_inpaint"
 
-# Load ControlNet model and other components
+# Load each model component required by the pipeline
 controlnet = ControlNetModel.from_pretrained(controlnet_id, torch_dtype=torch.float16)
-pipeline = StableDiffusionControlNetPipeline.from_pretrained(
-    model_id,
+unet = UNet2DConditionModel.from_pretrained(model_id, subfolder="unet", torch_dtype=torch.float16)
+vae = AutoencoderKL.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float16)
+feature_extractor = CLIPFeatureExtractor.from_pretrained(model_id)
+text_encoder = CLIPTextModel.from_pretrained(model_id, subfolder="text_encoder")
+tokenizer = CLIPTokenizer.from_pretrained(model_id)
+
+# Initialize the pipeline with all components
+pipeline = StableDiffusionControlNetPipeline(
+    vae=vae,
+    text_encoder=text_encoder,
+    tokenizer=tokenizer,
+    unet=unet,
     controlnet=controlnet,
-    torch_dtype=torch.float16
+    scheduler=UniPCMultistepScheduler.from_config({"name": "UniPCMultistepScheduler"}),
+    feature_extractor=feature_extractor,
+    torch_dtype=torch.float16,
 )
+
+# Set device for pipeline
 pipeline = pipeline.to("cuda") if torch.cuda.is_available() else pipeline
 
-# Enable CPU offloading for memory optimization
+# Enable model CPU offloading for memory optimization
 pipeline.enable_model_cpu_offload()
 
-
 # Gradio interface function
 def generate_image(prompt, reference_image):
     # Resize and prepare reference image
@@ -47,7 +60,6 @@ def generate_image(prompt, reference_image):
     ).images[0]
     return generated_image
 
-
 # Set up Gradio interface
 interface = gr.Interface(
     fn=generate_image,
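
For comparison with the construction path introduced in this commit, here is a minimal sketch of the pattern the diffusers documentation uses for ControlNet pipelines. It is not part of the commit: it assumes an SD 1.5 base checkpoint (illustrated with "runwayml/stable-diffusion-v1-5", which does not appear in the diff), since "lllyasviel/control_v11p_sd15_inpaint" is an SD 1.5 ControlNet and StableDiffusionControlNetPipeline targets SD 1.x components, and it derives the UniPC scheduler from the loaded pipeline's own config rather than a hand-written dict.

# Sketch for comparison only, not part of the commit. Assumes an SD 1.5 base
# checkpoint; "runwayml/stable-diffusion-v1-5" is an illustrative choice.
import torch
from diffusers import (
    ControlNetModel,
    StableDiffusionControlNetPipeline,
    UniPCMultistepScheduler,
)

controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/control_v11p_sd15_inpaint", torch_dtype=torch.float16
)
pipeline = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # assumed base; the diff loads an SD 3.5 id instead
    controlnet=controlnet,
    torch_dtype=torch.float16,
)
# Reuse the pipeline's existing scheduler config instead of a hand-written dict
pipeline.scheduler = UniPCMultistepScheduler.from_config(pipeline.scheduler.config)
# CPU offloading manages device placement itself, so no explicit .to("cuda") is needed
pipeline.enable_model_cpu_offload()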