amos1088 committed on
Commit
bcbf6e0
·
1 Parent(s): 91746d6

test gradio

Browse files
Files changed (1) hide show
  1. app.py +16 -20
app.py CHANGED
@@ -1,37 +1,33 @@
1
  import gradio as gr
 
 
2
  from huggingface_hub import login
3
  import os
4
  import spaces
5
- from diffusers.schedulers import UniPCMultistepScheduler
6
- from diffusers import StableDiffusionXLAdapterPipeline, T2IAdapter, EulerAncestralDiscreteScheduler, AutoencoderKL, \
7
- StableDiffusionXLPipeline
8
  from diffusers.utils import load_image, make_image_grid
9
  import torch
10
 
11
- # Log in to Hugging Face with your token
12
  token = os.getenv("HF_TOKEN")
13
  login(token=token)
14
 
15
- model_id = 'stabilityai/sdxl-turbo'
16
- x = StableDiffusionXLPipeline.from_pretrained(model_id)
17
-
18
- euler_a = EulerAncestralDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
19
- vae=AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix",)
20
- x.load_ip_adapter(pretrained_model_name_or_path_or_dict="TencentARC/T2I-Adapter",
21
- subfolder="models",
22
- weight_name="t2iadapter_style_sd14v1.pth")
23
 
24
- pipe = StableDiffusionXLAdapterPipeline.from_pretrained(
25
- model_id, vae=vae, adapter=x.adapter, scheduler=euler_a, variant="fp16",
26
- )
27
- pipe.enable_xformers_memory_efficient_attention()
28
- pipe.to("cuda", torch.float16)
29
 
30
- # controlnet = SD3ControlNetModel.from_pretrained("alimama-creative/SD3-Controlnet-Softedge", torch_dtype=torch.float16)
 
 
 
 
 
31
  #
32
- # pipe = StableDiffusion3ControlNetPipeline.from_pretrained("stabilityai/stable-diffusion-3-medium-diffusers", controlnet=controlnet)
 
33
  # pipe.to("cuda", torch.float16)
34
 
 
 
 
 
 
35
 
36
  @spaces.GPU
37
  def generate_image(prompt, reference_image, controlnet_conditioning_scale):
@@ -39,7 +35,7 @@ def generate_image(prompt, reference_image, controlnet_conditioning_scale):
39
  # Generate the image with ControlNet conditioning
40
  generated_image = pipe(
41
  prompt=prompt,
42
- ip_adapter_image=load_image(reference_image),
43
  controlnet_conditioning_scale=controlnet_conditioning_scale,
44
  ).images[0]
45
  return generated_image
 
1
# Standard library imports.
import os

# Third-party imports (grouped per PEP 8; duplicate `import torch` removed).
import gradio as gr
import spaces
import torch
from diffusers import (
    SD3ControlNetModel,
    StableDiffusion3ControlNetPipeline,
    UniPCMultistepScheduler,
)
from diffusers.utils import load_image, make_image_grid
from huggingface_hub import login

# Authenticate with the Hugging Face Hub so gated model weights can be
# downloaded. HF_TOKEN must be set in the environment (e.g. a Space secret);
# login(token=None) would fall back to interactive/cached credentials.
token = os.getenv("HF_TOKEN")
login(token=token)
12
 
 
 
 
 
 
 
 
 
13
 
 
 
 
 
 
14
 
15
+ # # Load the T2I-Style Adapter and the SDXL pipeline
16
+ # adapter = T2IAdapter.from_pretrained("TencentARC/t2i-adapter-style-sdxl")
17
+ # pipe = StableDiffusionXLAdapterPipeline.from_pretrained(
18
+ # "stabilityai/stable-diffusion-xl-base-1.0",
19
+ # adapter=adapter,
20
+ # )
21
  #
22
+ # # Set up the scheduler and device
23
+ # pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
24
  # pipe.to("cuda", torch.float16)
25
 
26
# Load the Softedge ControlNet weights in half precision, then build the
# Stable Diffusion 3 pipeline around it and move everything to the GPU.
controlnet = SD3ControlNetModel.from_pretrained(
    "alimama-creative/SD3-Controlnet-Softedge",
    torch_dtype=torch.float16,
)

pipe = StableDiffusion3ControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers",
    controlnet=controlnet,
)
pipe.to("cuda", torch.float16)
31
 
32
@spaces.GPU
def generate_image(prompt, reference_image, controlnet_conditioning_scale):
    """Run the ControlNet-conditioned SD3 pipeline and return one image.

    Args:
        prompt: Text prompt describing the desired image.
        reference_image: Path/URL/PIL image used as the ControlNet
            conditioning input (loaded via ``load_image``).
        controlnet_conditioning_scale: Strength of the ControlNet guidance.

    Returns:
        The first generated PIL image from the pipeline output.
    """
    # NOTE(review): assumes module-level `pipe` has been initialized above.
    result = pipe(
        prompt=prompt,
        control_image=load_image(reference_image),
        controlnet_conditioning_scale=controlnet_conditioning_scale,
    )
    return result.images[0]