amos1088 committed
Commit b1029c2 · Parent: e6b8562

test gradio

Files changed (1)
  1. app.py +14 -24
app.py CHANGED
@@ -7,44 +7,34 @@ from diffusers import StableDiffusionXLPipeline
 from PIL import Image
 import torch
 from diffusers import AutoPipelineForText2Image, DDIMScheduler
-from transformers import CLIPVisionModelWithProjection
+from diffusers import AutoPipelineForText2Image
 from diffusers.utils import load_image
+import torch

 token = os.getenv("HF_TOKEN")
 login(token=token)

-image_encoder = CLIPVisionModelWithProjection.from_pretrained(
-    "h94/IP-Adapter",
-    subfolder="models/image_encoder",
-    torch_dtype=torch.float16,
-)

-pipeline = AutoPipelineForText2Image.from_pretrained(
-    "stabilityai/stable-diffusion-xl-base-1.0",
-    torch_dtype=torch.float16,
-    image_encoder=image_encoder,
-)
-pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config)
-pipeline.load_ip_adapter(
-    "h94/IP-Adapter",
-    subfolder="sdxl_models",
-    weight_name=["ip-adapter-plus_sdxl_vit-h.safetensors"]
-)
-pipeline.set_ip_adapter_scale([0.6])
-pipeline.enable_model_cpu_offload()
+pipeline = AutoPipelineForText2Image.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16).to("cuda")
+pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin")
+


 @spaces.GPU
 def generate_image(prompt, reference_image, controlnet_conditioning_scale):
-    reference_image = Image.open(reference_image)
+    style_image = Image.open(reference_image)
     # reference_image.resize((512, 512))
-    pipeline.set_ip_adapter_scale([controlnet_conditioning_scale])
+    scale = {
+        "up": {"block_0": [0.0, controlnet_conditioning_scale, 0.0]},
+    }
+    pipeline.set_ip_adapter_scale(scale)

     image = pipeline(
         prompt=prompt,
-        ip_adapter_image=[reference_image],
-        negative_prompt="monochrome, lowres, bad anatomy, worst quality, low quality",
-        num_inference_steps=50, num_images_per_prompt=1,
+        ip_adapter_image=style_image,
+        negative_prompt="",
+        guidance_scale=5,
+        num_inference_steps=30,
     ).images[0]

     return image
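
The dict passed to set_ip_adapter_scale replaces the single scalar scale used before with diffusers' per-layer IP-Adapter scaling: applying the adapter only to block_0 of the "up" blocks injects the style of the reference image while leaving composition to the text prompt. A minimal sketch of the two common configurations, assuming a diffusers version new enough to accept dict scales; the prompt and style_ref.png are illustrative placeholders, not part of the app:

from PIL import Image

# Style only: the reference image steers style, the prompt steers layout.
style_only = {
    "up": {"block_0": [0.0, 1.0, 0.0]},
}

# Style and layout: also let the reference image influence composition.
style_and_layout = {
    "down": {"block_2": [0.0, 1.0]},
    "up": {"block_0": [0.0, 1.0, 0.0]},
}

pipeline.set_ip_adapter_scale(style_only)
image = pipeline(
    prompt="a cat, macro photo",                   # illustrative prompt
    ip_adapter_image=Image.open("style_ref.png"),  # hypothetical local reference image
    guidance_scale=5,
    num_inference_steps=30,
).images[0]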
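
The commit message is "test gradio", but the UI wiring sits outside this hunk. A minimal sketch of how generate_image could be exposed with Gradio, assuming the reference image is delivered as a file path (so the Image.open(reference_image) call above works); the component labels and slider range are illustrative:

import gradio as gr

demo = gr.Interface(
    fn=generate_image,
    inputs=[
        gr.Textbox(label="Prompt"),
        gr.Image(type="filepath", label="Reference image"),  # path string, consumed by Image.open()
        gr.Slider(0.0, 1.0, value=0.6, label="IP-Adapter scale"),
    ],
    outputs=gr.Image(label="Generated image"),
)

demo.launch()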