Spaces: Runtime error

lixiang46 committed
Commit · 56400db
1 Parent(s): 20df108
update

app.py CHANGED
@@ -17,24 +17,24 @@ ckpt_IPA_dir = '/home/lixiang46/Kolors/weights/Kolors-IP-Adapter-Plus'
 # ckpt_dir = snapshot_download(repo_id="Kwai-Kolors/Kolors")
 # ckpt_IPA_dir = snapshot_download(repo_id="Kwai-Kolors/Kolors-IP-Adapter-Plus")
 
-text_encoder = ChatGLMModel.from_pretrained(f'{ckpt_dir}/text_encoder', torch_dtype=torch.float16).half()
-tokenizer = ChatGLMTokenizer.from_pretrained(f'{ckpt_dir}/text_encoder')
-vae = AutoencoderKL.from_pretrained(f"{ckpt_dir}/vae", revision=None).half()
-scheduler = EulerDiscreteScheduler.from_pretrained(f"{ckpt_dir}/scheduler")
-
-unet_i2i = unet_2d_condition.UNet2DConditionModel.from_pretrained(f"{ckpt_dir}/unet", revision=None).half()
+text_encoder = ChatGLMModel.from_pretrained(f'{ckpt_dir}/text_encoder', torch_dtype=torch.float16).half().to(device)
+tokenizer = ChatGLMTokenizer.from_pretrained(f'{ckpt_dir}/text_encoder').to(device)
+vae = AutoencoderKL.from_pretrained(f"{ckpt_dir}/vae", revision=None).half().to(device)
+scheduler = EulerDiscreteScheduler.from_pretrained(f"{ckpt_dir}/scheduler").to(device)
+unet_t2i = UNet2DConditionModel.from_pretrained(f"{ckpt_dir}/unet", revision=None).half().to(device)
+unet_i2i = unet_2d_condition.UNet2DConditionModel.from_pretrained(f"{ckpt_dir}/unet", revision=None).half().to(device)
 image_encoder = CLIPVisionModelWithProjection.from_pretrained(f'{ckpt_IPA_dir}/image_encoder',ignore_mismatched_sizes=True).to(dtype=torch.float16, device=device)
 ip_img_size = 336
 clip_image_processor = CLIPImageProcessor(size=ip_img_size, crop_size=ip_img_size)
 
-
-
-
-
-
-
-
-
+pipe_t2i = pipeline_stable_diffusion_xl_chatglm_256.StableDiffusionXLPipeline(
+    vae=vae,
+    text_encoder=text_encoder,
+    tokenizer=tokenizer,
+    unet=unet_t2i,
+    scheduler=scheduler,
+    force_zeros_for_empty_prompt=False
+).to(device)
 
 pipe_i2i = pipeline_stable_diffusion_xl_chatglm_256_ipadapter.StableDiffusionXLPipeline(
     vae=vae,
@@ -61,7 +61,7 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
     generator = torch.Generator().manual_seed(seed)
 
     if ip_adapter_image is None:
-        image =
+        image = pipe_t2i(
             prompt = prompt,
             negative_prompt = negative_prompt,
             guidance_scale = guidance_scale,
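
Two of the added .to(device) calls are worth flagging: ChatGLMTokenizer and EulerDiscreteScheduler are not torch.nn.Module subclasses. A tokenizer is a plain Python object and a diffusers scheduler holds only configuration and timestep state, so neither implements .to(); chaining it after from_pretrained should raise AttributeError as soon as app.py runs, which would be consistent with the Space's "Runtime error" status. Below is a minimal sketch of the loading block with device placement kept only on the actual modules (my assumption, not part of this commit):

    # Sketch: .to(device) only on nn.Module objects. ChatGLMModel, ChatGLMTokenizer,
    # UNet2DConditionModel and unet_2d_condition are assumed to be imported from the
    # Kolors code base exactly as in the rest of app.py.
    import torch
    from diffusers import AutoencoderKL, EulerDiscreteScheduler

    device = "cuda"  # assumption: matches the `device` defined earlier in app.py
    text_encoder = ChatGLMModel.from_pretrained(f'{ckpt_dir}/text_encoder', torch_dtype=torch.float16).half().to(device)
    tokenizer = ChatGLMTokenizer.from_pretrained(f'{ckpt_dir}/text_encoder')     # tokenizer has no .to()
    vae = AutoencoderKL.from_pretrained(f"{ckpt_dir}/vae", revision=None).half().to(device)
    scheduler = EulerDiscreteScheduler.from_pretrained(f"{ckpt_dir}/scheduler")  # scheduler has no .to()
    unet_t2i = UNet2DConditionModel.from_pretrained(f"{ckpt_dir}/unet", revision=None).half().to(device)
    unet_i2i = unet_2d_condition.UNet2DConditionModel.from_pretrained(f"{ckpt_dir}/unet", revision=None).half().to(device)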
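
For context, the second hunk wires the new pipeline into infer(): when no IP-Adapter image is supplied, the prompt goes through the freshly constructed pipe_t2i; otherwise the IP-Adapter pipeline pipe_i2i handles it. A hypothetical sketch of that dispatch follows; the call arguments past guidance_scale are truncated in the diff, so everything beyond them here is illustrative, and the output handling assumes the usual diffusers pipeline convention:

    # Hypothetical sketch of the branch inside infer(); argument list abridged
    # to what the diff shows.
    if ip_adapter_image is None:
        image = pipe_t2i(
            prompt=prompt,
            negative_prompt=negative_prompt,
            guidance_scale=guidance_scale,
            generator=generator,
        ).images[0]  # assumption: pipeline returns an object with an .images list
    else:
        image = pipe_i2i(
            prompt=prompt,
            negative_prompt=negative_prompt,
            guidance_scale=guidance_scale,
            ip_adapter_image=ip_adapter_image,  # assumption: kwarg name follows diffusers' IP-Adapter convention
            generator=generator,
        ).images[0]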