Libra7578 and justinpinkney committed on
Commit a1e76d0
0 Parent(s):

Duplicate from lambdalabs/image-mixer-demo


Co-authored-by: Justin Pinkney <[email protected]>

Files changed (12)
  1. .gitattributes +34 -0
  2. README.md +14 -0
  3. app.py +245 -0
  4. blonder.jpeg +0 -0
  5. ex1-1.jpeg +0 -0
  6. ex1-2.jpeg +0 -0
  7. ex1-3.jpeg +0 -0
  8. ex2-1.jpeg +0 -0
  9. ex2-2.jpeg +0 -0
  10. ex2-3.jpeg +0 -0
  11. gainsborough.jpeg +0 -0
  12. requirements.txt +25 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ title: Image Mixer Demo
+ emoji: 🌀
+ colorFrom: purple
+ colorTo: purple
+ sdk: gradio
+ sdk_version: 3.15
+ app_file: app.py
+ pinned: false
+ license: openrail
+ duplicated_from: lambdalabs/image-mixer-demo
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,245 @@
+ from io import BytesIO
+ import torch
+ import numpy as np
+ from PIL import Image
+ from einops import rearrange
+ from torch import autocast
+ from contextlib import nullcontext
+ import requests
+ import functools
+
+ from ldm.models.diffusion.ddim import DDIMSampler
+ from ldm.models.diffusion.plms import PLMSSampler
+ from ldm.extras import load_model_from_config, load_training_dir
+ import clip
+
+ from PIL import Image
+
+ from huggingface_hub import hf_hub_download
+ ckpt = hf_hub_download(repo_id="lambdalabs/image-mixer", filename="image-mixer-pruned.ckpt")
+ config = hf_hub_download(repo_id="lambdalabs/image-mixer", filename="image-mixer-config.yaml")
+
+ device = "cuda:0"
+ model = load_model_from_config(config, ckpt, device=device, verbose=False)
+ model = model.to(device).half()
+
+ clip_model, preprocess = clip.load("ViT-L/14", device=device)
+
+ n_inputs = 5
+
+ torch.cuda.empty_cache()
+
+ @functools.lru_cache()
+ def get_url_im(t):
+     user_agent = {'User-agent': 'gradio-app'}
+     response = requests.get(t, headers=user_agent)
+     return Image.open(BytesIO(response.content))
+
+ @torch.no_grad()
+ def get_im_c(im_path, clip_model):
+     # im = Image.open(im_path).convert("RGB")
+     prompts = preprocess(im_path).to(device).unsqueeze(0)
+     return clip_model.encode_image(prompts).float()
+
+ @torch.no_grad()
+ def get_txt_c(txt, clip_model):
+     text = clip.tokenize([txt,]).to(device)
+     return clip_model.encode_text(text)
+
+ def get_txt_diff(txt1, txt2, clip_model):
+     return get_txt_c(txt1, clip_model) - get_txt_c(txt2, clip_model)
+
+ def to_im_list(x_samples_ddim):
+     x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
+     ims = []
+     for x_sample in x_samples_ddim:
+         x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
+         ims.append(Image.fromarray(x_sample.astype(np.uint8)))
+     return ims
+
+ @torch.no_grad()
+ def sample(sampler, model, c, uc, scale, start_code, h=512, w=512, precision="autocast", ddim_steps=50):
+     ddim_eta = 0.0
+     precision_scope = autocast if precision == "autocast" else nullcontext
+     with precision_scope("cuda"):
+         shape = [4, h // 8, w // 8]
+         samples_ddim, _ = sampler.sample(S=ddim_steps,
+                                          conditioning=c,
+                                          batch_size=c.shape[0],
+                                          shape=shape,
+                                          verbose=False,
+                                          unconditional_guidance_scale=scale,
+                                          unconditional_conditioning=uc,
+                                          eta=ddim_eta,
+                                          x_T=start_code)
+
+         x_samples_ddim = model.decode_first_stage(samples_ddim)
+     return to_im_list(x_samples_ddim)
+
+ def run(*args):
+
+     # The flat Gradio argument list arrives as: all input types, all texts,
+     # all images, all strengths (n_inputs of each), then the 4 global settings.
+     inps = []
+     for i in range(0, len(args)-4, n_inputs):
+         inps.append(args[i:i+n_inputs])
+
+     scale, n_samples, seed, steps = args[-4:]
+     h = w = 640
+
+     sampler = DDIMSampler(model)
+     # sampler = PLMSSampler(model)
+
+     torch.manual_seed(seed)
+     start_code = torch.randn(n_samples, 4, h//8, w//8, device=device)
+     conds = []
+
+     # Build one CLIP embedding per input slot, scaled by its strength slider.
+     for b, t, im, s in zip(*inps):
+         if b == "Image":
+             this_cond = s*get_im_c(im, clip_model)
+         elif b == "Text/URL":
+             if t.startswith("http"):
+                 im = get_url_im(t)
+                 this_cond = s*get_im_c(im, clip_model)
+             else:
+                 this_cond = s*get_txt_c(t, clip_model)
+         else:
+             this_cond = torch.zeros((1, 768), device=device)
+         conds.append(this_cond)
+     conds = torch.cat(conds, dim=0).unsqueeze(0)
+     conds = conds.tile(n_samples, 1, 1)
+
+     ims = sample(sampler, model, conds, 0*conds, scale, start_code, ddim_steps=steps)
+     # return make_row(ims)
+
+     # Clear GPU memory cache so less likely to OOM
+     torch.cuda.empty_cache()
+     return ims
+
+
+ import gradio as gr
+ from functools import partial
+ from itertools import chain
+
+ def change_visible(txt1, im1, val):
+     outputs = {}
+     if val == "Image":
+         outputs[im1] = gr.update(visible=True)
+         outputs[txt1] = gr.update(visible=False)
+     elif val == "Text/URL":
+         outputs[im1] = gr.update(visible=False)
+         outputs[txt1] = gr.update(visible=True)
+     elif val == "Nothing":
+         outputs[im1] = gr.update(visible=False)
+         outputs[txt1] = gr.update(visible=False)
+     return outputs
+
+
+ with gr.Blocks(title="Image Mixer", css=".gr-box {border-color: #8136e2}") as demo:
+
+     gr.Markdown("")
+     gr.Markdown(
+ """
+ # Image Mixer
+
+ _Created by [Justin Pinkney](https://www.justinpinkney.com) at [Lambda Labs](https://lambdalabs.com/)_
+
+ To skip the queue you can try it on <a href="https://cloud.lambdalabs.com/demos/ml/image-mixer-demo" style="display:inline-block;position: relative;"><img style="margin-top: 0;margin-bottom: 0;margin-left: .25em;" src="https://img.shields.io/badge/-Lambda%20Cloud-blueviolet"></a>, or <a href="https://huggingface.co/spaces/lambdalabs/image-mixer-demo?duplicate=true" style="display:inline-block;position: relative;"><img style="margin-top: 0;margin-bottom: 0;margin-left: .25em;" src="https://bit.ly/3gLdBN6"></a>
+
+ ### __Provide one or more images to be mixed together by a fine-tuned Stable Diffusion model (see tips and advice below👇).__
+
+ ![banner-large.jpeg](https://s3.amazonaws.com/moonup/production/uploads/1674039767068-62bd5f951e22ec84279820e8.jpeg)
+
+ """)
+
+     btns = []
+     txts = []
+     ims = []
+     strengths = []
+
+     with gr.Row():
+         for i in range(n_inputs):
+             with gr.Box():
+                 with gr.Column():
+                     btn1 = gr.Radio(
+                         choices=["Image", "Text/URL", "Nothing"],
+                         label=f"Input {i} type",
+                         interactive=True,
+                         value="Nothing",
+                     )
+                     txt1 = gr.Textbox(label="Text or Image URL", visible=False, interactive=True)
+                     im1 = gr.Image(label="Image", interactive=True, visible=False, type="pil")
+                     strength = gr.Slider(label="Strength", minimum=0, maximum=5, step=0.05, value=1, interactive=True)
+
+             fn = partial(change_visible, txt1, im1)
+             btn1.change(fn=fn, inputs=[btn1], outputs=[txt1, im1], queue=False)
+
+             btns.append(btn1)
+             txts.append(txt1)
+             ims.append(im1)
+             strengths.append(strength)
+     with gr.Row():
+         cfg_scale = gr.Slider(label="CFG scale", value=3, minimum=1, maximum=10, step=0.5)
+         n_samples = gr.Slider(label="Num samples", value=1, minimum=1, maximum=1, step=1)
+         seed = gr.Slider(label="Seed", value=0, minimum=0, maximum=10000, step=1)
+         steps = gr.Slider(label="Steps", value=30, minimum=10, maximum=100, step=5)
+
+     with gr.Row():
+         submit = gr.Button("Generate")
+     output = gr.Gallery().style(grid=[1,2], height="640px")
+
+     inps = list(chain(btns, txts, ims, strengths))
+     inps.extend([cfg_scale, n_samples, seed, steps,])
+     submit.click(fn=run, inputs=inps, outputs=[output])
+
+     ex = gr.Examples([
+         [
+             "Image", "Image", "Text/URL", "Nothing", "Nothing",
+             "","","central symmetric figure detailed artwork","","",
+             "gainsborough.jpeg","blonder.jpeg","blonder.jpeg","blonder.jpeg","blonder.jpeg",
+             1,1.35,1.4,1,1,
+             3.0, 1, 0, 30,
+         ],
+         [
+             "Image", "Image", "Text/URL", "Image", "Nothing",
+             "","","flowers","","",
+             "ex2-1.jpeg","ex2-2.jpeg","blonder.jpeg","ex2-3.jpeg","blonder.jpeg",
+             1,1,1.5,1.25,1,
+             3.0, 1, 0, 30,
+         ],
+         [
+             "Image", "Image", "Image", "Nothing", "Nothing",
+             "","","","","",
+             "ex1-1.jpeg","ex1-2.jpeg","ex1-3.jpeg","blonder.jpeg","blonder.jpeg",
+             1.1,1,1.4,1,1,
+             3.0, 1, 0, 30,
+         ],
+     ],
+     fn=run, inputs=inps, outputs=[output], cache_examples=True)
+
+     gr.Markdown(
+ """
+
+ ## Tips
+
+ - You can provide between 1 and 5 inputs; each can be an uploaded image, a text prompt, or a URL to an image file.
+ - The order of the inputs shouldn't matter; any images will be centre cropped before use.
+ - Each input has an individual strength parameter which controls how much influence it has on the output.
+ - The model was not trained using text and cannot interpret complex text prompts.
+ - Using only text prompts doesn't work well, so make sure there is at least one image or URL to an image.
+ - The parameters on the bottom row, such as CFG scale, behave the same as for a normal Stable Diffusion model.
+ - Balancing the different inputs requires tweaking the strengths. I suggest finding the right balance with a small number of samples and few steps until you're
+ happy with the result, then increasing the steps for better quality.
+ - Outputs are 640x640 by default.
+ - If you want to run locally, see the instructions on the [Model Card](https://huggingface.co/lambdalabs/image-mixer).
+
+ ## How does this work?
+
+ This model is based on the [Stable Diffusion Image Variations model](https://huggingface.co/lambdalabs/sd-image-variations-diffusers)
+ but it has been fine-tuned to take multiple CLIP image embeddings. During training, up to 5 random crops were taken from the training images and
+ the CLIP image embeddings were computed; these were then concatenated and used as the conditioning for the model. At inference time we can combine the image
+ embeddings from multiple images to mix their concepts (and we can also use the text encoder to add text concepts).
+
+ The model was trained on a subset of LAION Improved Aesthetics at a resolution of 640x640 and was trained using 8xA100 GPUs on [Lambda GPU Cloud](https://lambdalabs.com/service/gpu-cloud).
+
+ """)
+
+ demo.launch()
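One detail of `app.py` that is easy to miss is how `run()` consumes its arguments: Gradio passes a single flat list ordered as all five input types, then all five text fields, all five images, and all five strengths, followed by CFG scale, number of samples, seed and steps (the order built by `chain(btns, txts, ims, strengths)`). The sketch below is a hypothetical, standalone reproduction of just that regrouping step with made-up values; it does not load the model.

```python
# Hypothetical standalone sketch of run()'s argument regrouping (no model needed).
n_inputs = 5

def group_args(*args):
    # args = 5 types + 5 texts + 5 images + 5 strengths + (scale, n_samples, seed, steps)
    groups = [args[i:i + n_inputs] for i in range(0, len(args) - 4, n_inputs)]
    scale, n_samples, seed, steps = args[-4:]
    # zip(*groups) yields one (type, text, image, strength) tuple per input slot;
    # in the app each slot then becomes one strength-scaled 768-d CLIP embedding,
    # and the five embeddings are stacked into a (1, 5, 768) conditioning tensor.
    return list(zip(*groups)), (scale, n_samples, seed, steps)

types     = ["Image", "Text/URL", "Nothing", "Nothing", "Nothing"]
texts     = ["", "flowers", "", "", ""]
images    = ["gainsborough.jpeg", None, None, None, None]
strengths = [1.0, 1.5, 1.0, 1.0, 1.0]

flat = types + texts + images + strengths + [3.0, 1, 0, 30]
slots, settings = group_args(*flat)
print(slots[0])   # ('Image', '', 'gainsborough.jpeg', 1.0)
print(settings)   # (3.0, 1, 0, 30)
```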
blonder.jpeg ADDED
ex1-1.jpeg ADDED
ex1-2.jpeg ADDED
ex1-3.jpeg ADDED
ex2-1.jpeg ADDED
ex2-2.jpeg ADDED
ex2-3.jpeg ADDED
gainsborough.jpeg ADDED
requirements.txt ADDED
@@ -0,0 +1,25 @@
+ --extra-index-url https://download.pytorch.org/whl/cu113
+ torch==1.12.1
+ torchvision==0.13.1
+ albumentations==0.4.3
+ opencv-python==4.5.5.64
+ pudb==2019.2
+ imageio==2.9.0
+ imageio-ffmpeg==0.4.2
+ pytorch-lightning==1.4.2
+ omegaconf==2.1.1
+ test-tube>=0.7.5
+ streamlit>=0.73.1
+ einops==0.3.0
+ torch-fidelity==0.3.0
+ transformers==4.22.2
+ kornia==0.6
+ webdataset==0.2.5
+ torchmetrics==0.6.0
+ fire==0.4.0
+ diffusers==0.3.0
+ datasets[vision]==2.4.0
+ -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
+ -e git+https://github.com/openai/CLIP.git@main#egg=clip
+ -e git+https://github.com/justinpinkney/nomi.git@e9ded23b7e2269cc64d39683e1bf3c0319f552ab#egg=nomi
+ -e git+https://github.com/justinpinkney/stable-diffusion.git@8789746c05cf8ed4755ddb6aa33abdfdaa6908fa#egg=latent-diffusion
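The requirements pin `torch==1.12.1` against the `cu113` extra index, and `app.py` hard-codes `device = "cuda:0"`, so a local reproduction needs a matching CUDA build. A quick sanity check after installing (a sketch, not part of the Space):

```python
# Minimal environment check: confirm the cu113 build of torch 1.12.1 was installed
# and that a CUDA device is visible, since the demo assumes "cuda:0".
import torch

print(torch.__version__)          # expected to start with "1.12.1"
print(torch.version.cuda)         # expected "11.3" for the cu113 wheels
print(torch.cuda.is_available())  # must be True for app.py to run
```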