Matthew Trentacoste committed · Commit 71618cc · Parent(s): e8a6f69

Updating app.py to support generating multiple variations and updated text
app.py
CHANGED
@@ -10,13 +10,18 @@ def main(
     edit_prompt=None,
     edit_prompt_weight=1.0,
     scale=3.0,
+    n_samples=4,
     steps=25,
     seed=0,
 ):
 
     generator = torch.Generator(device=device).manual_seed(int(seed))
 
-
+    if len(base_prompt) == 0:
+        base_prompt = None
+    if len(edit_prompt) == 0:
+        edit_prompt = None
+
     images_list = pipe(
         n_samples*[input_im],
         base_prompt=base_prompt,
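The new `n_samples` argument batches the request by repeating the input image, and the seeded `torch.Generator` makes a given seed reproducible. A minimal sketch of that pattern, not the Space's exact code (`input_im` is a stand-in image here):

```python
import torch
from PIL import Image

device = "cuda" if torch.cuda.is_available() else "cpu"
generator = torch.Generator(device=device).manual_seed(0)  # same seed -> same variations

input_im = Image.new("RGB", (512, 512))  # stand-in for the uploaded image
n_samples = 4
batch = n_samples * [input_im]  # [im, im, im, im] -> one variation per copy

# Gradio Textboxes return "" when left empty, but the pipeline expects None
# for "no prompt" -- hence the len() == 0 checks added in this commit.
base_prompt = ""
base_prompt = base_prompt if len(base_prompt) > 0 else None  # -> None
```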
@@ -29,26 +34,20 @@ def main(
 
     return images_list.images
 
-    images = []
-    for i, image in enumerate(images_list.images):
-        if(images_list["nsfw_content_detected"][i]):
-            safe_image = Image.open(r"unsafe.png")
-            images.append(safe_image)
-        else:
-            images.append(image)
-    return images
+    # images = []
+    # for i, image in enumerate(images_list.images):
+    #     if(images_list["nsfw_content_detected"][i]):
+    #         safe_image = Image.open(r"unsafe.png")
+    #         images.append(safe_image)
+    #     else:
+    #         images.append(image)
+    # return images
 
 
 description = \
 """
-Generate variations on an input image using a fine-tuned version of Stable Diffision.
-
-
-This version has been ported to 🤗 Diffusers library, see more details on how to use this version in the [Lambda Diffusers repo](https://github.com/LambdaLabsML/lambda-diffusers).
-__For the original training code see [this repo](https://github.com/justinpinkney/stable-diffusion).
-
-![](https://raw.githubusercontent.com/justinpinkney/stable-diffusion/main/assets/im-vars-thin.jpg)
-
+Generate variations on an input image using a fine-tuned version of Stable Diffision. Edit images by applying an "edit" vector to the image embedding,
+created by taking the difference between a base prompt describing an attribute of the image and an edit prompt describing the desired attribute of the edit.
 """
 
 article = \
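The updated description states the editing scheme: compute an edit vector as the difference between the embeddings of an edit prompt and a base prompt, then apply it to the image embedding. The Space's pipeline does this internally; the sketch below only illustrates the arithmetic with plain CLIP from 🤗 Transformers, so the model choice and the `edit_prompt_weight` scaling are assumptions, not the pipeline's actual code:

```python
import torch
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")

image = Image.new("RGB", (512, 512))  # stand-in for the input photo
image_emb = model.get_image_features(**processor(images=image, return_tensors="pt"))

text_inputs = processor(
    text=["a color photograph", "a black and white photograph"],  # base, edit
    return_tensors="pt",
    padding=True,
)
base_emb, edit_emb = model.get_text_features(**text_inputs).chunk(2)

edit_prompt_weight = 1.0
# Shift the image embedding along the base -> edit direction.
edited_emb = image_emb + edit_prompt_weight * (edit_emb - base_emb)
```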
@@ -60,9 +59,9 @@ the CLIP _image_ encoder instead. So instead of generating images based a text i
 This creates images which have the same rough style and content, but different details, in particular the composition is generally quite different.
 This is a totally different approach to the img2img script of the original Stable Diffusion and gives very different results.
 
+Original model trained by [Justin Pinkney](https://www.justinpinkney.com) ([@Buntworthy](https://twitter.com/Buntworthy)).
 The model was fine tuned on the [LAION aethetics v2 6+ dataset](https://laion.ai/blog/laion-aesthetics/) to accept the new conditioning.
 Training was done on 4xA6000 GPUs on [Lambda GPU Cloud](https://lambdalabs.com/service/gpu-cloud).
-More details on the method and training will come in a future blog post.
 """
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
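The article explains that this model replaces Stable Diffusion's CLIP text conditioning with CLIP image embeddings, so generation is conditioned on an example image rather than on a prompt. For plain variations (without the base/edit prompts this Space adds), the ported weights can be loaded through 🤗 Diffusers; a hedged sketch, with the checkpoint name taken from the Lambda Diffusers repo and arguments that may differ across library versions:

```python
import torch
from PIL import Image
from diffusers import StableDiffusionImageVariationPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = StableDiffusionImageVariationPipeline.from_pretrained(
    "lambdalabs/sd-image-variations-diffusers"
).to(device)

input_im = Image.open("examples/painted ladies.png")  # image in, images out -- no text prompt
out = pipe(
    4 * [input_im],  # four variations of the same input
    guidance_scale=3.0,
    num_inference_steps=25,
    generator=torch.Generator(device=device).manual_seed(0),
)
out.images[0].save("variation.png")
```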
@@ -80,18 +79,19 @@ inputs = [
     gr.Image(),
     gr.Textbox(label="Base prompt"),
     gr.Textbox(label="Edit prompt"),
-    gr.Slider(0.
+    gr.Slider(0.1, 2.0, value=1.0, step=0.1, label="Edit prompt weight"),
     gr.Slider(0, 25, value=3, step=1, label="Guidance scale"),
-    gr.Slider(
-    gr.
+    gr.Slider(1, 4, value=1, step=1, label="Number images"),
+    gr.Slider(5, 100, value=25, step=5, label="Steps"),
+    gr.Number(0, label="Seed", precision=0)
 ]
 output = gr.Gallery(label="Generated variations")
 output.style(grid=2)
 
 examples = [
-    ["examples/painted ladies.png",
-    ["examples/painted ladies.png", "a color photograph", "a black and white photograph", 1.0, 3, 25, 0],
-    ["examples/painted ladies.png", "a color photograph", "a brightly colored oil painting", 1.0, 3, 25, 0],
+    ["examples/painted ladies.png", "", "", 1.0, 3, 4, 25, 0],
+    ["examples/painted ladies.png", "a color photograph", "a black and white photograph", 1.0, 3, 1, 25, 0],
+    ["examples/painted ladies.png", "a color photograph", "a brightly colored oil painting", 1.0, 3, 1, 25, 0],
 ]
 
 demo = gr.Interface(
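Each row in `examples` has to supply one value per input component, in order, which is why every example row gains an extra value (the number of images) to match the new slider. A self-contained sketch of the wiring, with `fake_main` standing in for the real `main`:

```python
import gradio as gr

def fake_main(im, base_prompt, edit_prompt, weight, scale, n_samples, steps, seed):
    return [im] * int(n_samples)  # stand-in for the diffusion call

demo = gr.Interface(
    fn=fake_main,
    inputs=[  # 8 components -> each example row needs 8 values
        gr.Image(),
        gr.Textbox(label="Base prompt"),
        gr.Textbox(label="Edit prompt"),
        gr.Slider(0.1, 2.0, value=1.0, step=0.1, label="Edit prompt weight"),
        gr.Slider(0, 25, value=3, step=1, label="Guidance scale"),
        gr.Slider(1, 4, value=1, step=1, label="Number images"),
        gr.Slider(5, 100, value=25, step=5, label="Steps"),
        gr.Number(0, label="Seed", precision=0),
    ],
    outputs=gr.Gallery(label="Generated variations"),
)

if __name__ == "__main__":
    demo.launch()
```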