Spaces: nupurkmr9/custom-diffusion (Build error)

Nupur Kumari committed
Commit • 1fa5d2c
Parent(s): 258d687

custom-diffusion-space

Browse files
- app.py +35 -26
- custom-diffusion-models/cat.bin +3 -0
- inference.py +8 -4
- lora +0 -1
- requirements.txt +1 -0
- trainer.py +10 -12
app.py
CHANGED
@@ -19,7 +19,7 @@ from trainer import Trainer
 from uploader import upload
 
 TITLE = '# Custom Diffusion + StableDiffusion Training UI'
-DESCRIPTION = 'This is
+DESCRIPTION = 'This is a demo for [https://github.com/adobe-research/custom-diffusion](https://github.com/adobe-research/custom-diffusion).'
 
 ORIGINAL_SPACE_ID = 'nupurkmr9/custom-diffusion'
 SPACE_ID = os.getenv('SPACE_ID', ORIGINAL_SPACE_ID)
@@ -57,26 +57,27 @@ def create_training_demo(trainer: Trainer,
                          pipe: InferencePipeline) -> gr.Blocks:
     with gr.Blocks() as demo:
         base_model = gr.Dropdown(
-            choices=['stabilityai/stable-diffusion-2-1-base'],
-            value='
+            choices=['stabilityai/stable-diffusion-2-1-base', 'CompVis/stable-diffusion-v1-4'],
+            value='CompVis/stable-diffusion-v1-4',
             label='Base Model',
-            visible=
-        resolution = gr.Dropdown(choices=['512'],
+            visible=True)
+        resolution = gr.Dropdown(choices=['512', '768'],
                                  value='512',
                                  label='Resolution',
-                                 visible=
+                                 visible=True)
 
         with gr.Row():
             with gr.Box():
                 gr.Markdown('Training Data')
                 concept_images = gr.Files(label='Images for your concept')
                 concept_prompt = gr.Textbox(label='Concept Prompt',
-                                            max_lines=1)
+                                            max_lines=1, placeholder='Example: "photo of a \<new1\> cat"')
                 class_prompt = gr.Textbox(label='Regularization set Prompt',
-                                          max_lines=1)
+                                          max_lines=1, placeholder='Example: "cat"')
                 gr.Markdown('''
-                    -
-                    - For a concept
+                    - We use "\<new1\>" appended in front of the concept. E.g. "\<new1\> cat".
+                    - For a new concept, use "photo of a \<new1\> cat" for concept_prompt and "cat" for class_prompt.
+                    - For a style concept, use "painting in the style of \<new1\> art" for concept_prompt and "art" for class_prompt.
                     ''')
             with gr.Box():
                 gr.Markdown('Training Parameters')
@@ -84,16 +85,15 @@ def create_training_demo(trainer: Trainer,
                     label='Number of Training Steps', value=1000, precision=0)
                 learning_rate = gr.Number(label='Learning Rate', value=0.00001)
                 train_text_encoder = gr.Checkbox(label='Train Text Encoder',
-                                                 value=
+                                                 value=False)
                 modifier_token = gr.Checkbox(label='modifier token',
                                              value=True)
-
-                    label='
+                batch_size = gr.Number(
+                    label='batch_size', value=1, precision=0)
                 gradient_accumulation = gr.Number(
                     label='Number of Gradient Accumulation',
                     value=1,
                     precision=0)
-                fp16 = gr.Checkbox(label='FP16', value=True)
                 use_8bit_adam = gr.Checkbox(label='Use 8bit Adam', value=True)
                 gr.Markdown('''
                     - It will take about 8 minutes to train for 1000 steps with a T4 GPU.
@@ -111,19 +111,22 @@ def create_training_demo(trainer: Trainer,
         training_status = gr.Markdown()
         output_files = gr.Files(label='Trained Weight Files')
 
-        run_button.click(fn=pipe.clear
+        # run_button.click(fn=pipe.clear,
+        #                  inputs=None,
+        #                  outputs=None,)
         run_button.click(fn=trainer.run,
                          inputs=[
                              base_model,
                              resolution,
                              concept_images,
                              concept_prompt,
+                             class_prompt,
                              num_training_steps,
                              learning_rate,
                              train_text_encoder,
-
+                             modifier_token,
                              gradient_accumulation,
-
+                             batch_size,
                              use_8bit_adam,
                          ],
                          outputs=[
@@ -144,8 +147,7 @@ def create_training_demo(trainer: Trainer,
 
 def find_weight_files() -> list[str]:
     curr_dir = pathlib.Path(__file__).parent
-    paths = sorted(curr_dir.rglob('*.
-    paths = [path for path in paths if not path.stem.endswith('.text_encoder')]
+    paths = sorted(curr_dir.rglob('*.bin'))
     return [path.relative_to(curr_dir).as_posix() for path in paths]
 
 
@@ -158,18 +160,18 @@ def create_inference_demo(pipe: InferencePipeline) -> gr.Blocks:
         with gr.Row():
             with gr.Column():
                 base_model = gr.Dropdown(
-                    choices=['stabilityai/stable-diffusion-2-1-base'],
-                    value='
+                    choices=['stabilityai/stable-diffusion-2-1-base', 'CompVis/stable-diffusion-v1-4'],
+                    value='CompVis/stable-diffusion-v1-4',
                     label='Base Model',
-                    visible=
+                    visible=True)
                 reload_button = gr.Button('Reload Weight List')
                 weight_name = gr.Dropdown(choices=find_weight_files(),
-                                          value='custom-diffusion/cat.
+                                          value='custom-diffusion-models/cat.bin',
                                           label='Custom Diffusion Weight File')
                 prompt = gr.Textbox(
                     label='Prompt',
                     max_lines=1,
-                    placeholder='Example: "
+                    placeholder='Example: "\<new1\> cat in outer space"')
                 seed = gr.Slider(label='Seed',
                                  minimum=0,
                                  maximum=100000,
@@ -186,16 +188,21 @@ def create_inference_demo(pipe: InferencePipeline) -> gr.Blocks:
                                            maximum=50,
                                            step=0.1,
                                            value=6)
-                eta = gr.Slider(label='
+                eta = gr.Slider(label='DDIM eta',
                                 minimum=0,
                                 maximum=1.,
                                 step=0.1,
                                 value=1.)
+                batch_size = gr.Slider(label='Batch Size',
+                                       minimum=0,
+                                       maximum=10.,
+                                       step=1,
+                                       value=2)
 
                 run_button = gr.Button('Generate')
 
                 gr.Markdown('''
-                    - Models with names starting with "custom-diffusion/" are the pretrained models provided in the [original repo](https://github.com/adobe-research/custom-diffusion), and the ones with names starting with "results/" are your trained models.
+                    - Models with names starting with "custom-diffusion-models/" are the pretrained models provided in the [original repo](https://github.com/adobe-research/custom-diffusion), and the ones with names starting with "results/" are your trained models.
                     - After training, you can press "Reload Weight List" button to load your trained model names.
                     ''')
             with gr.Column():
@@ -213,6 +220,7 @@ def create_inference_demo(pipe: InferencePipeline) -> gr.Blocks:
                              num_steps,
                              guidance_scale,
                              eta,
+                             batch_size,
                          ],
                          outputs=result,
                          queue=False)
@@ -225,6 +233,7 @@ def create_inference_demo(pipe: InferencePipeline) -> gr.Blocks:
                           num_steps,
                           guidance_scale,
                           eta,
+                          batch_size,
                       ],
                       outputs=result,
                       queue=False)
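In the app.py changes above, weight discovery now globs for *.bin files and feeds the result into a gr.Dropdown placed next to a "Reload Weight List" button. The hunks do not show how reload_button is wired, so the click handler below is only a minimal, self-contained sketch of that pattern, under the assumption that gradio 3.x's gr.Dropdown.update is used to refresh the choices:

# Hypothetical sketch of the weight-list reload pattern; not the Space's exact wiring.
import pathlib

import gradio as gr


def find_weight_files() -> list[str]:
    # Collect every *.bin weight file below the app directory as a relative POSIX path.
    curr_dir = pathlib.Path(__file__).parent
    paths = sorted(curr_dir.rglob('*.bin'))
    return [path.relative_to(curr_dir).as_posix() for path in paths]


def reload_weight_list() -> dict:
    # Rebuild the dropdown choices in place instead of recreating the UI.
    return gr.Dropdown.update(choices=find_weight_files())


with gr.Blocks() as demo:
    weight_name = gr.Dropdown(choices=find_weight_files(),
                              label='Custom Diffusion Weight File')
    reload_button = gr.Button('Reload Weight List')
    reload_button.click(fn=reload_weight_list, inputs=None, outputs=weight_name)

demo.launch()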
custom-diffusion-models/cat.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:08754e711b9ecaa36785dc64ad0c08317a93d106629c5f42cc5b9a406fe4aefc
+size 76690626
inference.py
CHANGED
@@ -6,11 +6,12 @@ import sys
 
 import gradio as gr
 import PIL.Image
+import numpy as np
+
 import torch
 from diffusers import StableDiffusionPipeline
-
 sys.path.insert(0, 'custom-diffusion')
-from src import
+from src import diffuser_training
 
 
 class InferencePipeline:
@@ -59,6 +60,7 @@ class InferencePipeline:
             n_steps: int,
             guidance_scale: float,
             eta: float,
+            batch_size: int,
     ) -> PIL.Image.Image:
         if not torch.cuda.is_available():
             raise gr.Error('CUDA is not available.')
@@ -66,9 +68,11 @@ class InferencePipeline:
         self.load_pipe(base_model, weight_name)
 
         generator = torch.Generator(device=self.device).manual_seed(seed)
-        out = self.pipe(prompt,
+        out = self.pipe([prompt]*batch_size,
                         num_inference_steps=n_steps,
                         guidance_scale=guidance_scale,
                         eta = eta,
                         generator=generator) # type: ignore
-
+        out = out.images
+        out = PIL.Image.fromarray(np.hstack([np.array(x) for x in out]))
+        return out
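With batch_size threaded through, the pipeline above is called with [prompt]*batch_size and the resulting list of images is collapsed into a single horizontal strip with numpy before being returned. A standalone sketch of that tiling step (the dummy images are only for illustration):

# Minimal sketch of tiling equally sized PIL images into one horizontal strip,
# mirroring the np.hstack step in inference.py above.
import numpy as np
import PIL.Image


def tile_horizontally(images: list[PIL.Image.Image]) -> PIL.Image.Image:
    # Each image becomes an HxWx3 uint8 array; hstack concatenates along the width axis.
    return PIL.Image.fromarray(np.hstack([np.array(x) for x in images]))


# Example: two 64x64 RGB images become one 128x64 strip.
batch = [PIL.Image.new('RGB', (64, 64), color) for color in ('red', 'blue')]
strip = tile_horizontally(batch)
print(strip.size)  # (128, 64)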
lora
DELETED
@@ -1 +0,0 @@
-Subproject commit 26787a09bff4ebcb08f0ad4e848b67bce4389a7a
requirements.txt
CHANGED
@@ -8,3 +8,4 @@ torchvision==0.14.0
 transformers==4.25.1
 triton==2.0.0.dev20220701
 xformers==0.0.13
+clip_retrieval
trainer.py
CHANGED
@@ -65,10 +65,9 @@ class Trainer:
             n_steps: int,
             learning_rate: float,
             train_text_encoder: bool,
-
+            modifier_token: bool,
             gradient_accumulation: int,
-
-            fp16: bool,
+            batch_size: int,
             use_8bit_adam: bool,
     ) -> tuple[dict, list[pathlib.Path]]:
         if not torch.cuda.is_available():
@@ -91,28 +90,27 @@ class Trainer:
         accelerate launch custom-diffusion/src/diffuser_training.py \
           --pretrained_model_name_or_path={base_model} \
           --instance_data_dir={self.instance_data_dir} \
-          --class_data_dir={self.class_data_dir} \
           --output_dir={self.output_dir} \
-          --with_prior_preservation --prior_loss_weight=1.0 \
           --instance_prompt="{concept_prompt}" \
+          --class_data_dir={self.class_data_dir} \
+          --with_prior_preservation --real_prior --prior_loss_weight=1.0 \
           --class_prompt="{class_prompt}" \
           --resolution={resolution} \
-          --train_batch_size={
+          --train_batch_size={batch_size} \
          --gradient_accumulation_steps={gradient_accumulation} \
           --learning_rate={learning_rate} \
           --lr_scheduler="constant" \
           --lr_warmup_steps=0 \
           --max_train_steps={n_steps} \
           --num_class_images=200 \
-          --scale_lr
-          --modifier_token "<new1>"
+          --scale_lr
         '''
-        if
-            command += ' --
+        if modifier_token:
+            command += ' --modifier_token "<new1>"'
         if use_8bit_adam:
             command += ' --use_8bit_adam'
         if train_text_encoder:
-            command += f' --train_text_encoder
+            command += f' --train_text_encoder'
 
         with open(self.output_dir / 'train.sh', 'w') as f:
             command_s = ' '.join(command.split())
@@ -126,5 +124,5 @@ class Trainer:
             result_message = 'Training Completed!'
         else:
             result_message = 'Training Failed!'
-        weight_paths = sorted(self.output_dir.glob('*.
+        weight_paths = sorted(self.output_dir.glob('*.bin'))
        return gr.update(value=result_message), weight_paths
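Trainer.run above assembles the accelerate launch command as one f-string, appends optional flags (--modifier_token "<new1>", --use_8bit_adam, --train_text_encoder), collapses whitespace with ' '.join(command.split()), and writes the result to train.sh before running it. Below is a hedged sketch of that pattern; the fixed flag values, the results/ path, and the subprocess call are illustrative assumptions rather than the Space's exact code:

# Illustrative sketch of building, saving, and launching the training command.
import pathlib
import subprocess

output_dir = pathlib.Path('results')  # assumed output location
output_dir.mkdir(parents=True, exist_ok=True)

command = f'''
accelerate launch custom-diffusion/src/diffuser_training.py \
  --pretrained_model_name_or_path=CompVis/stable-diffusion-v1-4 \
  --output_dir={output_dir} \
  --with_prior_preservation --real_prior --prior_loss_weight=1.0 \
  --train_batch_size=1 \
  --max_train_steps=1000 \
  --scale_lr
'''
# Optional flags are appended conditionally, as in Trainer.run above.
modifier_token = True
use_8bit_adam = True
train_text_encoder = False
if modifier_token:
    command += ' --modifier_token "<new1>"'
if use_8bit_adam:
    command += ' --use_8bit_adam'
if train_text_encoder:
    command += ' --train_text_encoder'

# Collapse the multi-line string into a single line and keep a copy as train.sh.
command_s = ' '.join(command.split())
with open(output_dir / 'train.sh', 'w') as f:
    f.write(command_s)

# Execute the saved command (the Space's actual launch step may differ).
subprocess.run(command_s, shell=True, check=False)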