Commit 0e68d2d
Parent(s): adcfd5e

Fixes after Slack thread

Files changed:
- README.md (+2 -2)
- app.py (+53 -35)
- convertosd.py (+4 -1)
- train_dreambooth.py (+4 -1)
README.md CHANGED
@@ -1,10 +1,10 @@
 ---
 title: Dreambooth Training
-emoji:
+emoji: ☁️
 colorFrom: pink
 colorTo: red
 sdk: gradio
-sdk_version: 3.
+sdk_version: 3.11
 app_file: app.py
 pinned: false
 license: mit
app.py CHANGED
@@ -11,17 +11,20 @@ import requests
 import torch
 import zipfile
 import urllib.parse
+import gc
 from diffusers import StableDiffusionPipeline
+from huggingface_hub import snapshot_download
 
 css = '''
 .instruction{position: absolute; top: 0;right: 0;margin-top: 0px !important}
 .arrow{position: absolute;top: 0;right: -110px;margin-top: -8px !important}
 #component-4, #component-3, #component-10{min-height: 0}
 '''
-model_to_load = "multimodalart/sd-fine-tunable"
 maximum_concepts = 3
+
 #Pre download the files even if we don't use it here
-
+model_to_load = snapshot_download(repo_id="multimodalart/sd-fine-tunable")
+safety_checker = snapshot_download(repo_id="multimodalart/sd-sc")
 
 def zipdir(path, ziph):
     # ziph is zipfile handle
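This hunk is the core of the fix: instead of handing a repo id string to the loaders at call time, both the base model and the safety-checker repo are pre-downloaded with `snapshot_download`, which returns the local cache path. A minimal sketch of the pattern (the commented-out pipeline load is illustrative, not part of this diff):

```python
from huggingface_hub import snapshot_download

# Downloads every file in the repo once and returns the local snapshot
# directory; repeat calls resolve from the cache instead of the network.
model_to_load = snapshot_download(repo_id="multimodalart/sd-fine-tunable")
safety_checker = snapshot_download(repo_id="multimodalart/sd-sc")

print(model_to_load)  # a path under ~/.cache/huggingface/hub/...
# Any loader that accepts a local path can consume it directly, e.g.:
# StableDiffusionPipeline.from_pretrained(model_to_load)
```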
@@ -67,8 +70,14 @@ def count_files(*inputs):
     return(gr.update(visible=True, value=f"You are going to train {concept_counter} {type_of_thing}(s), with {file_counter} images for {Training_Steps} steps. This should take around {round(Training_Steps/1.5, 2)} seconds, or {round((Training_Steps/1.5)/3600, 2)} hours. As a reminder, the T4 GPU costs US$0.60 for 1h. Once training is over, don't forget to swap the hardware back to CPU."))
 
 def train(*inputs):
+    torch.cuda.empty_cache()
+    if 'pipe' in globals():
+        del pipe
+    gc.collect()
+
     if "IS_SHARED_UI" in os.environ:
         raise gr.Error("This Space only works in duplicated instances")
+
     if os.path.exists("output_model"): shutil.rmtree('output_model')
     if os.path.exists("instance_images"): shutil.rmtree('instance_images')
     if os.path.exists("diffusers_model.zip"): os.remove("diffusers_model.zip")
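The new preamble in `train()` frees a pipeline left over from a previous `generate()` call before training claims the GPU. One subtlety: a bare `del pipe` inside a function targets a *local* name, so deleting a module-level `pipe` needs a `global pipe` declaration or the `globals()` dict. A sketch of the safer form (the helper name is mine):

```python
import gc
import torch

def release_cached_pipeline(name: str = "pipe") -> None:
    # Deleting a module-level object from inside a function has to go
    # through globals(); `del pipe` alone would bind `pipe` as a local.
    if name in globals():
        del globals()[name]
    gc.collect()               # drop the last Python references
    torch.cuda.empty_cache()   # return freed blocks to the CUDA driver
```

`empty_cache()` only releases memory that is no longer referenced, which is why the `del`/`gc.collect()` pair has to run first.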
@@ -135,19 +144,24 @@
         lr_warmup_steps = 0,
         max_train_steps=Training_Steps,
     )
+    print("Starting training...")
     run_training(args_general)
+    gc.collect()
     torch.cuda.empty_cache()
-
-
-
+    print("Adding Safety Checker to the model...")
+    shutil.copytree(f"{safety_checker}/feature_extractor", "output_model/feature_extractor")
+    shutil.copytree(f"{safety_checker}/safety_checker", "output_model/safety_checker")
+    shutil.copy(f"model_index.json", "output_model/model_index.json")
+    print("Zipping model file...")
     with zipfile.ZipFile('diffusers_model.zip', 'w', zipfile.ZIP_DEFLATED) as zipf:
         zipdir('output_model/', zipf)
-
-    return [gr.update(visible=True, value=["diffusers_model.zip"]), gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)]
+    print("Training completed!")
+    return [gr.update(visible=True, value=["diffusers_model.zip"]), gr.update(visible=True), gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)]
 
 def generate(prompt):
+    torch.cuda.empty_cache()
     from diffusers import StableDiffusionPipeline
-
+    global pipe
     pipe = StableDiffusionPipeline.from_pretrained("./output_model", torch_dtype=torch.float16)
     pipe = pipe.to("cuda")
     image = pipe(prompt).images[0]
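After training, the hunk grafts the safety checker and feature extractor from the pre-downloaded `multimodalart/sd-sc` snapshot into `output_model/` and zips the result. `zipdir` is only called here, not defined in this hunk; a plausible implementation consistent with the call site (`zipdir('output_model/', zipf)`) would be:

```python
import os
import zipfile

os.makedirs("output_model", exist_ok=True)  # stand-in for the trained model folder

def zipdir(path, ziph):
    # ziph is a zipfile handle; store each file under a path relative to
    # the parent of `path` so the archive unpacks to output_model/...
    for root, _dirs, files in os.walk(path):
        for file in files:
            full = os.path.join(root, file)
            ziph.write(full, os.path.relpath(full, os.path.join(path, "..")))

with zipfile.ZipFile("diffusers_model.zip", "w", zipfile.ZIP_DEFLATED) as zipf:
    zipdir("output_model/", zipf)
```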
@@ -180,7 +194,7 @@ def push(model_name, where_to_upload, hf_token):
         else:
             title_instance_prompt_string = ''
         previous_instance_prompt = instance_prompt
-        image_string = f'''{title_instance_prompt_string}
+        image_string = f'''{title_instance_prompt_string} (use that on your prompt)
 {image_string}![{instance_prompt} {i}](https://huggingface.co/{model_id}/resolve/main/concept_images/{urllib.parse.quote(image)})'''
     readme_text = f'''---
 license: creativeml-openrail-m
@@ -189,13 +203,13 @@ tags:
 ---
 ### {model_name} Dreambooth model trained by {api.whoami(token=hf_token)["name"]} with [Hugging Face Dreambooth Training Space](https://huggingface.co/spaces/multimodalart/dreambooth-training)
 
-You run your new concept via `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb)
+You run your new concept via `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb). Don't forget to use the concept prompts!
 
-Sample pictures of this concept:
+Sample pictures of:
 {image_string}
 '''
     #Save the readme to a file
-    readme_file = open("README.md", "w")
+    readme_file = open("model.README.md", "w")
     readme_file.write(readme_text)
     readme_file.close()
     #Save the token identifier to a file
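The rename from `README.md` to `model.README.md` keeps the generated model card from clobbering the Space's own `README.md` in the working directory; the file is mapped back to `README.md` at upload time (see the `CommitOperationAdd` in the next hunk). A small sketch of the write, using a context manager instead of the explicit `open`/`close` pair (the card text is a stand-in):

```python
readme_text = "---\nlicense: creativeml-openrail-m\n---\n# My Dreambooth model"  # stand-in card

# Written locally as model.README.md so the Space's own README.md stays
# untouched; the commit operation renames it to README.md in the repo.
with open("model.README.md", "w") as readme_file:
    readme_file.write(readme_text)
```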
@@ -205,7 +219,7 @@ Sample pictures of this concept:
     create_repo(model_id,private=True, token=hf_token)
     operations = [
         CommitOperationAdd(path_in_repo="token_identifier.txt", path_or_fileobj="token_identifier.txt"),
-        CommitOperationAdd(path_in_repo="README.md", path_or_fileobj="README.md"),
+        CommitOperationAdd(path_in_repo="README.md", path_or_fileobj="model.README.md"),
         CommitOperationAdd(path_in_repo=f"model.ckpt",path_or_fileobj="model.ckpt")
     ]
     api.create_commit(
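The upload path is the `huggingface_hub` commit API: local files are staged as `CommitOperationAdd` operations, where `path_or_fileobj` is the local file and `path_in_repo` is its name inside the repo, which is how `model.README.md` lands as `README.md`. A self-contained sketch (repo id and token are placeholders):

```python
from huggingface_hub import HfApi, CommitOperationAdd, create_repo

api = HfApi()
hf_token = "hf_..."                      # placeholder: a write-scope token
model_id = "your-user/your-dreambooth"   # placeholder repo id

create_repo(model_id, private=True, token=hf_token)
operations = [
    # path_or_fileobj = local file, path_in_repo = its name in the repo
    CommitOperationAdd(path_in_repo="README.md", path_or_fileobj="model.README.md"),
    CommitOperationAdd(path_in_repo="model.ckpt", path_or_fileobj="model.ckpt"),
]
api.create_commit(
    repo_id=model_id,
    operations=operations,
    commit_message="Upload trained model",
    token=hf_token,
)
```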
@@ -237,17 +251,17 @@ with gr.Blocks(css=css) as demo:
         gr.HTML('''
             <div class="gr-prose" style="max-width: 80%">
             <h2>Attention - This Space doesn't work in this shared UI</h2>
-            <p>For it to work, you have to duplicate the Space and run it on your own profile using a (paid) private T4 GPU for training. As each T4 costs US$0
-            <p>Please, duplicate this Space, then go to the Settings tab and select a T4
+            <p>For it to work, you have to duplicate the Space and run it on your own profile using a (paid) private T4 GPU for training. As each T4 costs US$0.60/h, it should cost < US$1 to train a model with less than 100 images using default settings!</p>
+            <p>Please, duplicate this Space, then go to the Settings tab and select a T4 instance.</p>
             <img class="instruction" src="file/duplicate.png">
             <img class="arrow" src="file/arrow.png" />
             </div>
         ''')
     else:
-        gr.HTML('''
+        gr.HTML(f'''
             <div class="gr-prose" style="max-width: 80%">
-            <h2>You have successfully
-            <p>If you haven't already, attribute a T4 GPU to it (via the Settings tab) and run the training below. You will be billed by the minute from when you activate the GPU until when you turn it off.</p>
+            <h2>You have successfully duplicated the Dreambooth Training Space</h2>
+            <p>If you haven't already, <a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}/settings">attribute a T4 GPU to it (via the Settings tab)</a> and run the training below. You will be billed by the minute from when you activate the GPU until when you turn it off.</p>
             </div>
         ''')
     gr.Markdown("# Dreambooth training")
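The success banner now deep-links to the duplicated Space's own Settings page by reading the `SPACE_ID` environment variable that the Spaces runtime injects. Where the variable might be absent (for example, a local run), `os.environ.get` with a fallback avoids a `KeyError`; a minimal sketch (the fallback value is my assumption):

```python
import os

# SPACE_ID is set by the Hugging Face Spaces runtime, e.g. "user/my-space".
space_id = os.environ.get("SPACE_ID", "local/dev")
settings_url = f"https://huggingface.co/spaces/{space_id}/settings"
print(settings_url)
```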
@@ -277,9 +291,9 @@ with gr.Blocks(css=css) as demo:
             visible = False
             is_visible.append(gr.State(value=False))
 
-        file_collection.append(gr.File(label=f
+        file_collection.append(gr.File(label=f'''Upload the images for your {ordinal(x+1) if (x>0) else ""} concept''', file_count="multiple", interactive=True, visible=visible))
         with gr.Column(visible=visible) as row[x]:
-            concept_collection.append(gr.Textbox(label=f
+            concept_collection.append(gr.Textbox(label=f'''{ordinal(x+1) if (x>0) else ""} concept prompt - use a unique, made up word to avoid collisions'''))
             with gr.Row():
                 if(x < maximum_concepts-1):
                     buttons_collection.append(gr.Button(value="Add +1 concept", visible=visible))
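The restored widget labels call an `ordinal` helper that is defined elsewhere in app.py, not in this diff. A typical implementation matching its use here (`ordinal(x+1)` yielding "2nd", "3rd", ...) would be:

```python
def ordinal(n: int) -> str:
    # 1 -> "1st", 2 -> "2nd", 3 -> "3rd"; teens are always "th".
    if 11 <= (n % 100) <= 13:
        suffix = "th"
    else:
        suffix = {1: "st", 2: "nd", 3: "rd"}.get(n % 10, "th")
    return f"{n}{suffix}"

print(ordinal(2))  # "2nd" -> label "Upload the images for your 2nd concept"
```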
@@ -302,9 +316,7 @@ with gr.Blocks(css=css) as demo:
         if(counter_delete < len(delete_collection)+1):
             delete_button.click(lambda:[gr.update(visible=False),gr.update(visible=False), gr.update(visible=True), False], None, [file_collection[counter_delete], row[counter_delete], buttons_collection[counter_delete-1], is_visible[counter_delete]], queue=False)
         counter_delete += 1
-
-
-
+
     with gr.Accordion("Custom Settings", open=False):
         swap_auto_calculated = gr.Checkbox(label="Use custom settings")
         gr.Markdown("If not checked, the number of steps and % of frozen encoder will be tuned automatically according to the amount of images you upload and whether you are training an `object`, `person` or `style` as follows: The number of steps is calculated by number of images uploaded multiplied by 20. The text-encoder is frozen after 10% of the steps for a style, 30% of the steps for an object and is fully trained for persons.")
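The Markdown in this hunk states the auto-tuning rule in prose: steps = number of images × 20, and the text encoder trains for 10% of the steps for a style, 30% for an object, and 100% for a person. The same rule as a worked example (function and variable names are mine, not app.py's):

```python
def auto_settings(num_images: int, type_of_thing: str):
    # steps = images * 20; fraction of steps with the text encoder unfrozen
    steps = num_images * 20
    frac = {"style": 0.10, "object": 0.30, "person": 1.00}[type_of_thing]
    return steps, int(steps * frac)

print(auto_settings(15, "object"))  # (300, 90): 300 steps, encoder trains for the first 90
```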
@@ -315,27 +327,33 @@ with gr.Blocks(css=css) as demo:
     training_summary = gr.Textbox("", visible=False, label="Training Summary")
     steps.change(fn=count_files, inputs=file_collection+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary], queue=False)
     perc_txt_encoder.change(fn=count_files, inputs=file_collection+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary], queue=False)
+
     for file in file_collection:
         file.change(fn=count_files, inputs=file_collection+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary], queue=False)
     train_btn = gr.Button("Start Training")
-
-
-
+
+    completed_training = gr.Markdown("# ✅ Training completed", visible=False)
+
+    with gr.Row():
+        with gr.Box(visible=False) as try_your_model:
+            gr.Markdown("## Try your model")
             prompt = gr.Textbox(label="Type your prompt")
             result_image = gr.Image()
-
-
-            gr.
-
-
-
-
-
+            generate_button = gr.Button("Generate Image")
+
+        with gr.Box(visible=False) as push_to_hub:
+            gr.Markdown("## Push to Hugging Face Hub")
+            model_name = gr.Textbox(label="Name of your model", placeholder="Tarsila do Amaral Style")
+            where_to_upload = gr.Dropdown(["My personal profile", "Public Library"], label="Upload to")
+            gr.Markdown("[A Hugging Face write access token](https://huggingface.co/settings/tokens), go to \"New token\" -> Role : Write. A regular read token won't work here.")
+            hf_token = gr.Textbox(label="Hugging Face Write Token")
+            push_button = gr.Button("Push to the Hub")
+
     result = gr.File(label="Download the uploaded models in the diffusers format", visible=True)
     success_message_upload = gr.Markdown(visible=False)
     convert_button = gr.Button("Convert to CKPT", visible=False)
 
-    train_btn.click(fn=train, inputs=is_visible+concept_collection+file_collection+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[result, try_your_model, push_to_hub, convert_button])
+    train_btn.click(fn=train, inputs=is_visible+concept_collection+file_collection+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[result, try_your_model, push_to_hub, convert_button, completed_training])
     generate_button.click(fn=generate, inputs=prompt, outputs=result_image)
     push_button.click(fn=push, inputs=[model_name, where_to_upload, hf_token], outputs=[success_message_upload, result])
     convert_button.click(fn=convert_to_ckpt, inputs=[], outputs=result)
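The new layout hides the "Try your model" and "Push to Hugging Face Hub" boxes until `train()` returns, using the Gradio pattern where a handler returns one `gr.update(...)` per output component, and layout blocks themselves can be outputs. A stripped-down sketch of the same wiring (Gradio 3.x, matching the pinned `sdk_version: 3.11`):

```python
import gradio as gr

def fake_train():
    # One gr.update per output component, in order: reveal both boxes.
    return [gr.update(visible=True), gr.update(visible=True)]

with gr.Blocks() as demo:
    train_btn = gr.Button("Start Training")
    with gr.Box(visible=False) as try_your_model:   # hidden until training ends
        gr.Markdown("## Try your model")
    with gr.Box(visible=False) as push_to_hub:
        gr.Markdown("## Push to Hugging Face Hub")
    train_btn.click(fn=fake_train, inputs=[], outputs=[try_your_model, push_to_hub])

demo.launch()
```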
convertosd.py CHANGED
@@ -7,7 +7,7 @@ import argparse
 import os.path as osp
 
 import torch
-
+import gc
 
 # =================#
 # UNet Conversion #
@@ -221,3 +221,6 @@ def convert(model_path, checkpoint_path):
     state_dict = {k:v.half() for k,v in state_dict.items()}
     state_dict = {"state_dict": state_dict}
     torch.save(state_dict, checkpoint_path)
+    del state_dict, text_enc_dict, vae_state_dict, unet_state_dict
+    torch.cuda.empty_cache()
+    gc.collect()
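Just above the new cleanup lines, `convert()` halves every tensor before `torch.save`, which is what keeps the exported `model.ckpt` at fp16 size. The shape of that save on a toy state dict (the key name is a stand-in):

```python
import torch

state_dict = {"layer.weight": torch.randn(4, 4)}           # stand-in weights
state_dict = {k: v.half() for k, v in state_dict.items()}  # fp32 -> fp16
torch.save({"state_dict": state_dict}, "model.ckpt")       # original-SD key layout
```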
train_dreambooth.py CHANGED
@@ -6,6 +6,7 @@ from pathlib import Path
 from typing import Optional
 import subprocess
 import sys
+import gc
 
 import torch
 import torch.nn.functional as F
@@ -812,7 +813,9 @@ def run_training(args_imported):
         repo.push_to_hub(commit_message="End of training", blocking=False, auto_lfs_prune=True)
 
     accelerator.end_training()
-
+    del pipeline
+    torch.cuda.empty_cache()
+    gc.collect()
 if __name__ == "__main__":
     pass
     #main()