Commit 069eedb · 1 Parent(s): 576bad0
Update app.py
app.py CHANGED
```diff
@@ -138,7 +138,6 @@ def create_dataset(*inputs):
     print("Creating dataset")
     images = inputs[0]
     destination_folder = str(uuid.uuid4())
-    print(destination_folder)
     if not os.path.exists(destination_folder):
         os.makedirs(destination_folder)
 
@@ -306,9 +305,7 @@ git+https://github.com/huggingface/datasets.git'''
     api = HfApi(token=token)
     username = api.whoami()["name"]
     subprocess_command = ["autotrain", "spacerunner", "--project-name", slugged_lora_name, "--script-path", spacerunner_folder, "--username", username, "--token", token, "--backend", "spaces-a10gs", "--env","HF_TOKEN=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;HF_HUB_ENABLE_HF_TRANSFER=1", "--args", spacerunner_args]
-    print(subprocess_command)
     outcome = subprocess.run(subprocess_command)
-    print(outcome)
     if(outcome.returncode == 0):
         return f"""# Your training has started.
 ## - Model page: <a href='https://huggingface.co/{username}/{slugged_lora_name}'>{username}/{slugged_lora_name}</a> <small>(the model will be available when training finishes)</small>
@@ -377,7 +374,6 @@ def start_training_og(
     progress = gr.Progress(track_tqdm=True)
 ):
     slugged_lora_name = slugify(lora_name)
-    print(train_text_encoder_ti_frac)
     commands = ["--pretrained_model_name_or_path=stabilityai/stable-diffusion-xl-base-1.0",
                 "--pretrained_vae_model_name_or_path=madebyollin/sdxl-vae-fp16-fix",
                 f"--instance_prompt={concept_sentence}",
@@ -446,21 +442,16 @@ def start_training_og(
             shutil.copy(image, class_folder)
         commands.append(f"--class_data_dir={class_folder}")
 
-    print(commands)
     from train_dreambooth_lora_sdxl_advanced import main as train_main, parse_args as parse_train_args
     args = parse_train_args(commands)
     train_main(args)
-    #print(commands)
-    #subprocess.run(commands)
     return "ok!"
 
 @spaces.GPU()
 def run_captioning(*inputs):
     model.to("cuda")
-    print(inputs)
     images = inputs[0]
     training_option = inputs[-1]
-    print(training_option)
     final_captions = [""] * MAX_IMAGES
     for index, image in enumerate(images):
         original_caption = inputs[index + 1]
```