ysharma HF staff committed on
Commit
720c562
·
1 Parent(s): f746958
Files changed (1) hide show
  1. app.py +30 -22
app.py CHANGED
@@ -1,21 +1,12 @@
1
  #https://github.com/huggingface/diffusers/tree/main/examples/dreambooth
2
- #export MODEL_NAME="stabilityai/stable-diffusion-2-1-base"
3
- #export INSTANCE_DIR="./data_example"
4
- #export OUTPUT_DIR="./output_example"
5
-
6
- #accelerate launch train_lora_dreambooth.py \
7
- # --pretrained_model_name_or_path=$MODEL_NAME \
8
- # --instance_data_dir=$INSTANCE_DIR \
9
- # --output_dir=$OUTPUT_DIR \
10
- # --instance_prompt="style of sks" \
11
- # --resolution=512 \
12
- # --train_batch_size=1 \
13
- # --gradient_accumulation_steps=1 \
14
- # --learning_rate=1e-4 \
15
- # --lr_scheduler="constant" \
16
- # --lr_warmup_steps=0 \
17
- # --max_train_steps=30000
18
 
 
19
  from diffusers import StableDiffusionPipeline
20
  from lora_diffusion import monkeypatch_lora, tune_lora_scale
21
  import torch
@@ -25,7 +16,7 @@ import gradio as gr
25
  import subprocess
26
  # If your shell script has shebang,
27
  # you can omit shell=True argument.
28
- subprocess.run("./run_lora_db.sh", shell=True)
29
 
30
  #####
31
  model_id = "stabilityai/stable-diffusion-2-1-base"
@@ -38,22 +29,39 @@ finetuned_lora_weights = "./lora_weight.pt"
38
 
39
  #####
40
  #my fine tuned weights
41
- def monkeypatching( alpha): #, prompt, pipe): finetuned_lora_weights
42
  monkeypatch_lora(pipe.unet, torch.load(finetuned_lora_weights)) #"./lora_weight.pt"))
43
  tune_lora_scale(pipe.unet, alpha) #1.00)
44
  image = pipe(prompt, num_inference_steps=50, guidance_scale=7).images[0]
45
  image.save("./illust_lora.jpg") #"./contents/illust_lora.jpg")
46
  return image
47
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
48
  with gr.Blocks() as demo:
49
  with gr.Row():
50
- in_images = gr.Image(label="Upload images to fine-tune for LORA")
51
  #in_prompt = gr.Textbox(label="Enter a ")
52
  in_steps = gr.Number(label="Enter number of steps")
53
  in_alpha = gr.Slider(0.1,1.0, step=0.01, label="Set Alpha level - higher value has more chances to overfit")
54
- b1 = gr.Button(value="Create LORA model")
 
55
  with gr.Row():
56
  out_image = gr.Image(label="Image generated by LORA model")
57
- b1.click(fn = monkeypatching, inputs=in_alpha, outputs=out_image)
 
58
 
59
  demo.launch(debug=True, show_error=True)
 
1
  #https://github.com/huggingface/diffusers/tree/main/examples/dreambooth
2
+ #export
3
+ MODEL_NAME="stabilityai/stable-diffusion-2-1-base"
4
+ #export
5
+ INSTANCE_DIR="./data_example"
6
+ #export
7
+ OUTPUT_DIR="./output_example"
 
 
 
 
 
 
 
 
 
 
8
 
9
+
10
  from diffusers import StableDiffusionPipeline
11
  from lora_diffusion import monkeypatch_lora, tune_lora_scale
12
  import torch
 
16
  import subprocess
17
  # If your shell script has shebang,
18
  # you can omit shell=True argument.
19
+ #subprocess.run("./run_lora_db.sh", shell=True)
20
 
21
  #####
22
  model_id = "stabilityai/stable-diffusion-2-1-base"
 
29
 
30
  #####
31
  #my fine tuned weights
32
+ def monkeypatching(alpha): #, prompt, pipe): finetuned_lora_weights
33
  monkeypatch_lora(pipe.unet, torch.load(finetuned_lora_weights)) #"./lora_weight.pt"))
34
  tune_lora_scale(pipe.unet, alpha) #1.00)
35
  image = pipe(prompt, num_inference_steps=50, guidance_scale=7).images[0]
36
  image.save("./illust_lora.jpg") #"./contents/illust_lora.jpg")
37
  return image
38
+
39
+ def accelerate_train_lora(steps):
40
+ accelerate launch "./train_lora_dreambooth.py" \
41
+ --pretrained_model_name_or_path=MODEL_NAME \
42
+ --instance_data_dir=INSTANCE_DIR \
43
+ --output_dir=OUTPUT_DIR \
44
+ --instance_prompt="style of sks" \
45
+ --resolution=512 \
46
+ --train_batch_size=1 \
47
+ --gradient_accumulation_steps=1 \
48
+ --learning_rate=1e-4 \
49
+ --lr_scheduler="constant" \
50
+ --lr_warmup_steps=0 \
51
+ --max_train_steps=steps #30000
52
+ return
53
+
54
  with gr.Blocks() as demo:
55
  with gr.Row():
56
+ in_images = gr.File(label="Upload images to fine-tune for LORA", file_count="multiple")
57
  #in_prompt = gr.Textbox(label="Enter a ")
58
  in_steps = gr.Number(label="Enter number of steps")
59
  in_alpha = gr.Slider(0.1,1.0, step=0.01, label="Set Alpha level - higher value has more chances to overfit")
60
+ b1 = gr.Button(value="Train LORA model")
61
+ b2 = gr.Button(value="Inference using LORA model")
62
  with gr.Row():
63
  out_image = gr.Image(label="Image generated by LORA model")
64
+ b1.click(fn = accelerate_train_lora, inputs=in_steps)
65
+ b2.click(fn = monkeypatching, inputs=in_alpha, outputs=out_image)
66
 
67
  demo.launch(debug=True, show_error=True)