Commit 2301b77 by ysharma (HF staff) · 1 Parent(s): 5ebc581

update examples
Files changed (1): app.py (+9, -8)
app.py CHANGED
@@ -1,7 +1,7 @@
 from diffusers import StableDiffusionPipeline
 from lora_diffusion import monkeypatch_lora, tune_lora_scale
 import torch
-import os
+import os, shutil
 import gradio as gr
 import subprocess
 
@@ -41,9 +41,10 @@ def monkeypatching(alpha, in_prompt, wt): #, prompt, pipe): finetuned_lora_weigh
     image.save("./illust_lora.jpg") #"./contents/illust_lora.jpg")
     return image
 
-def accelerate_train_lora(steps):
+def accelerate_train_lora(steps, images):
     print("*********** inside accelerate_train_lora ***********")
-    #subprocess.run(accelerate launch {"./train_lora_dreambooth.py"} \
+    for file in images:
+        shutil.copy( file, './data_example')
     #subprocess.Popen(f'accelerate launch {"./train_lora_dreambooth.py"} \
     os.system( f'accelerate launch {"./train_lora_dreambooth.py"} \
     --pretrained_model_name_or_path={MODEL_NAME} \
@@ -59,7 +60,7 @@ def accelerate_train_lora(steps):
     --max_train_steps={int(steps)}') #,shell=True) #30000
     print("*********** completing accelerate_train_lora ***********")
     #lora_trained_weights = "./output_example/lora_weight.pt"
-    return "./output_example/lora_weight.pt"
+    return f"{OUTPUT_DIR}/lora_weight.pt"
 
 with gr.Blocks() as demo:
     gr.Markdown("""<h1><center>LORA - Low-rank Adaptation for Fast Text-to-Image Diffusion Fine-tuning</center></h1>
@@ -67,7 +68,7 @@ with gr.Blocks() as demo:
     gr.HTML("<p>You can skip the queue by duplicating this space and upgrading to gpu in settings: <a style='display:inline-block' href='https://huggingface.co/spaces/ysharma/Low-rank-Adaptation?duplicate=true'><img src='https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14' alt='Duplicate Space'></a></p>")
     gr.Markdown("""<b>NEW!!</b> : I have fine-tuned the SD model for 15,000 steps using 100 PlaygroundAI images and LORA. You can load this trained model using the example component. Load the weight and start using the Space with the Inference button. Feel free to toggle the Alpha value.""")
     gr.Markdown(
-        """**Main Features**<br>- Fine-tune Stable diffusion models twice as faster than dreambooth method, by Low-rank Adaptation.<br>- Get insanely small end result, easy to share and download.<br>- Easy to use, compatible with diffusers.<br>- Sometimes even better performance than full fine-tuning<br><br>Please refer to the GitHub repo this Space is based on, here - <a href = "https://github.com/cloneofsimo/lora">LORA</a>. You can also refer to this tweet by AK to quote/retweet/like here on <a href="https://twitter.com/_akhaliq/status/1601120767009513472">Twitter</a>.This Gradio Space is an attempt to explore this novel LORA approach to fine-tune Stable diffusion models, using the power and flexibility of Gradio! The higher number of steps results in longer training time and better fine-tuned SD models.<br><br><b>To use this Space well:</b><br>- First, upload your set of images (4-5), then enter the number of fine-tuning steps, and then press the 'Train LORA model' button. This will produce your fine-tuned model weights.<br>- Enter a prompt, set the alpha value using the Slider (nearer to 1 implies overfitting to the uploaded images), and then press the 'Inference' button. This will produce an image by the newly fine-tuned model.<br><b>Bonus:</b>You can download your fine-tuned model weights from the Gradio file component. The smaller size of LORA models (around 3-4 MB files) is the main highlight of this 'Low-rank Adaptation' approach of fine-tuning.""")
+        """**Main Features**<br>- Fine-tune Stable diffusion models twice as faster as dreambooth method by Low-rank Adaptation.<br>- Get insanely small end results, easy to share and download.<br>- Easy to use, compatible with diffusers.<br>- Sometimes even better performance than full fine-tuning<br><br>Please refer to the GitHub repo this Space is based on, here - <a href = "https://github.com/cloneofsimo/lora">LORA</a>. You can also refer to this tweet by AK to quote/retweet/like here on <a href="https://twitter.com/_akhaliq/status/1601120767009513472">Twitter</a>.This Gradio Space is an attempt to explore this novel LORA approach to fine-tune Stable diffusion models, using the power and flexibility of Gradio! The higher number of steps results in longer training time and better fine-tuned SD models.<br><br><b>To use this Space well:</b><br>- First, upload your set of images (4-5), then enter the number of fine-tuning steps, and then press the 'Train LORA model' button. This will produce your fine-tuned model weights.<br>- Enter a prompt, set the alpha value using the Slider (nearer to 1 implies overfitting to the uploaded images), and then press the 'Inference' button. This will produce an image by the newly fine-tuned model.<br><b>Bonus:</b>You can download your fine-tuned model weights from the Gradio file component. The smaller size of LORA models (around 3-4 MB files) is the main highlight of this 'Low-rank Adaptation' approach of fine-tuning.""")
 
     with gr.Row():
         in_images = gr.File(label="Upload images to fine-tune for LORA", file_count="multiple")
@@ -92,13 +93,13 @@ with gr.Blocks() as demo:
         fn=monkeypatching,
         cache_examples=True,)
     gr.Examples(
-        examples=[[4000]],
-        inputs=in_steps,
+        examples=[[4000, ['./simba1.jpg', './simba2.jpg', './simba3.jpg', './simba4.jpg']]],
+        inputs=[in_steps, in_images],
         outputs=out_file,
         fn=accelerate_train_lora,
         cache_examples=True,)
 
-    b1.click(fn = accelerate_train_lora, inputs=in_steps , outputs=out_file)
+    b1.click(fn = accelerate_train_lora, inputs=[in_steps, in_images] , outputs=out_file)
     b2.click(fn = monkeypatching, inputs=[in_alpha, in_prompt, out_file], outputs=out_image)
 
     demo.queue(concurrency_count=3)
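For orientation, here is a minimal, self-contained sketch of what the updated training callback does after this commit: copy the uploaded images into ./data_example, launch train_lora_dreambooth.py through accelerate, and return the path of the produced weight file. This is not the Space's exact code: the MODEL_NAME value, the os.makedirs call, the getattr fallback for Gradio file objects, and subprocess.run in place of os.system are assumptions added for a runnable illustration, and the training flags not visible in this diff are omitted.

import os
import shutil
import subprocess

MODEL_NAME = "runwayml/stable-diffusion-v1-5"  # assumption: base SD checkpoint used by the Space
DATA_DIR = "./data_example"                    # instance-image folder used in the diff
OUTPUT_DIR = "./output_example"                # output folder implied by the diff

def accelerate_train_lora(steps, images):
    # Copy the files uploaded through gr.File into the training data folder.
    os.makedirs(DATA_DIR, exist_ok=True)
    for f in images:
        # Depending on the Gradio version, gr.File may hand the callback tempfile
        # wrappers rather than plain paths; fall back to .name when it is present.
        shutil.copy(getattr(f, "name", f), DATA_DIR)

    # Launch the LoRA DreamBooth trainer; only the flags visible in the diff are
    # shown here, the rest of the command line from app.py is omitted.
    subprocess.run(
        f'accelerate launch ./train_lora_dreambooth.py '
        f'--pretrained_model_name_or_path={MODEL_NAME} '
        f'--max_train_steps={int(steps)}',
        shell=True,
        check=True,
    )
    return f"{OUTPUT_DIR}/lora_weight.pt"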
 
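The gr.Examples change is what actually "updates examples": because the callback now takes two arguments, each cached example row has to supply a value per input component, so a step count is paired with a list of image paths and the same [in_steps, in_images] pair is wired into the Train button. A stripped-down sketch of that wiring follows; the gr.Number and gr.File component types for in_steps and out_file are assumptions, since those components are defined outside the hunks shown above, and the stand-in trainer only prints what it receives.

import gradio as gr

def accelerate_train_lora(steps, images):
    # Stand-in for the real trainer; just reports what an example row provides.
    print(f"would train for {int(steps)} steps on {len(images)} images")
    return "./output_example/lora_weight.pt"

with gr.Blocks() as demo:
    in_images = gr.File(label="Upload images to fine-tune for LORA", file_count="multiple")
    in_steps = gr.Number(label="Number of fine-tuning steps", value=4000)  # assumed component type
    out_file = gr.File(label="Fine-tuned LORA weights")                    # assumed component type
    b1 = gr.Button("Train LORA model")

    # Each example row lists one value per input component, in order.
    # With cache_examples=True the example is run once at startup, so the
    # listed simba*.jpg files (paths taken from the diff) must exist locally.
    gr.Examples(
        examples=[[4000, ['./simba1.jpg', './simba2.jpg', './simba3.jpg', './simba4.jpg']]],
        inputs=[in_steps, in_images],
        outputs=out_file,
        fn=accelerate_train_lora,
        cache_examples=True,
    )
    b1.click(fn=accelerate_train_lora, inputs=[in_steps, in_images], outputs=out_file)

demo.queue(concurrency_count=3)  # Gradio 3.x queue API, as used by the Space
demo.launch()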