ysharma (HF staff) committed
Commit 8c46c8a · 1 Parent(s): aa77ba7
Files changed (1)
  1. app.py +12 -4
app.py CHANGED
@@ -27,17 +27,25 @@ torch.manual_seed(1)
 #image # nice. diffusers are cool. #no need
 finetuned_lora_weights = "./lora_weight.pt"
 
+#global var
+counter = 0
+
 #####
 #my fine tuned weights
 def monkeypatching(alpha): #, prompt, pipe): finetuned_lora_weights
-    monkeypatch_lora(pipe.unet, torch.load(finetuned_lora_weights)) #"./lora_weight.pt"))
-    tune_lora_scale(pipe.unet, alpha) #1.00)
+    global counter
+    if counter == 0 :
+        monkeypatch_lora(pipe.unet, torch.load("./output_example/lora_weight.pt")) #finetuned_lora_weights
+        tune_lora_scale(pipe.unet, alpha) #1.00)
+        counter +=1
+    else :
+        tune_lora_scale(pipe.unet, alpha) #1.00)
     image = pipe(prompt, num_inference_steps=50, guidance_scale=7).images[0]
     image.save("./illust_lora.jpg") #"./contents/illust_lora.jpg")
     return image
 
 def accelerate_train_lora(steps):
-    print("***********inside accelerate_train_lora 11111***********")
+    print("*********** inside accelerate_train_lora ***********")
     #subprocess.run(accelerate launch {"./train_lora_dreambooth.py"} \
     #subprocess.Popen(f'accelerate launch {"./train_lora_dreambooth.py"} \
     os.system( f'accelerate launch {"./train_lora_dreambooth.py"} \
@@ -52,7 +60,7 @@ def accelerate_train_lora(steps):
     --lr_scheduler="constant" \
     --lr_warmup_steps=0 \
     --max_train_steps={int(steps)}') #,shell=True) #30000
-    print("***********inside accelerate_train_lora 22222***********")
+    print("*********** completing accelerate_train_lora ***********")
     return
 
 with gr.Blocks() as demo:
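
For context: monkeypatch_lora from the lora_diffusion package (cloneofsimo/lora) rewrites the UNet's attention weights in place, so calling it on every Gradio event would re-patch an already patched model; the new counter guard loads the weights once and only retunes the scale on later calls. Below is a minimal standalone sketch of that pattern, assuming lora_diffusion is installed; the model id, and the names generate and _patched, are placeholders and not part of this commit.

import torch
from diffusers import StableDiffusionPipeline
from lora_diffusion import monkeypatch_lora, tune_lora_scale

# Assumed base model; the Space's actual pipeline setup is outside this diff.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

_patched = False  # plays the same role as `counter` in the commit

def generate(prompt: str, alpha: float):
    """Patch the UNet with the LoRA weights once, then only retune the scale."""
    global _patched
    if not _patched:
        # Must run exactly once per process: it replaces attention weights in place.
        monkeypatch_lora(pipe.unet, torch.load("./output_example/lora_weight.pt"))
        _patched = True
    tune_lora_scale(pipe.unet, alpha)  # cheap and safe to call repeatedly
    return pipe(prompt, num_inference_steps=50, guidance_scale=7).images[0]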
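
The commented-out subprocess.run / subprocess.Popen lines show an earlier attempt to launch training without os.system. A sketch of that route, passing the arguments as a list so no shell quoting or f-string escaping is involved; only the flags visible in this diff are included, and the flags elided from the hunk would be appended the same way.

import subprocess

def accelerate_train_lora(steps):
    # Same launch as the os.system call in the commit, but argument-list
    # style: no shell is spawned, and check=True surfaces a failed run as
    # an exception instead of a silently ignored exit code.
    subprocess.run(
        [
            "accelerate", "launch", "./train_lora_dreambooth.py",
            # ... training flags elided from the diff hunk go here ...
            "--lr_scheduler=constant",
            "--lr_warmup_steps=0",
            f"--max_train_steps={int(steps)}",
        ],
        check=True,
    )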