amildravid4292 committed on
Commit 0e00c55 · verified · 1 Parent(s): fb01019

Update app.py

Files changed (1):
  1. app.py +20 -18
app.py CHANGED
@@ -34,6 +34,12 @@ from diffusers import (
 
 
 device = gr.State("cuda")
+unet = gr.State()
+vae = gr.State()
+text_encoder = gr.State()
+tokenizer = gr.State()
+noise_scheduler = gr.State()
+network = gr.State()
 
 pretrained_model_name_or_path = "stablediffusionapi/realistic-vision-v51"
 revision = None
@@ -43,39 +49,35 @@ weight_dtype = torch.bfloat16
 pipe = StableDiffusionPipeline.from_pretrained("stablediffusionapi/realistic-vision-v51",
                                                torch_dtype=torch.float16,safety_checker = None,
                                                requires_safety_checker = False).to(device.value)
-noise_scheduler = pipe.scheduler
+noise_scheduler.value = pipe.scheduler
 del pipe
-tokenizer = AutoTokenizer.from_pretrained(
+tokenizer.value = AutoTokenizer.from_pretrained(
     pretrained_model_name_or_path, subfolder="tokenizer", revision=revision
 )
-text_encoder = CLIPTextModel.from_pretrained(
+text_encoder.value = CLIPTextModel.from_pretrained(
     pretrained_model_name_or_path, subfolder="text_encoder", revision=revision
 )
-vae = AutoencoderKL.from_pretrained(pretrained_model_name_or_path, subfolder="vae", revision=revision)
-unet = UNet2DConditionModel.from_pretrained(
+vae.value = AutoencoderKL.from_pretrained(pretrained_model_name_or_path, subfolder="vae", revision=revision)
+unet.value = UNet2DConditionModel.from_pretrained(
     pretrained_model_name_or_path, subfolder="unet", revision=revision
 )
 
-unet.requires_grad_(False)
-unet.to(device.value, dtype=weight_dtype)
-vae.requires_grad_(False)
+unet.value.requires_grad_(False)
+unet.value.to(device.value, dtype=weight_dtype)
+vae.value.requires_grad_(False)
 
-text_encoder.requires_grad_(False)
-vae.requires_grad_(False)
-vae.to(device.value, dtype=weight_dtype)
-text_encoder.to(device.value, dtype=weight_dtype)
+text_encoder.value.requires_grad_(False)
+vae.value.requires_grad_(False)
+vae.value.to(device.value, dtype=weight_dtype)
+text_encoder.value.to(device.value, dtype=weight_dtype)
 print("")
 
 
 
-unet = gr.State(unet)
-vae = gr.State(vae)
-text_encoder = gr.State(text_encoder)
-tokenizer = gr.State(tokenizer)
-noise_scheduler = gr.State(noise_scheduler)
 
 
-network = gr.State()
+
+
 
 
 
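
For context, the pattern this commit switches to is: create empty gr.State holders at module scope, assign the loaded objects to their .value once they are ready, and let later Gradio callbacks read the same objects back through .value. Below is a minimal, self-contained sketch of that pattern, not the actual app.py: FakeModel and the describe callback are placeholders standing in for the diffusers models and the app's real handlers.

import gradio as gr

# Module-level State holders, mirroring the pattern in this commit:
# created empty, then filled in via .value after loading.
device = gr.State("cuda")
unet = gr.State()
noise_scheduler = gr.State()

class FakeModel:
    """Placeholder standing in for UNet2DConditionModel in this sketch."""
    def __repr__(self):
        return "FakeModel()"

# Mirrors `unet.value = UNet2DConditionModel.from_pretrained(...)` in the diff.
unet.value = FakeModel()
noise_scheduler.value = "scheduler placeholder"

def describe():
    # Callbacks read the loaded objects back through the holders' .value.
    return f"device={device.value}, unet={unet.value!r}, scheduler={noise_scheduler.value!r}"

with gr.Blocks() as demo:
    out = gr.Textbox(label="loaded state")
    gr.Button("Show").click(describe, inputs=None, outputs=out)

if __name__ == "__main__":
    demo.launch()

Clicking "Show" returns whatever is currently stored in the State holders, which is the same .value access the updated app.py relies on after moving the models into gr.State.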