Raman Dutt committed on
Commit
8a34e9e
·
1 Parent(s): 0dc7eec

info labels removed

Browse files
Files changed (1) hide show
  1. app.py +49 -23
app.py CHANGED
@@ -12,12 +12,14 @@ import yaml
12
 
13
  warnings.filterwarnings("ignore")
14
 
 
 
 
 
15
  OUTPUT_DIR = "OUTPUT"
16
  cuda_device = 1
17
  device = f"cuda:{cuda_device}" if torch.cuda.is_available() else "cpu"
18
 
19
- print("DEVICE: ", device)
20
-
21
  TITLE = "Demo for Generating Chest X-rays using Diferent Parameter-Efficient Fine-Tuned Stable Diffusion Pipelines"
22
  INFO_ABOUT_TEXT_PROMPT = "INFO_ABOUT_TEXT_PROMPT"
23
  INFO_ABOUT_GUIDANCE_SCALE = "INFO_ABOUT_GUIDANCE_SCALE"
@@ -31,7 +33,7 @@ EXAMPLE_TEXT_PROMPTS = [
31
  "No radiographic evidence for acute cardiopulmonary process",
32
  ]
33
 
34
-
35
  def load_adapted_unet(unet_pretraining_type, pipe):
36
 
37
  """
@@ -46,7 +48,7 @@ def load_adapted_unet(unet_pretraining_type, pipe):
46
  """
47
 
48
  sd_folder_path = "runwayml/stable-diffusion-v1-5"
49
- exp_path = ''
50
 
51
  if unet_pretraining_type == "freeze":
52
  pass
@@ -71,7 +73,9 @@ def load_adapted_unet(unet_pretraining_type, pipe):
71
  else:
72
  # exp_path = unet_pretraining_type + "_" + "diffusion_pytorch_model.safetensors"
73
  # state_dict = load_file(exp_path)
74
- state_dict = load_file(unet_pretraining_type + "_" + "diffusion_pytorch_model.safetensors")
 
 
75
  print(pipe.unet.load_state_dict(state_dict, strict=False))
76
 
77
 
@@ -97,18 +101,20 @@ def loadSDModel(unet_pretraining_type, cuda_device):
97
 
98
  return pipe
99
 
 
100
  def _predict_using_default_params():
101
 
102
-
103
  # Defining the default parameters
104
- unet_pretraining_type = 'full'
105
- input_text = 'No acute cardiopulmonary abnormality.'
106
  guidance_scale = 4
107
  num_inference_steps = 75
108
- device = '0'
109
- OUTPUT_DIR = 'OUTPUT'
110
 
111
- BARPLOT_TITLE = "Tunable Parameters for {} Fine-Tuning".format(unet_pretraining_type)
 
 
112
  NUM_TUNABLE_PARAMS = {
113
  "full": 86,
114
  "attention": 26.7,
@@ -150,9 +156,9 @@ def _predict_using_default_params():
150
 
151
  print(df)
152
 
153
- df = df[df["Fine-Tuning Strategy"].isin(["full", unet_pretraining_type])].reset_index(
154
- drop=True
155
- )
156
 
157
  bar_plot = gr.BarPlot(
158
  value=df,
@@ -177,8 +183,29 @@ def predict(
177
  OUTPUT_DIR="OUTPUT",
178
  ):
179
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
180
  try:
181
- BARPLOT_TITLE = "Tunable Parameters for {} Fine-Tuning".format(unet_pretraining_type)
 
 
182
  NUM_TUNABLE_PARAMS = {
183
  "full": 86,
184
  "attention": 26.7,
@@ -191,7 +218,7 @@ def predict(
191
  }
192
 
193
  cuda_device = f"cuda:{device}" if torch.cuda.is_available() else "cpu"
194
-
195
  print("Loading Pipeline for {} Fine-Tuning".format(unet_pretraining_type))
196
  sd_pipeline = loadSDModel(
197
  unet_pretraining_type=unet_pretraining_type,
@@ -209,7 +236,6 @@ def predict(
209
  )
210
 
211
  result_pil_image = result_image["images"][0]
212
-
213
 
214
  # Create a Bar Plot displaying the number of tunable parameters for the selected PEFT Type
215
  df = pd.DataFrame(
@@ -221,9 +247,9 @@ def predict(
221
 
222
  print(df)
223
 
224
- df = df[df["Fine-Tuning Strategy"].isin(["full", unet_pretraining_type])].reset_index(
225
- drop=True
226
- )
227
 
228
  bar_plot = gr.BarPlot(
229
  value=df,
@@ -263,14 +289,15 @@ iface = gr.Interface(
263
  label="PEFT Type",
264
  ),
265
  gr.Dropdown(
266
- EXAMPLE_TEXT_PROMPTS, info=INFO_ABOUT_TEXT_PROMPT, label="Input Text", value=EXAMPLE_TEXT_PROMPTS[0]
 
 
267
  ),
268
  gr.Slider(
269
  minimum=1,
270
  maximum=10,
271
  value=4,
272
  step=1,
273
- info=INFO_ABOUT_GUIDANCE_SCALE,
274
  label="Guidance Scale",
275
  ),
276
  gr.Slider(
@@ -278,7 +305,6 @@ iface = gr.Interface(
278
  maximum=100,
279
  value=75,
280
  step=1,
281
- info=INFO_ABOUT_INFERENCE_STEPS,
282
  label="Num Inference Steps",
283
  ),
284
  ],
 
12
 
13
  warnings.filterwarnings("ignore")
14
 
15
+ ################################################################################
16
+
17
+
18
+ # Define the default parameters
19
  OUTPUT_DIR = "OUTPUT"
20
  cuda_device = 1
21
  device = f"cuda:{cuda_device}" if torch.cuda.is_available() else "cpu"
22
 
 
 
23
  TITLE = "Demo for Generating Chest X-rays using Diferent Parameter-Efficient Fine-Tuned Stable Diffusion Pipelines"
24
  INFO_ABOUT_TEXT_PROMPT = "INFO_ABOUT_TEXT_PROMPT"
25
  INFO_ABOUT_GUIDANCE_SCALE = "INFO_ABOUT_GUIDANCE_SCALE"
 
33
  "No radiographic evidence for acute cardiopulmonary process",
34
  ]
35
 
36
+ ################################################################################
37
  def load_adapted_unet(unet_pretraining_type, pipe):
38
 
39
  """
 
48
  """
49
 
50
  sd_folder_path = "runwayml/stable-diffusion-v1-5"
51
+ exp_path = ""
52
 
53
  if unet_pretraining_type == "freeze":
54
  pass
 
73
  else:
74
  # exp_path = unet_pretraining_type + "_" + "diffusion_pytorch_model.safetensors"
75
  # state_dict = load_file(exp_path)
76
+ state_dict = load_file(
77
+ unet_pretraining_type + "_" + "diffusion_pytorch_model.safetensors"
78
+ )
79
  print(pipe.unet.load_state_dict(state_dict, strict=False))
80
 
81
 
 
101
 
102
  return pipe
103
 
104
+
105
  def _predict_using_default_params():
106
 
 
107
  # Defining the default parameters
108
+ unet_pretraining_type = "full"
109
+ input_text = "No acute cardiopulmonary abnormality."
110
  guidance_scale = 4
111
  num_inference_steps = 75
112
+ device = "0"
113
+ OUTPUT_DIR = "OUTPUT"
114
 
115
+ BARPLOT_TITLE = "Tunable Parameters for {} Fine-Tuning".format(
116
+ unet_pretraining_type
117
+ )
118
  NUM_TUNABLE_PARAMS = {
119
  "full": 86,
120
  "attention": 26.7,
 
156
 
157
  print(df)
158
 
159
+ df = df[
160
+ df["Fine-Tuning Strategy"].isin(["full", unet_pretraining_type])
161
+ ].reset_index(drop=True)
162
 
163
  bar_plot = gr.BarPlot(
164
  value=df,
 
183
  OUTPUT_DIR="OUTPUT",
184
  ):
185
 
186
+ """
187
+ Generates a Chest X-ray using the selected PEFT Type, input text prompt, guidance scale, and number of inference steps
188
+
189
+ Parameters:
190
+ unet_pretraining_type (str): The type of PEFT to use for generating the X-ray
191
+ input_text (str): The text prompt to use for generating the X-ray
192
+ guidance_scale (int): The guidance scale to use for generating the X-ray
193
+ num_inference_steps (int): The number of inference steps to use for generating the X-ray
194
+ device (str): The CUDA device to use for generating the X-ray
195
+ OUTPUT_DIR (str): The output directory to save the generated X-ray
196
+
197
+ Returns:
198
+ result_pil_image (PIL.Image): The generated X-ray image
199
+ bar_plot (gr.BarPlot): The number of tunable parameters for the selected PEFT Type
200
+ """
201
+
202
+ # Run the _predict_using_default_params() function to generate a default X-ray output
203
+ # result_pil_image, bar_plot = _predict_using_default_params()
204
+
205
  try:
206
+ BARPLOT_TITLE = "Tunable Parameters for {} Fine-Tuning".format(
207
+ unet_pretraining_type
208
+ )
209
  NUM_TUNABLE_PARAMS = {
210
  "full": 86,
211
  "attention": 26.7,
 
218
  }
219
 
220
  cuda_device = f"cuda:{device}" if torch.cuda.is_available() else "cpu"
221
+
222
  print("Loading Pipeline for {} Fine-Tuning".format(unet_pretraining_type))
223
  sd_pipeline = loadSDModel(
224
  unet_pretraining_type=unet_pretraining_type,
 
236
  )
237
 
238
  result_pil_image = result_image["images"][0]
 
239
 
240
  # Create a Bar Plot displaying the number of tunable parameters for the selected PEFT Type
241
  df = pd.DataFrame(
 
247
 
248
  print(df)
249
 
250
+ df = df[
251
+ df["Fine-Tuning Strategy"].isin(["full", unet_pretraining_type])
252
+ ].reset_index(drop=True)
253
 
254
  bar_plot = gr.BarPlot(
255
  value=df,
 
289
  label="PEFT Type",
290
  ),
291
  gr.Dropdown(
292
+ EXAMPLE_TEXT_PROMPTS,
293
+ label="Input Text",
294
+ value=EXAMPLE_TEXT_PROMPTS[0],
295
  ),
296
  gr.Slider(
297
  minimum=1,
298
  maximum=10,
299
  value=4,
300
  step=1,
 
301
  label="Guidance Scale",
302
  ),
303
  gr.Slider(
 
305
  maximum=100,
306
  value=75,
307
  step=1,
 
308
  label="Num Inference Steps",
309
  ),
310
  ],