nick-leland committed on
Commit 36a356a · 1 Parent(s): b09ff92

Updated the app to now provide a 'fixed' image
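The core of the change: after distorting the input, the app now also remaps the distorted image back through the negated displacement field and returns the result as a new "Fixed Image" output (the `applied_transformed` value in the diff below). A minimal, self-contained sketch of that idea, with illustrative names rather than the repo's exact code, assuming the forward warp used a per-pixel displacement field (gx, gy):

import numpy as np
import cv2

def fix_image(distorted, gx, gy):
    """Approximately undo a warp whose per-pixel displacement field is (gx, gy)."""
    rows, cols = distorted.shape[:2]
    x, y = np.meshgrid(np.arange(cols, dtype=np.float32),
                       np.arange(rows, dtype=np.float32))
    # Negate the displacement and clamp the sampling grid to the image bounds,
    # mirroring the x_inv / y_inv computation in transform_image.
    x_inv = np.clip(x - gx, 0, cols - 1).astype(np.float32)
    y_inv = np.clip(y - gy, 0, rows - 1).astype(np.float32)
    return cv2.remap(distorted, x_inv, y_inv, cv2.INTER_LINEAR)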

Files changed (1): app.py (+32 -22)
app.py CHANGED
@@ -276,8 +276,8 @@ learn_fresh = load_learner('model_fresh.pkl')
 
 # Loads the YOLO Model
 model_bulge = YOLO("bulge_yolo_model.pt")
-modelv8x = YOLO("yolov8x.pt")
-modelv8n = YOLO("yolov8n.pt")
+# modelv8x = YOLO("yolov8x.pt")
+# modelv8n = YOLO("yolov8n.pt")
 
 def predict_image(img, model, conf_threshold, iou_threshold):
     """Predicts objects in an image using a YOLOv8 model with adjustable confidence and IOU thresholds."""
@@ -300,7 +300,7 @@ def transform_image(image, func_choice, randomization_check, radius, center_x, c
     I = np.asarray(Image.open(image))
 
     # Downsample large images
-    max_size = 1024  # Increased from 512 to allow for more detail
+    max_size = 640  # Increased from 512 to allow for more detail, decreased from 1024 to match YOLO model training.
     if max(I.shape[:2]) > max_size:
         scale = max_size / max(I.shape[:2])
         new_size = (int(I.shape[1] * scale), int(I.shape[0] * scale))
@@ -352,6 +352,7 @@ def transform_image(image, func_choice, randomization_check, radius, center_x, c
         func = bulge
         edge_smoothness = 0
         center_smoothness = 0
+
     elif func_choice == "Volcano":
         func = bulge
         edge_smoothness = 0
@@ -388,11 +389,14 @@ def transform_image(image, func_choice, randomization_check, radius, center_x, c
     inv_gx, inv_gy = -gx, -gy
     x_inv = x + inv_gx
     y_inv = y + inv_gy
+
     x_inv = np.clip(x_inv, 0, cols - 1)
     y_inv = np.clip(y_inv, 0, rows - 1)
 
     inverse_transformed = cv2.remap(I, x_inv, y_inv, cv2.INTER_LINEAR)
 
+    applied_transformed = cv2.remap(transformed, x_inv, y_inv, cv2.INTER_LINEAR)
+
     print(f"Transformed image shape: {transformed.shape}")
     print(f"Inverse transformed image shape: {inverse_transformed.shape}")
 
@@ -407,6 +411,8 @@ def transform_image(image, func_choice, randomization_check, radius, center_x, c
     original_size = np.asarray(Image.open(image)).shape[:2][::-1]
     transformed = cv2.resize(transformed, original_size, interpolation=cv2.INTER_LINEAR)
     inverse_transformed = cv2.resize(inverse_transformed, original_size, interpolation=cv2.INTER_LINEAR)
+    applied_transformed = cv2.resize(applied_transformed, original_size, interpolation=cv2.INTER_LINEAR)
+
     vector_field = cv2.resize(vector_field, original_size, interpolation=cv2.INTER_LINEAR)
     inverted_vector_field = cv2.resize(inverted_vector_field, original_size, interpolation=cv2.INTER_LINEAR)
 
@@ -420,30 +426,33 @@ def transform_image(image, func_choice, randomization_check, radius, center_x, c
 
     result = Image.fromarray(transformed)
 
-    categories = ['Distorted', 'Maze']
+    # categories = ['Distorted', 'Maze']
 
-    def clean_output(result_values):
-        pred, idx, probs = result_values
-        return dict(zip(categories, map(float, probs)))
+    # def clean_output(result_values):
+    #     pred, idx, probs = result_values
+    #     return dict(zip(categories, map(float, probs)))
 
-    result_bias = learn_bias.predict(result)
-    result_fresh = learn_fresh.predict(result)
-    result_bias_final = clean_output(result_bias)
-    result_fresh_final = clean_output(result_fresh)
+    # Outdated, changing to a classification basis
+    # result_bias = learn_bias.predict(result)
+    # result_fresh = learn_fresh.predict(result)
+    # result_bias_final = clean_output(result_bias)
+    # result_fresh_final = clean_output(result_fresh)
 
     result_localization = model_bulge.predict(transformed, save=True)
     print(result_localization, "bulge")
-    result_localization1 = modelv8n.predict(transformed, save=True)
-    print(result_localization1, "modelv8n")
-    result_localization2 = modelv8x.predict(transformed, save=True)
-    print(result_localization2, "modelv8x")
+    # result_localization1 = modelv8n.predict(transformed, save=True)
+    # print(result_localization1, "modelv8n")
+    # result_localization2 = modelv8x.predict(transformed, save=True)
+    # print(result_localization2, "modelv8x")
 
 
     YOLO_image = predict_image(transformed, model_bulge, 0.5, 0.5)
-    YOLO_image1 = predict_image(transformed, modelv8n, 0.5, 0.5)
-    YOLO_image2 = predict_image(transformed, modelv8x, 0.5, 0.5)
+    # YOLO_image1 = predict_image(transformed, modelv8n, 0.5, 0.5)
+    # YOLO_image2 = predict_image(transformed, modelv8x, 0.5, 0.5)
 
-    return transformed, YOLO_image, YOLO_image1, YOLO_image2, result_bias_final, result_fresh_final, vector_field, inverse_transformed, inverted_vector_field
+    # return transformed, YOLO_image, YOLO_image1, YOLO_image2, result_bias_final, result_fresh_final, vector_field, inverse_transformed, inverted_vector_field
+    # return transformed, YOLO_image, result_bias_final, result_fresh_final, vector_field, inverse_transformed, inverted_vector_field
+    return transformed, YOLO_image, vector_field, inverse_transformed, inverted_vector_field, applied_transformed
 
 
 demo = gr.Interface(
@@ -469,13 +478,14 @@ demo = gr.Interface(
     outputs=[
         gr.Image(label="Transformed Image"),
         gr.Image(label="bulge_model Model Classification"),
-        gr.Image(label="yolov8n Model Classification"),
-        gr.Image(label="yolov8x Model Classification"),
-        gr.Label(),
-        gr.Label(),
+        # gr.Image(label="yolov8n Model Classification"),
+        # gr.Image(label="yolov8x Model Classification"),
+        # gr.Label(),
+        # gr.Label(),
         gr.Image(label="Gradient Vector Field"),
         gr.Image(label="Inverse Gradient"),
         gr.Image(label="Inverted Vector Field"),
+        gr.Image(label="Fixed Image")
     ],
     title="Image Transformation Demo!",
     article="If you like this demo, please star the github repository for the project! Located [here!](https://github.com/nick-leland/DistortionML)",