update slider names, add md

Files changed:
- .gitattributes +1 -0
- app.py +8 -2
- description.md +40 -0
- src/rstor/analyzis/interactive/crop.py +2 -2
- src/rstor/analyzis/interactive/degradation.py +1 -1
- src/rstor/analyzis/interactive/pipelines.py +9 -3
.gitattributes CHANGED
```diff
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*png* filter=lfs diff=lfs merge=lfs -text
```
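Note on the new LFS rule: the glob `*png*` matches any path containing "png" anywhere in the name, not just the `.png` extension, so it is broader than `*.png`. That appears intentional here (it catches files like `result_png_v2`), but it is worth knowing.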
app.py CHANGED
```diff
@@ -47,7 +47,7 @@ def main(argv, low_resource_demo=True):
     if args.keyboard:
         image_control = KeyboardControl(0, [0, len(img_list)-1], keydown="3", keyup="9", modulo=True)
     else:
-        image_control = (0, [0, len(img_list)-1])
+        image_control = (0, [0, len(img_list)-1], "input image selector")
     interactive(image_index=image_control)(image_selector)
     plug_crop_selector(num_pad=args.keyboard, low_resources=low_resource_demo)
     if not low_resource_demo:
@@ -55,10 +55,16 @@ def main(argv, low_resource_demo=True):
     if args.backend != "gradio":
         plug_morph_canvas()
     model_dict = get_default_models(args.experiments, Path(args.models_storage), keyboard_control=args.keyboard)
+    markdown_description = "# 🔍 Blind image deblurring - READ MORE HERE \n"
+    markdown_description += open("description.md", 'r').read()
+
     interactive_pipeline(
         gui=backend,
         cache=True,
-        safe_input_buffer_deepcopy=False
+        safe_input_buffer_deepcopy=False,
+        sliders_layout="smart",
+        sliders_per_row_layout=3,
+        markdown_description=markdown_description,
     )(natural_inference_pipeline)(
         img_list,
         model_dict
```
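One small caveat in the new description loading: `open("description.md", 'r').read()` never closes the file handle explicitly. A minimal equivalent sketch using `pathlib` — a tidier alternative, not what the commit does:

```python
from pathlib import Path

# Same markdown assembly as in the diff above; read_text() opens and
# closes the file in a single call.
markdown_description = "# 🔍 Blind image deblurring - READ MORE HERE \n"
markdown_description += Path("description.md").read_text()
```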
description.md ADDED
```markdown
# Blind deblurring from synthetic data
MVA project 2024 on [image restoration](https://delires.wp.imt.fr/)

- Jamy Lafenetre
- Balthazar Neveu

------

In the paper [Synthetic images as a regularity prior for image restoration neural networks](https://hal.science/hal-03186499/file/papier_SSVM%20%281%29.pdf) by *Raphaël Achddou, Yann Gousseau, Saïd Ladjal*, it was shown that a deep neural network can be trained for denoising solely on synthetic dead leaves images and still perform relatively well on natural images.

We propose to explore several tracks:
- Use a [NAFNet](https://github.com/megvii-research/NAFNet) architecture
- Extend deadleaves with extra primitives (lines, diamond shapes, colored gradients)
- Check whether the generalization to natural images observed for denoising also holds for deblurring.



We first validated that NAFNet trained on deadleaves performs well on the blind denoising task. Below you can see that it also performs correctly on natural images, although the performance is not as good as that of a network trained purely on natural images.

| Qualitative results at SNR in = 20dB | Quantitative results |
| :---: | :---: |
|  |  |

Finally, when applying the deadleaves training to the blind deblurring problem, one advantage we noticed is that the network always tries to deblur, even when the level of blur is high. By contrast, when trained on natural images, NAFNet does not work as well when the blur level is too large.

|Blind deblurring results|
|:----:|
|  |
|Deblurring results for different amounts of blur, using NAFNet trained on Div2K or deadleaves. From left to right column: "small", "mild" and "big" blur kernels used to degrade the input. **Top row**: input image. **Middle row**: output of NAFNet trained on deadleaves. **Bottom row**: output of NAFNet trained on Div2K.|

**Conclusion**:
- Adding extra primitives to pure deadleaves seems like a good idea but did not bring as much as we expected. A rework adding anisotropy and extra geometric shapes could lead to significantly better results.
- Training on deadleaves images brings a lot of stability to training (since the image distribution is essentially always the same, providing a fair amount of learning signal) and can be seen as a good pretext task before training on the real dataset.
```
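For readers unfamiliar with the dead leaves model referenced in the description: it builds an image by stacking random occluding shapes, classically disks whose radii follow a power-law distribution. A generic minimal sketch of that idea, not the repository's actual generator:

```python
import numpy as np

def dead_leaves(size: int = 256, num_disks: int = 2000, seed: int = 0) -> np.ndarray:
    """Toy dead leaves image: random colored disks drawn back-to-front,
    so later disks occlude earlier ones. The classic model samples radii
    from a power-law distribution; a uniform range is used here for brevity."""
    rng = np.random.default_rng(seed)
    img = np.zeros((size, size, 3), dtype=np.float32)
    yy, xx = np.mgrid[0:size, 0:size]
    for _ in range(num_disks):
        cx, cy = rng.integers(0, size, size=2)
        radius = rng.integers(4, 60)
        color = rng.random(3).astype(np.float32)
        mask = (xx - cx) ** 2 + (yy - cy) ** 2 <= radius ** 2
        img[mask] = color  # overwrite: the new disk occludes everything below
    return img
```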
src/rstor/analyzis/interactive/crop.py CHANGED
```diff
@@ -51,8 +51,8 @@ def plug_crop_selector(num_pad: bool = False, low_resources: bool = False):
     else:
         size_control = (9., [6., 13., 0.3], "crop size", ["+", "-"])
     interactive(
-        center_x=(0.5, [0., 1.], "
-        center_y=(0.5, [0., 1.], "
+        center_x=(0.5, [0., 1.], "crop horizontally", ["4" if num_pad else "left", "6" if num_pad else "right"]),
+        center_y=(0.5, [0., 1.], "crop vertically", ["8" if num_pad else "up", "2" if num_pad else "down"]),
         size=size_control
     )(crop_selector)
```
(The removed lines are truncated in the source page; only their visible prefix is shown.)
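These control tuples follow what appears to be the interactive-pipe convention `(default, [min, max], "label", [key_decrease, key_increase])`, where the optional fourth element binds keyboard shortcuts, switching between numpad digits and arrow keys. A standalone illustration of how such a tuple is built (the `num_pad` flag here is a hypothetical stand-in for `plug_crop_selector`'s argument):

```python
num_pad = True  # hypothetical flag mirroring plug_crop_selector(num_pad=...)

# (default, [min, max], "slider label", [key_to_decrease, key_to_increase])
center_x = (0.5, [0., 1.], "crop horizontally",
            ["4" if num_pad else "left", "6" if num_pad else "right"])
```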
src/rstor/analyzis/interactive/degradation.py CHANGED
```diff
@@ -50,7 +50,7 @@ def get_blur_kernel_box(ksize=3):
 
 
 @interactive(
-    blur_index=(0, [
+    blur_index=(0, [0, 2], "blur kernel selection")
 )
 def get_blur_kernel(blur_index: int = -1, global_params={}):
     if blur_index == -1:
```
(The removed line is truncated in the source page; only its visible prefix is shown.)
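From the surrounding lines, `blur_index == -1` (the function's default, outside the slider's [0, 2] range) presumably means "no blur". A self-contained sketch of that selection logic, with hypothetical box kernels standing in for the repository's kernels:

```python
import numpy as np

def box_kernel(ksize: int = 3) -> np.ndarray:
    """Normalized box blur kernel (entries sum to 1)."""
    return np.ones((ksize, ksize), dtype=np.float32) / ksize**2

def get_blur_kernel_sketch(blur_index: int = -1) -> np.ndarray:
    # -1 falls outside the slider's [0, 2] range: interpret as identity (no blur).
    if blur_index == -1:
        return np.array([[1.]], dtype=np.float32)
    sizes = (3, 5, 7)  # hypothetical kernel sizes for the three slider positions
    return box_kernel(sizes[blur_index])
```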
src/rstor/analyzis/interactive/pipelines.py CHANGED
```diff
@@ -43,6 +43,11 @@ def morph_canvas(canvas=CANVAS[0], global_params={}):
     global_params["__pipeline"].outputs = CANVAS_DICT[canvas]
     return None
 
+def visualize_kernel(kernel, global_params={}):
+    kernel_amplif = kernel.copy()
+    # kernel_amplif = kernel_amplif - kernel_amplif.min() / (kernel_amplif.max() - kernel_amplif.min())
+    kernel_amplif = (kernel_amplif * 10).clip(0, 1)
+    return kernel_amplif
 
 def natural_inference_pipeline(input_image_list: List[np.ndarray], models_dict: dict):
     model = model_selector(models_dict)
@@ -51,12 +56,13 @@ def natural_inference_pipeline(input_image_list: List[np.ndarray], models_dict:
     groundtruth = crop(img_clean)
     blur_kernel = get_blur_kernel()
     degraded = degrade_blur(groundtruth, blur_kernel)
-    degraded = degrade_noise(degraded)
-
+    # degraded = degrade_noise(degraded)
+    kernel_amplif = visualize_kernel(blur_kernel)
+    kernel_amplif = rescale_thumbnail(kernel_amplif)
     restored = infer(degraded, model)
     # configure_metrics()
     # get_metrics_restored(restored, groundtruth)
     # get_metrics_degraded(degraded, groundtruth)
     # morph_canvas()
     # return [[degraded, restored], [blur_kernel, groundtruth]]
-    return [degraded, restored, groundtruth,
+    return [degraded, restored, groundtruth, kernel_amplif]
```
(The removed `return` line is truncated in the source page; only its visible prefix is shown.)