Migrated from GitHub
- .gitattributes +24 -0
- data/LICENSE +21 -0
- data/data/expansion_factors.csv +3 -0
- data/data/registration/post_expansion/example1.tif +3 -0
- data/data/registration/post_expansion/example2.tif +3 -0
- data/data/registration/pre_expansion/example1.tif +3 -0
- data/data/registration/pre_expansion/example2.tif +3 -0
- data/data/segmentation_data.csv +3 -0
- data/data/trailmap_volumes/seg-example1.tif/seg-example1-1.tif +3 -0
- data/data/trailmap_volumes/seg-example1.tif/seg-example1-10.tif +3 -0
- data/data/trailmap_volumes/seg-example1.tif/seg-example1-2.tif +3 -0
- data/data/trailmap_volumes/seg-example1.tif/seg-example1-3.tif +3 -0
- data/data/trailmap_volumes/seg-example1.tif/seg-example1-4.tif +3 -0
- data/data/trailmap_volumes/seg-example1.tif/seg-example1-5.tif +3 -0
- data/data/trailmap_volumes/seg-example1.tif/seg-example1-6.tif +3 -0
- data/data/trailmap_volumes/seg-example1.tif/seg-example1-7.tif +3 -0
- data/data/trailmap_volumes/seg-example1.tif/seg-example1-8.tif +3 -0
- data/data/trailmap_volumes/seg-example1.tif/seg-example1-9.tif +3 -0
- data/data/trailmap_volumes/seg-example2.tif/seg-example2-1.tif +3 -0
- data/data/trailmap_volumes/seg-example2.tif/seg-example2-10.tif +3 -0
- data/data/trailmap_volumes/seg-example2.tif/seg-example2-2.tif +3 -0
- data/data/trailmap_volumes/seg-example2.tif/seg-example2-3.tif +3 -0
- data/data/trailmap_volumes/seg-example2.tif/seg-example2-4.tif +3 -0
- data/data/trailmap_volumes/seg-example2.tif/seg-example2-5.tif +3 -0
- data/data/trailmap_volumes/seg-example2.tif/seg-example2-6.tif +3 -0
- data/data/trailmap_volumes/seg-example2.tif/seg-example2-7.tif +3 -0
- data/data/trailmap_volumes/seg-example2.tif/seg-example2-8.tif +3 -0
- data/data/trailmap_volumes/seg-example2.tif/seg-example2-9.tif +3 -0
- data/process_batch.py +43 -0
- data/processing/__init__.py +2 -0
- data/processing/measure.py +32 -0
- data/processing/process.py +31 -0
- data/register_batch.py +34 -0
- data/registration/__init__.py +1 -0
- data/registration/rigid_registration.py +56 -0
.gitattributes
CHANGED
@@ -57,3 +57,27 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 # Video files - compressed
 *.mp4 filter=lfs diff=lfs merge=lfs -text
 *.webm filter=lfs diff=lfs merge=lfs -text
+data/data/registration/post_expansion/example1.tif filter=lfs diff=lfs merge=lfs -text
+data/data/registration/post_expansion/example2.tif filter=lfs diff=lfs merge=lfs -text
+data/data/registration/pre_expansion/example1.tif filter=lfs diff=lfs merge=lfs -text
+data/data/registration/pre_expansion/example2.tif filter=lfs diff=lfs merge=lfs -text
+data/data/trailmap_volumes/seg-example1.tif/seg-example1-1.tif filter=lfs diff=lfs merge=lfs -text
+data/data/trailmap_volumes/seg-example1.tif/seg-example1-10.tif filter=lfs diff=lfs merge=lfs -text
+data/data/trailmap_volumes/seg-example1.tif/seg-example1-2.tif filter=lfs diff=lfs merge=lfs -text
+data/data/trailmap_volumes/seg-example1.tif/seg-example1-3.tif filter=lfs diff=lfs merge=lfs -text
+data/data/trailmap_volumes/seg-example1.tif/seg-example1-4.tif filter=lfs diff=lfs merge=lfs -text
+data/data/trailmap_volumes/seg-example1.tif/seg-example1-5.tif filter=lfs diff=lfs merge=lfs -text
+data/data/trailmap_volumes/seg-example1.tif/seg-example1-6.tif filter=lfs diff=lfs merge=lfs -text
+data/data/trailmap_volumes/seg-example1.tif/seg-example1-7.tif filter=lfs diff=lfs merge=lfs -text
+data/data/trailmap_volumes/seg-example1.tif/seg-example1-8.tif filter=lfs diff=lfs merge=lfs -text
+data/data/trailmap_volumes/seg-example1.tif/seg-example1-9.tif filter=lfs diff=lfs merge=lfs -text
+data/data/trailmap_volumes/seg-example2.tif/seg-example2-1.tif filter=lfs diff=lfs merge=lfs -text
+data/data/trailmap_volumes/seg-example2.tif/seg-example2-10.tif filter=lfs diff=lfs merge=lfs -text
+data/data/trailmap_volumes/seg-example2.tif/seg-example2-2.tif filter=lfs diff=lfs merge=lfs -text
+data/data/trailmap_volumes/seg-example2.tif/seg-example2-3.tif filter=lfs diff=lfs merge=lfs -text
+data/data/trailmap_volumes/seg-example2.tif/seg-example2-4.tif filter=lfs diff=lfs merge=lfs -text
+data/data/trailmap_volumes/seg-example2.tif/seg-example2-5.tif filter=lfs diff=lfs merge=lfs -text
+data/data/trailmap_volumes/seg-example2.tif/seg-example2-6.tif filter=lfs diff=lfs merge=lfs -text
+data/data/trailmap_volumes/seg-example2.tif/seg-example2-7.tif filter=lfs diff=lfs merge=lfs -text
+data/data/trailmap_volumes/seg-example2.tif/seg-example2-8.tif filter=lfs diff=lfs merge=lfs -text
+data/data/trailmap_volumes/seg-example2.tif/seg-example2-9.tif filter=lfs diff=lfs merge=lfs -text
data/LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2021 Kamran Ahmed

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
data/data/expansion_factors.csv
ADDED
@@ -0,0 +1,3 @@
id,expansion_factor
example1,4.172762498448865
example2,4.234239813334196
data/data/registration/post_expansion/example1.tif
ADDED
Git LFS Details

data/data/registration/post_expansion/example2.tif
ADDED
Git LFS Details

data/data/registration/pre_expansion/example1.tif
ADDED
Git LFS Details

data/data/registration/pre_expansion/example2.tif
ADDED
Git LFS Details
data/data/segmentation_data.csv
ADDED
@@ -0,0 +1,3 @@
id,image_volume (um3),axon_volume (um3),axon_length (um),avg_axon_radius (um)
seg-example1.tif,2192.190448787476,40.14157371180198,676.9529106394549,0.10804224759340286
seg-example2.tif,2098.084266317545,97.1325822783973,1589.9739226329862,0.11063189804553986
data/data/trailmap_volumes/seg-example1.tif/seg-example1-1.tif
ADDED
Git LFS Details

data/data/trailmap_volumes/seg-example1.tif/seg-example1-10.tif
ADDED
Git LFS Details

data/data/trailmap_volumes/seg-example1.tif/seg-example1-2.tif
ADDED
Git LFS Details

data/data/trailmap_volumes/seg-example1.tif/seg-example1-3.tif
ADDED
Git LFS Details

data/data/trailmap_volumes/seg-example1.tif/seg-example1-4.tif
ADDED
Git LFS Details

data/data/trailmap_volumes/seg-example1.tif/seg-example1-5.tif
ADDED
Git LFS Details

data/data/trailmap_volumes/seg-example1.tif/seg-example1-6.tif
ADDED
Git LFS Details

data/data/trailmap_volumes/seg-example1.tif/seg-example1-7.tif
ADDED
Git LFS Details

data/data/trailmap_volumes/seg-example1.tif/seg-example1-8.tif
ADDED
Git LFS Details

data/data/trailmap_volumes/seg-example1.tif/seg-example1-9.tif
ADDED
Git LFS Details

data/data/trailmap_volumes/seg-example2.tif/seg-example2-1.tif
ADDED
Git LFS Details

data/data/trailmap_volumes/seg-example2.tif/seg-example2-10.tif
ADDED
Git LFS Details

data/data/trailmap_volumes/seg-example2.tif/seg-example2-2.tif
ADDED
Git LFS Details

data/data/trailmap_volumes/seg-example2.tif/seg-example2-3.tif
ADDED
Git LFS Details

data/data/trailmap_volumes/seg-example2.tif/seg-example2-4.tif
ADDED
Git LFS Details

data/data/trailmap_volumes/seg-example2.tif/seg-example2-5.tif
ADDED
Git LFS Details

data/data/trailmap_volumes/seg-example2.tif/seg-example2-6.tif
ADDED
Git LFS Details

data/data/trailmap_volumes/seg-example2.tif/seg-example2-7.tif
ADDED
Git LFS Details

data/data/trailmap_volumes/seg-example2.tif/seg-example2-8.tif
ADDED
Git LFS Details

data/data/trailmap_volumes/seg-example2.tif/seg-example2-9.tif
ADDED
Git LFS Details
data/process_batch.py
ADDED
@@ -0,0 +1,43 @@
import os
import pandas as pd
import sys
from processing import process_volume, measure

if __name__ == "__main__":
    base_path = os.path.abspath(__file__ + "/..")

    expansion_factors = pd.read_csv(base_path + "/data/expansion_factors.csv")
    expansion_factors_dict = {}

    input_batch = sys.argv[1:]

    for input_folder in input_batch:
        if not os.path.isdir(input_folder):
            raise Exception(input_folder + " is not a directory. Inputs must be a folder of files.")

        row = expansion_factors[expansion_factors["id"] == os.path.basename(input_folder)[4:-4]]  # folder "seg-<id>.tif" -> "<id>"

        if row.empty:
            raise Exception(input_folder + " does not have an expansion factor.")
        expansion_factor = row["expansion_factor"].values[0]

        expansion_factors_dict[input_folder] = expansion_factor

    assert len(input_batch) == len(expansion_factors_dict), "Collisions when creating expansion_factor dictionary (i.e. one-to-many relationship)."

    print(f"Number of volumes: {len(expansion_factors_dict)}")

    save_file = base_path + "/data/segmentation_data.csv"

    with open(save_file, "w") as f:
        f.write("id,image_volume (um3),axon_volume (um3),axon_length (um),avg_axon_radius (um)\n")

    for input_folder, expansion_factor in expansion_factors_dict.items():
        name = os.path.basename(input_folder)
        print(f"Processing {name}")

        vol = process_volume(input_folder)
        data = measure(vol, expansion_factor)

        with open(save_file, "a") as f:
            f.write(f"{name},{data[0]},{data[1]},{data[2]},{data[3]}\n")
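Usage note (not part of the commit, inferred from the code above): since the script reads folder paths from sys.argv[1:] and strips the leading "seg-" and trailing ".tif" to look up the expansion factor, it would presumably be run as, e.g., python data/process_batch.py data/data/trailmap_volumes/seg-example1.tif data/data/trailmap_volumes/seg-example2.tif, overwriting data/data/segmentation_data.csv with one row per input folder.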
data/processing/__init__.py
ADDED
@@ -0,0 +1,2 @@
from processing.measure import measure
from processing.process import process_volume
data/processing/measure.py
ADDED
@@ -0,0 +1,32 @@
import numpy as np
from edt import edt3d
from skan import Skeleton, summarize
from skimage.morphology import skeletonize_3d


"""
Lateral and axial lightsheet resolution (microns).
"""
x_res = 0.091000116097948115
z_res = 0.52175056847846326


def measure(vol, expansion_factor):
    voxel_size = x_res * x_res * z_res
    sampling = (z_res, x_res, x_res)

    normalized_voxel_size = voxel_size / (expansion_factor ** 3)
    normalized_sampling = tuple(dim / expansion_factor for dim in sampling)

    total_img_volume = vol.size * normalized_voxel_size
    total_axon_volume = np.count_nonzero(vol) * normalized_voxel_size

    skeleton = skeletonize_3d(vol)
    branch_data = summarize(Skeleton(skeleton, spacing=normalized_sampling))
    total_axon_length = branch_data["branch-distance"].sum()

    # significantly faster than scipy.ndimage.distance_transform_edt
    distance_transform = edt3d(vol, anisotropy=normalized_sampling, black_border=False, order="C", parallel=10)
    avg_axon_radius = np.mean(distance_transform[skeleton.astype(bool)])

    return [total_img_volume, total_axon_volume, total_axon_length, avg_axon_radius]
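Side note (not part of the commit): the normalization in measure() converts expanded-gel measurements back to pre-expansion units by dividing the sampling by the expansion factor, so voxel volume scales with the cube of that factor. A minimal sketch using x_res, z_res and the example1 factor from this commit:

x_res = 0.091000116097948115          # lateral resolution, microns (expanded gel)
z_res = 0.52175056847846326           # axial resolution, microns (expanded gel)
expansion_factor = 4.172762498448865  # example1, from expansion_factors.csv

voxel_size = x_res * x_res * z_res                          # ~0.00432 um^3 in expanded space
normalized_voxel_size = voxel_size / expansion_factor ** 3  # ~5.9e-05 um^3 in pre-expansion space
normalized_sampling = tuple(d / expansion_factor for d in (z_res, x_res, x_res))

print(normalized_voxel_size)   # voxel volume used for the image/axon volume totals
print(normalized_sampling)     # (z, y, x) spacing used for skeleton length and radius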
data/processing/process.py
ADDED
@@ -0,0 +1,31 @@
import cv2
import numpy as np
import os
from skimage.morphology import remove_small_objects


def read_folder_volume(path):
    tiffs = [os.path.join(path, f) for f in os.listdir(path) if f[0] != '.']
    fnames = sorted(tiffs)

    vol = []

    for fname in fnames:
        img = cv2.imread(fname, cv2.IMREAD_ANYDEPTH | cv2.IMREAD_ANYCOLOR)  # preserve each slice's bit depth
        vol.append(img)

    vol = np.array(vol)

    return vol


def binarize(array, threshold_value):
    return (array > threshold_value)


def process_volume(path):
    vol = read_folder_volume(path)
    threshold = binarize(vol, 0.7)
    filtered = remove_small_objects(threshold, min_size=256, connectivity=3)

    return filtered
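Side note (not part of the commit): a minimal sketch of the thresholding and small-object filtering that process_volume() applies after loading, run on a synthetic volume instead of TIFF slices; the 0.7 threshold and min_size=256 are the values hard-coded above:

import numpy as np
from skimage.morphology import remove_small_objects

# synthetic "probability" volume: one large bright block and one tiny speck
vol = np.zeros((20, 64, 64), dtype=np.float32)
vol[5:15, 10:30, 10:30] = 0.9    # 10*20*20 = 4000 voxels, kept
vol[2, 50, 50] = 0.9             # single voxel, removed as a small object

mask = vol > 0.7                 # binarize
filtered = remove_small_objects(mask, min_size=256, connectivity=3)

print(mask.sum(), filtered.sum())  # 4001 -> 4000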
data/register_batch.py
ADDED
@@ -0,0 +1,34 @@
import cv2
import os
from registration import rigid_registration


def get_images(path):
    images = [os.path.join(path, f) for f in os.listdir(path) if f[0] != "."]
    return sorted(images)


if __name__ == "__main__":
    base_path = os.path.abspath(__file__ + "/..")

    pre_folder = get_images(base_path + "/data/registration/pre_expansion")
    post_folder = get_images(base_path + "/data/registration/post_expansion")

    assert len(pre_folder) == len(post_folder), "Unequal number of images. Pre- and post-expansion directories must have a 1-to-1 matching of files."

    save_file = base_path + "/data/expansion_factors.csv"

    with open(save_file, "w") as f:
        f.write("id,expansion_factor\n")

    for i in range(len(pre_folder)):
        name = os.path.basename(pre_folder[i])
        name, _ = os.path.splitext(name)

        pre = cv2.imread(pre_folder[i], cv2.IMREAD_ANYDEPTH | cv2.IMREAD_ANYCOLOR)  # preserve bit depth
        post = cv2.imread(post_folder[i], cv2.IMREAD_ANYDEPTH | cv2.IMREAD_ANYCOLOR)

        expansion_factor = rigid_registration(pre, post)

        with open(save_file, "a") as f:
            f.write(f"{name},{expansion_factor}\n")
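Note (not part of the commit, inferred from the code above): get_images() pairs pre- and post-expansion images purely by sorted filename order, so the two directories are expected to hold matching filenames, as with the example1.tif/example2.tif pairs added under data/data/registration in this commit; data/data/expansion_factors.csv is rewritten from scratch on each run.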
data/registration/__init__.py
ADDED
@@ -0,0 +1 @@
from registration.rigid_registration import rigid_registration
data/registration/rigid_registration.py
ADDED
@@ -0,0 +1,56 @@
import numpy as np
import cv2


def rescale_and_convert(image):
    lower_bound, upper_bound = np.percentile(image, (0, 98))

    image = np.clip(image, lower_bound, upper_bound)
    image = (image - lower_bound) / (upper_bound - lower_bound)

    return np.asarray(image * (2 ** 8 - 1), dtype=np.uint8)


def rigid_registration(pre, post, h_flip=False):
    if not h_flip:
        if pre.dtype != np.uint8:
            pre = rescale_and_convert(pre)
        if post.dtype != np.uint8:
            post = rescale_and_convert(post)

    sift = cv2.xfeatures2d.SIFT_create(sigma=1.6)  # requires opencv-contrib (cv2.xfeatures2d)
    kp1, des1 = sift.detectAndCompute(post, None)
    kp2, des2 = sift.detectAndCompute(pre, None)

    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)

    good = []
    for m, n in matches:
        if m.distance < 0.7 * n.distance:  # Lowe's ratio test
            good.append(m)

    MIN_MATCH_COUNT = 10

    if len(good) > MIN_MATCH_COUNT:
        print("\x1b[32mSuccess! Enough matches found: %d>%d\x1b[0m." % (len(good), MIN_MATCH_COUNT), "Horizontal flip:", h_flip)

        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

        M, _ = cv2.estimateAffinePartial2D(src_pts, dst_pts, method=cv2.RANSAC)

        cos_scale = M[0, 0]
        sin_scale = M[0, 1]

        expansion_factor = 1 / ((cos_scale ** 2 + sin_scale ** 2) ** 0.5)  # transform maps post -> pre, so scale = 1 / expansion

    elif not h_flip:
        print("\x1b[31mFailure! Not enough matches are found: %d<%d\x1b[0m." % (len(good), MIN_MATCH_COUNT), "Attempting horizontal flip...")
        return rigid_registration(pre, post[:, ::-1], h_flip=True)

    else:
        print("\x1b[31mFailure! Not enough matches are found: %d<%d\x1b[0m." % (len(good), MIN_MATCH_COUNT), "Horizontal flip:", h_flip)
        expansion_factor = None

    return expansion_factor
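Side note (not part of the commit): for the similarity transform estimated by cv2.estimateAffinePartial2D, M = [[s*cos(theta), -s*sin(theta), tx], [s*sin(theta), s*cos(theta), ty]], so the scale is sqrt(M[0,0]^2 + M[0,1]^2); because the transform above maps post-expansion keypoints onto pre-expansion keypoints, the expansion factor is its reciprocal. A minimal sketch with made-up rotation and scale values:

import numpy as np

theta = np.deg2rad(12.0)   # arbitrary rotation
scale = 1 / 4.2            # post -> pre shrinks by the expansion factor
M = np.array([
    [scale * np.cos(theta), -scale * np.sin(theta), 30.0],
    [scale * np.sin(theta),  scale * np.cos(theta), -7.5],
])

recovered_scale = (M[0, 0] ** 2 + M[0, 1] ** 2) ** 0.5
expansion_factor = 1 / recovered_scale

print(round(expansion_factor, 6))  # 4.2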