kikuepi committed
Commit e73c0a8 · verified · 1 parent: ad9b11c

Upload 5 files

Files changed (5)
  1. README.md +0 -13
  2. app.py +26 -0
  3. diff.py +202 -0
  4. match.py +91 -0
  5. requirements.txt +5 -0
README.md CHANGED
@@ -1,13 +0,0 @@
- ---
- title: App
- emoji: 💻
- colorFrom: blue
- colorTo: pink
- sdk: streamlit
- sdk_version: 1.37.1
- app_file: app.py
- pinned: false
- license: other
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,26 @@
+ import streamlit as st
+
+ from match import Match
+ from diff import diff
+
+ def main():
+     st.title("Image Difference Detection")
+     TYPE = st.sidebar.selectbox("Mode", ["Diff extraction", "Feature extraction"])
+     image_file1 = st.sidebar.file_uploader("Upload the first image", type=["jpg", "jpeg", "png", "PNG"])
+     image_file2 = st.sidebar.file_uploader("Upload the second image", type=["jpg", "jpeg", "png", "PNG"])
+     if image_file1 and image_file2:
+         if TYPE == "Diff extraction":
+             diff(image_file1, image_file2)
+         elif TYPE == "Feature extraction":
+             Match(image_file1, image_file2)
+     else:
+         st.info("Please upload both images")
+
+ if __name__ == "__main__":
+     main()
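Note that st.file_uploader returns a file-like UploadedFile, so PIL.Image.open in diff.py and match.py can consume the two uploads directly without writing them to disk.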
diff.py ADDED
@@ -0,0 +1,202 @@
+ import streamlit as st
+ from PIL import Image
+ import cv2
+ import numpy as np
+
+ def diff(image_file1, image_file2):
+     TYPE = st.sidebar.selectbox("Visualization method", ["Rectangles", "Points"])
+     if TYPE == "Rectangles":
+         rectangle_diff(image_file1, image_file2)
+     elif TYPE == "Points":
+         point_diff(image_file1, image_file2)
+
+ def point_diff(image_file1, image_file2):
+     image1 = Image.open(image_file1)
+     image2 = Image.open(image_file2)
+
+     col1, col2 = st.columns(2)
+     diff_Thresholds = st.sidebar.slider("Difference threshold", 10, 255, 50)
+     with col1:
+         st.image(image1, caption='First image', use_column_width=True)
+     with col2:
+         st.image(image2, caption='Second image', use_column_width=True)
+
+     # Convert to OpenCV BGR arrays, then to grayscale
+     image1_cv = cv2.cvtColor(np.array(image1), cv2.COLOR_RGB2BGR)
+     image2_cv = cv2.cvtColor(np.array(image2), cv2.COLOR_RGB2BGR)
+     gray1 = cv2.cvtColor(image1_cv, cv2.COLOR_BGR2GRAY)
+     gray2 = cv2.cvtColor(image2_cv, cv2.COLOR_BGR2GRAY)
+
+     # SIFT features matched with FLANN
+     sift = cv2.SIFT_create()
+     keypoints1, descriptors1 = sift.detectAndCompute(gray1, None)
+     keypoints2, descriptors2 = sift.detectAndCompute(gray2, None)
+
+     FLANN_INDEX_KDTREE = 1
+     index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
+     search_params = dict(checks=50)
+     flann = cv2.FlannBasedMatcher(index_params, search_params)
+
+     matches = flann.knnMatch(descriptors1, descriptors2, k=2)
+
+     # Lowe's ratio test
+     good_matches = []
+     for m, n in matches:
+         if m.distance < 0.8 * n.distance:
+             good_matches.append(m)
+
+     if len(good_matches) > 4:
+         src_pts = np.float32([keypoints1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
+         dst_pts = np.float32([keypoints2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
+
+         # Align image 1 onto image 2 via a RANSAC homography
+         H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
+         height, width, channels = image2_cv.shape
+         transformed_img = cv2.warpPerspective(image1_cv, H, (width, height))
+         transformed_gray = cv2.cvtColor(transformed_img, cv2.COLOR_BGR2GRAY)
+
+         # Threshold the absolute difference, then clean up the binary mask
+         img_diff = cv2.absdiff(transformed_gray, gray2)
+         _, img_th = cv2.threshold(img_diff, diff_Thresholds, 255, cv2.THRESH_BINARY)
+         kernel = np.ones((3, 3), np.uint8)
+         img_th = cv2.morphologyEx(img_th, cv2.MORPH_OPEN, kernel, iterations=2)
+         img_th = cv2.dilate(img_th, kernel, iterations=1)
+
+         # Mark every remaining difference pixel with a red dot
+         points = np.column_stack(np.where(img_th > 0))
+         for point in points:
+             cv2.circle(transformed_img, (point[1], point[0]), 2, (0, 0, 255), -1)
+
+         st.image(cv2.cvtColor(transformed_img, cv2.COLOR_BGR2RGB), caption='Aligned image with differences', use_column_width=True)
+     else:
+         st.warning("Not enough good matches to align the two images")
+
+ def merge(rectangles, image_area, dist_threshold=10, diff_rate=0.5):
+     # Merge nearby, similarly sized rectangles, then filter out outliers
+     merged_rec = []
+     used = [False] * len(rectangles)
+
+     for i, rect1 in enumerate(rectangles):
+         if used[i]:
+             continue
+
+         x1, y1, w1, h1 = rect1
+         area1 = w1 * h1
+         merged = False
+
+         for j, rect2 in enumerate(rectangles):
+             if i == j or used[j]:
+                 continue
+
+             x2, y2, w2, h2 = rect2
+             area2 = w2 * h2
+
+             center1 = np.array([x1 + w1 / 2, y1 + h1 / 2])
+             center2 = np.array([x2 + w2 / 2, y2 + h2 / 2])
+             distance = np.linalg.norm(center1 - center2)
+
+             # Merge when the centers are close and the areas are comparable
+             if distance < dist_threshold and abs(area1 - area2) < diff_rate * max(area1, area2):
+                 new_x = min(x1, x2)
+                 new_y = min(y1, y2)
+                 new_w = max(x1 + w1, x2 + w2) - new_x
+                 new_h = max(y1 + h1, y2 + h2) - new_y
+                 merged_rec.append((new_x, new_y, new_w, new_h))
+                 used[i] = used[j] = True
+                 merged = True
+                 break
+
+         if not merged:
+             merged_rec.append(rect1)
+
+     # Drop rectangles covering more than a third of the image, and rectangles
+     # much smaller than some other surviving rectangle
+     filter_rect = []
+     for rect in merged_rec:
+         x, y, w, h = rect
+         area = w * h
+         ok = True
+
+         if area >= image_area / 3:
+             ok = False
+
+         for other_rect in merged_rec:
+             if rect == other_rect:
+                 continue
+             ox, oy, ow, oh = other_rect
+             other_area = ow * oh
+             if area < other_area and abs(area - other_area) > diff_rate * max(area, other_area):
+                 ok = False
+                 break
+
+         if ok:
+             filter_rect.append(rect)
+
+     return filter_rect
+
+ def rectangle_diff(image_file1, image_file2):
+     image1 = Image.open(image_file1)
+     image2 = Image.open(image_file2)
+
+     col1, col2 = st.columns(2)
+     diff_Thresholds = st.sidebar.slider("Difference threshold", 10, 255, 50)
+     distance_threshold = st.sidebar.slider("Rectangle merge distance", 1, 50, 10)
+     size_difference_ratio = st.sidebar.slider("Size difference ratio", 0.0, 1.0, 0.5)
+
+     with col1:
+         st.image(image1, caption='First image', use_column_width=True)
+     with col2:
+         st.image(image2, caption='Second image', use_column_width=True)
+
+     image1_cv = cv2.cvtColor(np.array(image1), cv2.COLOR_RGB2BGR)
+     image2_cv = cv2.cvtColor(np.array(image2), cv2.COLOR_RGB2BGR)
+     gray1 = cv2.cvtColor(image1_cv, cv2.COLOR_BGR2GRAY)
+     gray2 = cv2.cvtColor(image2_cv, cv2.COLOR_BGR2GRAY)
+
+     sift = cv2.SIFT_create()
+     keypoints1, descriptors1 = sift.detectAndCompute(gray1, None)
+     keypoints2, descriptors2 = sift.detectAndCompute(gray2, None)
+
+     FLANN_INDEX_KDTREE = 1
+     index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
+     search_params = dict(checks=50)
+     flann = cv2.FlannBasedMatcher(index_params, search_params)
+
+     matches = flann.knnMatch(descriptors1, descriptors2, k=2)
+
+     # Lowe's ratio test
+     good_matches = []
+     for m, n in matches:
+         if m.distance < 0.8 * n.distance:
+             good_matches.append(m)
+
+     if len(good_matches) > 4:
+         src_pts = np.float32([keypoints1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
+         dst_pts = np.float32([keypoints2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
+
+         H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
+         height, width, channels = image2_cv.shape
+         transformed_img = cv2.warpPerspective(image1_cv, H, (width, height))
+         transformed_gray = cv2.cvtColor(transformed_img, cv2.COLOR_BGR2GRAY)
+
+         img_diff = cv2.absdiff(transformed_gray, gray2)
+         _, img_th = cv2.threshold(img_diff, diff_Thresholds, 255, cv2.THRESH_BINARY)
+         kernel = np.ones((3, 3), np.uint8)
+         img_th = cv2.morphologyEx(img_th, cv2.MORPH_OPEN, kernel, iterations=2)
+         img_th = cv2.dilate(img_th, kernel, iterations=1)
+
+         # Bound each difference region with a rectangle, then merge and filter
+         contours, _ = cv2.findContours(img_th, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+         rectangles = [cv2.boundingRect(contour) for contour in contours]
+
+         image_area = height * width
+         filtered_rectangles = merge(rectangles, image_area, distance_threshold, size_difference_ratio)
+
+         for x, y, w, h in filtered_rectangles:
+             cv2.rectangle(transformed_img, (x, y), (x + w, y + h), (0, 0, 255), 2)
+
+         st.image(cv2.cvtColor(transformed_img, cv2.COLOR_BGR2RGB), caption='Aligned image with differences', use_column_width=True)
+     else:
+         st.warning("Not enough good matches to align the two images")
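The same align-then-diff pipeline can be exercised outside Streamlit. Below is a minimal sketch assuming two hypothetical local files, before.png and after.png, showing the same scene; the file names and output path are illustrative only:

import cv2
import numpy as np

# Hypothetical input paths: two photos of the same scene
img1 = cv2.imread("before.png")
img2 = cv2.imread("after.png")
gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

# SIFT + FLANN matching with Lowe's ratio test, as in diff.py
sift = cv2.SIFT_create()
kp1, des1 = sift.detectAndCompute(gray1, None)
kp2, des2 = sift.detectAndCompute(gray2, None)
flann = cv2.FlannBasedMatcher(dict(algorithm=1, trees=5), dict(checks=50))
good = [m for m, n in flann.knnMatch(des1, des2, k=2) if m.distance < 0.8 * n.distance]

if len(good) > 4:
    src = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    # Warp image 1 into image 2's frame before differencing
    H, _ = cv2.findHomography(src, dst, cv2.RANSAC, 5.0)
    h, w = img2.shape[:2]
    aligned = cv2.warpPerspective(img1, H, (w, h))
    diff = cv2.absdiff(cv2.cvtColor(aligned, cv2.COLOR_BGR2GRAY), gray2)
    _, mask = cv2.threshold(diff, 50, 255, cv2.THRESH_BINARY)
    cv2.imwrite("change_mask.png", mask)  # binary change mask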
match.py ADDED
@@ -0,0 +1,91 @@
+ import cv2
+ import numpy as np
+ import streamlit as st
+ from PIL import Image
+
+ def Match(image_file1, image_file2):
+     t = st.sidebar.selectbox("Good or Great", ["Good", "Great"])
+     rate = st.sidebar.slider("Matching strictness (ratio)", 0.0, 1.0, 0.8)
+     if t == "Good":
+         Good_Match(image_file1, image_file2, rate)
+     elif t == "Great":
+         Great_Match(image_file1, image_file2, rate)
+
+ def Good_Match(image_file1, image_file2, rate=0.8):
+     image1 = Image.open(image_file1)
+     image2 = Image.open(image_file2)
+
+     # Convert to NumPy arrays (OpenCV uses BGR), then to grayscale
+     image1_cv = cv2.cvtColor(np.array(image1), cv2.COLOR_RGB2BGR)
+     image2_cv = cv2.cvtColor(np.array(image2), cv2.COLOR_RGB2BGR)
+     gray1 = cv2.cvtColor(image1_cv, cv2.COLOR_BGR2GRAY)
+     gray2 = cv2.cvtColor(image2_cv, cv2.COLOR_BGR2GRAY)
+
+     sift = cv2.SIFT_create()
+     keypoints1, descriptors1 = sift.detectAndCompute(gray1, None)
+     keypoints2, descriptors2 = sift.detectAndCompute(gray2, None)
+
+     FLANN_INDEX_KDTREE = 1
+     index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
+     search_params = dict(checks=50)
+     flann = cv2.FlannBasedMatcher(index_params, search_params)
+
+     matches = flann.knnMatch(descriptors1, descriptors2, k=2)
+
+     # Lowe's ratio test: keep a match only if it is clearly better than the runner-up
+     good_matches = []
+     for m, n in matches:
+         if m.distance < rate * n.distance:
+             good_matches.append(m)
+
+     img_matches = cv2.drawMatches(image1_cv, keypoints1, image2_cv, keypoints2, good_matches, None, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
+
+     st.image(cv2.cvtColor(img_matches, cv2.COLOR_BGR2RGB), caption='Good Matches', use_column_width=True)
+
+ def Great_Match(image_file1, image_file2, rate=0.8):
+     image1 = Image.open(image_file1)
+     image2 = Image.open(image_file2)
+
+     # Convert to NumPy arrays (OpenCV uses BGR), then to grayscale
+     image1_cv = cv2.cvtColor(np.array(image1), cv2.COLOR_RGB2BGR)
+     image2_cv = cv2.cvtColor(np.array(image2), cv2.COLOR_RGB2BGR)
+     gray1 = cv2.cvtColor(image1_cv, cv2.COLOR_BGR2GRAY)
+     gray2 = cv2.cvtColor(image2_cv, cv2.COLOR_BGR2GRAY)
+
+     sift = cv2.SIFT_create()
+     keypoints1, descriptors1 = sift.detectAndCompute(gray1, None)
+     keypoints2, descriptors2 = sift.detectAndCompute(gray2, None)
+
+     FLANN_INDEX_KDTREE = 1
+     index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
+     search_params = dict(checks=50)
+     flann = cv2.FlannBasedMatcher(index_params, search_params)
+
+     matches = flann.knnMatch(descriptors1, descriptors2, k=2)
+
+     good_matches = []
+     for m, n in matches:
+         if m.distance < rate * n.distance:
+             good_matches.append(m)
+
+     if len(good_matches) > 4:
+         src_pts = np.float32([keypoints1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
+         dst_pts = np.float32([keypoints2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
+
+         # Estimate a homography with RANSAC; its inlier mask rejects outlier matches
+         H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
+         matches_mask = mask.ravel().tolist()
+     else:
+         matches_mask = None
+
+     # Draw only the RANSAC inliers (all good matches if no homography was found)
+     img_matches = cv2.drawMatches(image1_cv, keypoints1, image2_cv, keypoints2, good_matches, None,
+                                   matchesMask=matches_mask, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
+
+     st.image(cv2.cvtColor(img_matches, cv2.COLOR_BGR2RGB), caption='Great Matches', use_column_width=True)
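The rate slider feeds Lowe's ratio test in both functions: a match survives only when its best descriptor distance is well below the second-best. A tiny self-contained sketch with synthetic distances (no OpenCV required) shows how the threshold behaves:

# (best, second-best) descriptor distances for five hypothetical query features
pairs = [(0.20, 0.90), (0.55, 0.60), (0.30, 0.35), (0.10, 0.80), (0.45, 0.50)]

def ratio_test(pairs, rate=0.8):
    # Keep index i only when best < rate * second_best
    return [i for i, (best, second) in enumerate(pairs) if best < rate * second]

print(ratio_test(pairs, rate=0.8))   # [0, 3] -> only unambiguous matches survive
print(ratio_test(pairs, rate=0.95))  # [0, 1, 2, 3, 4] -> a looser rate keeps ambiguous ones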
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ streamlit
+ Pillow
+ opencv-python
+ numpy
+ matplotlib
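Assuming the five files sit in one directory, the Space should also run locally with the usual Streamlit workflow: install the dependencies with `pip install -r requirements.txt`, then start the app with `streamlit run app.py`.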