import cv2
import numpy as np
import streamlit as st
from PIL import Image
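
# Streamlit helpers for SIFT feature matching between two images: "Good" uses
# Lowe's ratio test alone, "Great" additionally filters the matches with a
# RANSAC homography.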

def Match(image_file1, image_file2):
    t = st.sidebar.selectbox("Good or Great", ["Good", "Great"])
    rate = st.sidebar.slider("Feature extraction strictness", 0.0, 1.0, 0.8)
    if t == "Good":
        Good_Match(image_file1, image_file2, rate)
    elif t == "Great":
        Great_Match(image_file1, image_file2, rate)

def Good_Match(image_file1, image_file2, rate=0.8):
    image1 = Image.open(image_file1)
    image2 = Image.open(image_file2)

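    # Convert the PIL images (RGB) to OpenCV's BGR channel order.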
    image1_cv = cv2.cvtColor(np.array(image1), cv2.COLOR_RGB2BGR)
    image2_cv = cv2.cvtColor(np.array(image2), cv2.COLOR_RGB2BGR)

    gray1 = cv2.cvtColor(image1_cv, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(image2_cv, cv2.COLOR_BGR2GRAY)

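    # Detect SIFT keypoints and compute their descriptors on the grayscale images.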
    sift = cv2.SIFT_create()
    keypoints1, descriptors1 = sift.detectAndCompute(gray1, None)
    keypoints2, descriptors2 = sift.detectAndCompute(gray2, None)

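    # Match descriptors with FLANN (KD-tree index), two nearest neighbours each.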
    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)

    matches = flann.knnMatch(descriptors1, descriptors2, k=2)

    # Lowe's ratio test; guard against knnMatch returning fewer than two neighbours.
    good_matches = []
    for match_pair in matches:
        if len(match_pair) < 2:
            continue
        m, n = match_pair
        if m.distance < rate * n.distance:
            good_matches.append(m)
    img_matches = cv2.drawMatches(image1_cv, keypoints1, image2_cv, keypoints2,
                                  good_matches, None,
                                  flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)

    st.image(cv2.cvtColor(img_matches, cv2.COLOR_BGR2RGB), caption='Good Matches', use_column_width=True)

def Great_Match(image_file1, image_file2, rate=0.8):
    image1 = Image.open(image_file1)
    image2 = Image.open(image_file2)
    
    # Convert to NumPy arrays in OpenCV's BGR channel order.
    image1_cv = cv2.cvtColor(np.array(image1), cv2.COLOR_RGB2BGR)
    image2_cv = cv2.cvtColor(np.array(image2), cv2.COLOR_RGB2BGR)

    gray1 = cv2.cvtColor(image1_cv, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(image2_cv, cv2.COLOR_BGR2GRAY)

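    # Same SIFT detection and FLANN matching pipeline as in Good_Match.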
    sift = cv2.SIFT_create()
    
    keypoints1, descriptors1 = sift.detectAndCompute(gray1, None)
    keypoints2, descriptors2 = sift.detectAndCompute(gray2, None)

    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)

    matches = flann.knnMatch(descriptors1, descriptors2, k=2)

    # Lowe's ratio test, with the same unpacking guard as in Good_Match.
    good_matches = []
    for match_pair in matches:
        if len(match_pair) < 2:
            continue
        m, n = match_pair
        if m.distance < rate * n.distance:
            good_matches.append(m)
    
    if len(good_matches) > 4:
        src_pts = np.float32([keypoints1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
        dst_pts = np.float32([keypoints2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)

        # Estimate a homography with RANSAC; the mask marks the inlier matches.
        # findHomography can return None when no consistent model is found.
        H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        if H is not None:
            matches_mask = mask.ravel().tolist()

            # Warp image 1 into image 2's frame (computed but not displayed below).
            height, width, channels = image2_cv.shape
            transformed_img = cv2.warpPerspective(image1_cv, H, (width, height))
        else:
            matches_mask = None
    else:
        matches_mask = None

    # Draw the matches with cv2.drawMatches; only RANSAC inliers are drawn
    # when matches_mask is set.
    img_matches = cv2.drawMatches(image1_cv, keypoints1, image2_cv, keypoints2, good_matches, None, 
                                  matchesMask=matches_mask, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)

    # Display the result.
    st.image(cv2.cvtColor(img_matches, cv2.COLOR_BGR2RGB), caption='Great Matches', use_column_width=True)
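
# A minimal driver sketch, assuming this module is meant to be run directly
# with `streamlit run`; the uploader labels and the __main__ guard below are
# illustrative, not from the original file.
if __name__ == "__main__":
    uploaded1 = st.file_uploader("Image 1", type=["png", "jpg", "jpeg"])
    uploaded2 = st.file_uploader("Image 2", type=["png", "jpg", "jpeg"])
    if uploaded1 is not None and uploaded2 is not None:
        # Match() reads the sidebar controls and renders the chosen view.
        Match(uploaded1, uploaded2)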