본문 바로가기
Vision ( 더 나은 삶을 위한 공부 )

Computer Vision Feature Detection 완벽 가이드: 기초부터 실전까지 - 3

by Real_Mountain 2025. 11. 17.
OpenCV 기반 Feature Detection 종합 튜토리얼
Point Feature (SIFT, SURF, ORB) + Line Feature (Hough, LSD) + 실전 프로젝트

목차

  1. Feature Detection이란?
  2. 왜 Feature Detection이 중요한가?
  3. 좋은 Feature의 조건
  4. Point Feature Detection
  5. Line Feature Detection
  6. Feature Matching
  7. 실전 프로젝트
  8. 성능 비교

5. Line Feature Detection

5.1 Hough Transform

Hough Transform은 직선, 원, 타원 등의 파라메트릭 형상을 검출하는 강력한 방법입니다.

5.1.1 기본 원리

핵심 아이디어: 이미지 공간의 점들을 파라미터 공간으로 변환

직선의 표현:

1) 직교 좌표계 (Cartesian):
   y = mx + c
   문제: 수직선은 m = ∞

2) 극좌표계 (Polar) ✓:
   ρ = x·cosθ + y·sinθ
   
   여기서:
   ρ: 원점에서 직선까지의 수직 거리
   θ: 수직선이 x축과 이루는 각도 (0° ~ 180°)

변환 과정:

1. Edge Detection (Canny 등)
2. Hough 공간 생성 (ρ, θ)
3. 각 edge 점에 대해 모든 θ 값에서 ρ 계산
4. Accumulator에 투표
5. Threshold 이상의 누적값 → 직선으로 인식

5.1.2 OpenCV 실습 코드(python)

import cv2
import numpy as np
import matplotlib.pyplot as plt

def hough_line_detection_standard(image_path):
    """Detect straight lines with the standard Hough transform.

    Runs Canny edge detection, feeds the edge map to ``cv2.HoughLines``,
    draws each detected (rho, theta) line onto a copy of the input, and
    shows the original, the edges, and the annotated result side by side.

    Args:
        image_path: Path to the image file to process.

    Returns:
        The raw output of ``cv2.HoughLines`` (array of [rho, theta]
        pairs), or None when no lines were found.
    """
    image = cv2.imread(image_path)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # Hough voting expects a binary edge map, so run Canny first.
    edges = cv2.Canny(gray, 50, 150, apertureSize=3)

    # 1 px rho resolution, 1 degree theta resolution, 150-vote threshold.
    lines = cv2.HoughLines(edges, rho=1, theta=np.pi / 180, threshold=150)

    annotated = image.copy()
    if lines is not None:
        for detection in lines:
            rho, theta = detection[0]
            cos_t, sin_t = np.cos(theta), np.sin(theta)
            # Foot of the perpendicular from the origin to the line.
            base_x, base_y = cos_t * rho, sin_t * rho
            # Extend 1000 px along the line direction in both senses.
            p1 = (int(base_x - 1000 * sin_t), int(base_y + 1000 * cos_t))
            p2 = (int(base_x + 1000 * sin_t), int(base_y - 1000 * cos_t))
            cv2.line(annotated, p1, p2, (0, 0, 255), 2)

    n_found = len(lines) if lines is not None else 0
    panels = [
        (cv2.cvtColor(image, cv2.COLOR_BGR2RGB), 'Original Image', None),
        (edges, 'Canny Edges', 'gray'),
        (cv2.cvtColor(annotated, cv2.COLOR_BGR2RGB),
         f'Detected Lines ({n_found} lines)', None),
    ]
    plt.figure(figsize=(15, 5))
    for pos, (panel, title, cmap) in enumerate(panels, start=1):
        plt.subplot(1, 3, pos)
        plt.imshow(panel, cmap=cmap)
        plt.title(title)
        plt.axis('off')

    plt.tight_layout()
    plt.show()

    return lines


def hough_line_detection_probabilistic(image_path, threshold=50, minLineLength=50, maxLineGap=10):
    """Detect line segments with the probabilistic Hough transform.

    Unlike the standard transform, ``cv2.HoughLinesP`` returns finite
    segments as (x1, y1, x2, y2) endpoints, which makes drawing trivial.

    Args:
        image_path: Path to the image file to process.
        threshold: Minimum accumulator votes for a segment.
        minLineLength: Shortest segment length to keep, in pixels.
        maxLineGap: Largest gap allowed when linking collinear points.

    Returns:
        Array of detected segments from ``cv2.HoughLinesP``, or None.
    """
    source = cv2.imread(image_path)
    edges = cv2.Canny(cv2.cvtColor(source, cv2.COLOR_BGR2GRAY),
                      50, 150, apertureSize=3)

    segments = cv2.HoughLinesP(
        edges, rho=1, theta=np.pi/180,
        threshold=threshold,
        minLineLength=minLineLength,
        maxLineGap=maxLineGap,
    )

    # Overlay each detected segment on a copy of the source image.
    overlay = source.copy()
    if segments is not None:
        for seg in segments:
            xa, ya, xb, yb = seg[0]
            cv2.line(overlay, (xa, ya), (xb, yb), (0, 255, 0), 2)

    n_found = len(segments) if segments is not None else 0
    panels = [
        (cv2.cvtColor(source, cv2.COLOR_BGR2RGB), 'Original Image', None),
        (edges, 'Canny Edges', 'gray'),
        (cv2.cvtColor(overlay, cv2.COLOR_BGR2RGB), f'Lines ({n_found})', None),
    ]
    plt.figure(figsize=(15, 5))
    for pos, (panel, title, cmap) in enumerate(panels, start=1):
        plt.subplot(1, 3, pos)
        plt.imshow(panel, cmap=cmap)
        plt.title(title)
        plt.axis('off')

    plt.tight_layout()
    plt.show()

    return segments


def lane_detection_example(image_path):
    """Practical example: lane detection on a road image.

    Pipeline: grayscale -> Gaussian blur -> Canny edges -> trapezoidal
    region-of-interest mask -> probabilistic Hough transform -> blend
    the detected segments back onto the original frame.

    Args:
        image_path: Path to the road image.

    Returns:
        The original image with detected lane segments blended in.
    """
    frame = cv2.imread(image_path)
    # Blur before Canny to suppress noise-induced edges.
    smoothed = cv2.GaussianBlur(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), (5, 5), 0)
    edges = cv2.Canny(smoothed, 50, 150)

    # Keep only a trapezoid covering the road ahead; edges outside it
    # (sky, trees, roadside objects) would only add false positives.
    h, w = edges.shape
    roi = np.zeros_like(edges)
    trapezoid = np.array([[
        (int(w * 0.1), h),
        (int(w * 0.4), int(h * 0.6)),
        (int(w * 0.6), int(h * 0.6)),
        (int(w * 0.9), h),
    ]], np.int32)
    cv2.fillPoly(roi, trapezoid, 255)
    roi_edges = cv2.bitwise_and(edges, roi)

    # Coarser rho (2 px) and a long maxLineGap suit dashed lane markings.
    segments = cv2.HoughLinesP(
        roi_edges, rho=2, theta=np.pi/180,
        threshold=50, minLineLength=100, maxLineGap=50,
    )

    # Draw the segments on a blank canvas so they can be alpha-blended.
    overlay = np.zeros_like(frame)
    if segments is not None:
        for seg in segments:
            xa, ya, xb, yb = seg[0]
            cv2.line(overlay, (xa, ya), (xb, yb), (0, 255, 0), 5)

    # Blend so the original scene stays visible under the lines.
    blended = cv2.addWeighted(frame, 0.8, overlay, 1.0, 0)

    panels = [
        (cv2.cvtColor(frame, cv2.COLOR_BGR2RGB), 'Original', None),
        (roi_edges, 'Masked Edges', 'gray'),
        (cv2.cvtColor(overlay, cv2.COLOR_BGR2RGB), 'Detected Lines', None),
        (cv2.cvtColor(blended, cv2.COLOR_BGR2RGB), 'Result', None),
    ]
    plt.figure(figsize=(18, 6))
    for pos, (panel, title, cmap) in enumerate(panels, start=1):
        plt.subplot(1, 4, pos)
        plt.imshow(panel, cmap=cmap)
        plt.title(title)
        plt.axis('off')

    plt.tight_layout()
    plt.show()

    return blended

6. Feature Matching

6.1 Brute-Force Matcher(python)

def brute_force_matching(img1_path, img2_path, method='SIFT'):
    """Match features between two images with a brute-force matcher.

    Detects keypoints/descriptors with the chosen detector, matches them
    with ``cv2.BFMatcher`` (crossCheck enabled, so every match is a
    mutual best match), and visualizes the 50 best matches by distance.

    Args:
        img1_path: Path to the first (query) image.
        img2_path: Path to the second (train) image.
        method: 'SIFT' (float descriptors, L2 norm) or 'ORB'
            (binary descriptors, Hamming norm).

    Returns:
        (matches, kp1, kp2): matches sorted by ascending distance plus
        the keypoints of both images.

    Raises:
        ValueError: If ``method`` is unsupported or either image yields
            no descriptors.
    """
    img1 = cv2.imread(img1_path)
    img2 = cv2.imread(img2_path)
    gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

    # Pick the detector together with the norm matching its descriptor
    # type. BUG FIX: the original left `detector`/`norm_type` undefined
    # for unknown methods, producing a confusing NameError later.
    if method == 'SIFT':
        detector = cv2.SIFT_create()
        norm_type = cv2.NORM_L2
    elif method == 'ORB':
        detector = cv2.ORB_create()
        norm_type = cv2.NORM_HAMMING
    else:
        raise ValueError(f"Unsupported method: {method!r} (use 'SIFT' or 'ORB')")

    # Keypoint detection + descriptor computation in one pass.
    kp1, des1 = detector.detectAndCompute(gray1, None)
    kp2, des2 = detector.detectAndCompute(gray2, None)
    # detectAndCompute returns None descriptors on featureless images;
    # fail with a clear message instead of a cryptic matcher error.
    if des1 is None or des2 is None:
        raise ValueError('No descriptors found in at least one image')

    # crossCheck=True keeps only mutually-best matches, so no ratio
    # test is needed here.
    bf = cv2.BFMatcher(norm_type, crossCheck=True)
    matches = sorted(bf.match(des1, des2), key=lambda m: m.distance)

    # Visualize the 50 best matches.
    img_matches = cv2.drawMatches(
        img1, kp1, img2, kp2, matches[:50], None,
        flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS
    )

    plt.figure(figsize=(16, 8))
    plt.imshow(cv2.cvtColor(img_matches, cv2.COLOR_BGR2RGB))
    plt.title(f'{method} Matching - Top 50/{len(matches)} matches')
    plt.axis('off')
    plt.show()

    return matches, kp1, kp2


def ratio_test_matching(img1_path, img2_path, ratio=0.75):
    """Match SIFT features between two images using Lowe's ratio test.

    Finds the two nearest neighbors for each descriptor and keeps a
    match only when the best neighbor is clearly closer than the
    second-best (distance ratio below ``ratio``).

    Args:
        img1_path: Path to the first (query) image.
        img2_path: Path to the second (train) image.
        ratio: Lowe's ratio threshold; 0.75 is the value recommended
            in the original SIFT paper.

    Returns:
        (good_matches, kp1, kp2): the surviving matches (each wrapped
        in a one-element list for drawMatchesKnn) plus the keypoints.
    """
    img1 = cv2.imread(img1_path)
    img2 = cv2.imread(img2_path)
    gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

    sift = cv2.SIFT_create()
    kp1, des1 = sift.detectAndCompute(gray1, None)
    kp2, des2 = sift.detectAndCompute(gray2, None)

    # crossCheck must be off for knnMatch (they are mutually exclusive).
    bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=False)
    matches = bf.knnMatch(des1, des2, k=2)

    # Lowe's ratio test.
    # BUG FIX: knnMatch may return fewer than 2 neighbors per query
    # (e.g. when the train image has very few descriptors); unpacking
    # `m, n` unconditionally then raises ValueError.
    good_matches = []
    for pair in matches:
        if len(pair) < 2:
            continue
        m, n = pair
        if m.distance < ratio * n.distance:
            good_matches.append([m])

    img_matches = cv2.drawMatchesKnn(
        img1, kp1, img2, kp2, good_matches, None,
        flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS
    )

    plt.figure(figsize=(16, 8))
    plt.imshow(cv2.cvtColor(img_matches, cv2.COLOR_BGR2RGB))
    plt.title(f'Ratio Test Matching - {len(good_matches)} good matches')
    plt.axis('off')
    plt.show()

    print(f"Total matches: {len(matches)}")
    print(f"Good matches (ratio < {ratio}): {len(good_matches)}")

    return good_matches, kp1, kp2

7. 실전 프로젝트

7.1 Image Stitching (파노라마)(python)

def manual_image_stitching(img1_path, img2_path):
    """Stitch two overlapping images into a panorama by hand.

    Steps: SIFT features -> FLANN knn matching -> Lowe ratio test ->
    RANSAC homography -> warp img1 into img2's frame and paste img2
    on top.

    Args:
        img1_path: Path to the image that will be warped.
        img2_path: Path to the reference image.

    Returns:
        The stitched panorama, or None when fewer than 4 good matches
        were found (a homography needs at least 4 correspondences).
    """
    img1 = cv2.imread(img1_path)
    img2 = cv2.imread(img2_path)
    gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

    # SIFT features in both images.
    sift = cv2.SIFT_create()
    kp1, des1 = sift.detectAndCompute(gray1, None)
    kp2, des2 = sift.detectAndCompute(gray2, None)

    # FLANN with a KD-tree index suits SIFT's float descriptors.
    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    # Lowe's ratio test.
    # BUG FIX: guard against pairs with fewer than 2 neighbors, which
    # knnMatch can return and which crashed the original
    # `for m, n in matches` unpacking.
    good_matches = []
    for pair in matches:
        if len(pair) == 2 and pair[0].distance < 0.7 * pair[1].distance:
            good_matches.append(pair[0])

    print(f"Good matches: {len(good_matches)}")

    # findHomography needs at least 4 point correspondences.
    if len(good_matches) >= 4:
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)

        # RANSAC rejects outlier correspondences (5 px reprojection tolerance).
        H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

        h1, w1 = img1.shape[:2]
        h2, w2 = img2.shape[:2]

        # Project img1's corners to find the output canvas extent.
        # FIX: use distinct names instead of clobbering `dst_pts`.
        corners = np.float32([[0, 0], [0, h1], [w1, h1], [w1, 0]]).reshape(-1, 1, 2)
        warped_corners = cv2.perspectiveTransform(corners, H)

        [x_min, y_min] = np.int32(warped_corners.min(axis=0).ravel() - 0.5)
        [x_max, y_max] = np.int32(warped_corners.max(axis=0).ravel() + 0.5)

        # Translate so the warped image lands in positive coordinates.
        translation = [-x_min, -y_min]
        H_translation = np.array([[1, 0, translation[0]],
                                   [0, 1, translation[1]],
                                   [0, 0, 1]])

        result = cv2.warpPerspective(img1, H_translation.dot(H),
                                       (x_max - x_min, y_max - y_min))
        # Paste img2 on top at its translated position.
        result[translation[1]:h2+translation[1],
               translation[0]:w2+translation[0]] = img2

        plt.figure(figsize=(15, 5))
        for pos, (panel, title) in enumerate(
            [(img1, 'Image 1'), (img2, 'Image 2'), (result, 'Stitched Result')],
            start=1,
        ):
            plt.subplot(1, 3, pos)
            plt.imshow(cv2.cvtColor(panel, cv2.COLOR_BGR2RGB))
            plt.title(title)
            plt.axis('off')

        plt.tight_layout()
        plt.show()

        return result

    return None

8. 알고리즘 선택 가이드

8.1 종합 비교표

 

| Algorithm | Speed     | Accuracy  | Scale Inv. | Rotation Inv. | License        | Use Case         |
|-----------|-----------|-----------|------------|---------------|----------------|------------------|
| Harris    | Fast      | Medium    | No         | Yes           | Free           | Corner detection |
| SIFT      | Slow      | Very High | Yes        | Yes           | Patent expired | High accuracy    |
| SURF      | Medium    | High      | Yes        | Yes           | Patent         | Balance          |
| ORB       | Very Fast | Medium    | Partial    | Yes           | Free           | Real-time        |
| Hough     | Medium    | Medium    | N/A        | N/A           | Free           | Line detection   |

8.2 사용 시나리오별 추천

모바일 앱: ORB (빠르고 무료)
학술 연구: SIFT (정확도 최우선)
자율주행: Hough Transform + Canny
산업용: ORB (실시간) / SURF (정확도)
3D 재구성: SIFT/SURF
AR/VR: ORB (실시간)
SLAM: ORB-SLAM
파노라마: SIFT



반응형