How to identify the largest bounding rectangles in an image with OpenCV and Python and split them into separate images


I'm new to OpenCV and Python and am trying to identify the three largest rectangles marked in the sample image and extract each of them into its own image. I can detect contours in the image, but every contour gets drawn (as shown in the second picture) and I can't isolate the three largest ones. The code I have so far:

import cv2

image = cv2.imread('imgpath')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
canny = cv2.Canny(gray, 130, 255, 1)

# cv2.findContours returns 2 values in OpenCV 4.x and 3 values in OpenCV 3.x
cnts = cv2.findContours(canny, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]

#largest_contours = sorted(cnts, key=cv2.contourArea)[-3:]
#print(len(largest_contours))
for c in cnts:
    cv2.drawContours(image,[c], 0, (0,255,0), 3)

#cv2.imshow("result", image)
#cv2.drawContours(image, largest_contours, -1, (0,255,0), 3)
cv2.imshow('contours', image)
cv2.waitKey(0)
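A minimal sketch of the filtering step that the commented-out line above seems to be aiming at (keeping only the three largest contours by enclosed area); it reuses cnts and image from the snippet above and assumes the rectangles of interest produce the largest contours:

# Sort contours by area, largest first, and keep the top three
largest_contours = sorted(cnts, key=cv2.contourArea, reverse=True)[:3]

# Draw only those three contours
cv2.drawContours(image, largest_contours, -1, (0, 255, 0), 3)
cv2.imshow('three largest contours', image)
cv2.waitKey(0)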


Here's an approach:

  • Convert the image to grayscale
  • Adaptive threshold to obtain a binary image
  • Find contours and sort for the three largest
  • Perform contour approximation to make sure we have a quadrilateral contour
  • Perform a perspective transform to get a top-down view
  • Rotate the image into the correct orientation

The extracted rectangles after the perspective transform and rotation:

import cv2
import numpy as np

def rotate_image(image, angle):
    # Grab the dimensions of the image and then determine the center
    (h, w) = image.shape[:2]
    (cX, cY) = (w / 2, h / 2)

    # grab the rotation matrix (applying the negative of the
    # angle to rotate clockwise), then grab the sine and cosine
    # (i.e., the rotation components of the matrix)
    M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0)
    cos = np.abs(M[0, 0])
    sin = np.abs(M[0, 1])

    # Compute the new bounding dimensions of the image
    nW = int((h * sin) + (w * cos))
    nH = int((h * cos) + (w * sin))

    # Adjust the rotation matrix to take into account translation
    M[0, 2] += (nW / 2) - cX
    M[1, 2] += (nH / 2) - cY

    # Perform the actual rotation and return the image
    return cv2.warpAffine(image, M, (nW, nH))

def perspective_transform(image, corners):
    def order_corner_points(corners):
        # Separate corners into individual points
        # Index 0 - top-right
        #       1 - top-left
        #       2 - bottom-left
        #       3 - bottom-right
        corners = [(corner[0][0], corner[0][1]) for corner in corners]
        top_r, top_l, bottom_l, bottom_r = corners[0], corners[1], corners[2], corners[3]
        return (top_l, top_r, bottom_r, bottom_l)

    # Order points in clockwise order
    ordered_corners = order_corner_points(corners)
    top_l, top_r, bottom_r, bottom_l = ordered_corners

    # Determine width of new image which is the max distance between 
    # (bottom right and bottom left) or (top right and top left) x-coordinates
    width_A = np.sqrt(((bottom_r[0] - bottom_l[0]) ** 2) + ((bottom_r[1] - bottom_l[1]) ** 2))
    width_B = np.sqrt(((top_r[0] - top_l[0]) ** 2) + ((top_r[1] - top_l[1]) ** 2))
    width = max(int(width_A), int(width_B))

    # Determine height of new image which is the max distance between 
    # (top right and bottom right) or (top left and bottom left) y-coordinates
    height_A = np.sqrt(((top_r[0] - bottom_r[0]) ** 2) + ((top_r[1] - bottom_r[1]) ** 2))
    height_B = np.sqrt(((top_l[0] - bottom_l[0]) ** 2) + ((top_l[1] - bottom_l[1]) ** 2))
    height = max(int(height_A), int(height_B))

    # Construct destination points for the top-down view, in the same
    # top_l, top_r, bottom_r, bottom_l order as ordered_corners
    dimensions = np.array([[0, 0], [width - 1, 0], [width - 1, height - 1], 
                    [0, height - 1]], dtype = "float32")

    # Convert to Numpy format
    ordered_corners = np.array(ordered_corners, dtype="float32")

    # Find perspective transform matrix
    matrix = cv2.getPerspectiveTransform(ordered_corners, dimensions)

    # Return the transformed image
    return cv2.warpPerspective(image, matrix, (width, height))

image = cv2.imread('1.jpg')
original = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 3)

cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:3]

ROI_number = 0
for c in cnts:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.015 * peri, True)

    if len(approx) == 4:
        cv2.drawContours(image,[c], 0, (36,255,12), 3)
        transformed = perspective_transform(original, approx)
        rotated = rotate_image(transformed, -90)
        cv2.imwrite('ROI_{}.png'.format(ROI_number), rotated)
        cv2.imshow('ROI_{}'.format(ROI_number), rotated)
        ROI_number += 1

cv2.imshow('thresh', thresh)
cv2.imshow('image', image)
cv2.waitKey()
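Note that perspective_transform relies on cv2.approxPolyDP returning the four corners in a fixed top-right, top-left, bottom-left, bottom-right order, which may not hold for every image. A minimal sketch of an explicit ordering helper, assuming only that the four points form a convex quadrilateral (the name order_points_by_position is illustrative, not part of the answer's code):

import numpy as np

def order_points_by_position(corners):
    # corners: array-like of four (x, y) points in arbitrary order
    pts = np.asarray(corners, dtype="float32").reshape(4, 2)
    s = pts.sum(axis=1)                 # x + y
    d = np.diff(pts, axis=1).ravel()    # y - x
    top_l = pts[np.argmin(s)]           # smallest x + y
    bottom_r = pts[np.argmax(s)]        # largest x + y
    top_r = pts[np.argmin(d)]           # smallest y - x
    bottom_l = pts[np.argmax(d)]        # largest y - x
    return np.array([top_l, top_r, bottom_r, bottom_l], dtype="float32")

The returned order matches the (top_l, top_r, bottom_r, bottom_l) order in which perspective_transform builds its destination points, so it could stand in for order_corner_points if the fixed-index assumption ever breaks.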