V01*_*V01 -1 c++ opencv image-processing
我正在寻找一种使用OpenCV检测给定二进制图像中的自定义形状的方法。
我的自定义形状如下所示:
我试图找出上面的形状(或近似形状)是否存在于给定的 512 x 512 二进制图像中。
我尝试了不同的方法,例如使用cv::matchTemplate() ,但这不适用于具有缩放和旋转形状的变体。
我需要一些解决方案来识别给定二进制图像中的这些形状。
预先感谢
关于形状的一些信息 :形状很简单,具有三个连接的组件和线分离。这些是踝关节处胫骨、腓骨和距骨的横截面形状。更多一些形状图像
这是我的算法。这个想法是对轮廓进行聚类(在示例算法中仅通过膨胀/腐蚀)并对每个轮廓进行大小归一化并测试不同旋转中的形状相似性。然后将图像区域与模板进行比较。
我使用这张图片作为模板:
和这个对象图像(删除了白色背景,因为我假设仅使用外部轮廓)。
该算法给出了这个结果:
found target shape with similarity 72.5144% and angle: 180 degrees
5 of 26
found target shape with similarity 73.1325% and angle: 0 degrees
6 of 26
found target shape with similarity 71.7287% and angle: 270 degrees
7 of 26
8 of 26
found target shape with similarity 72.3608% and angle: 90 degrees
9 of 26
10 of 26
11 of 26
12 of 26
13 of 26
14 of 26
15 of 26
16 of 26
found target shape with similarity 62.7371% and angle: 60 degrees
17 of 26
found target shape with similarity 62.6041% and angle: 240 degrees
18 of 26
19 of 26
20 of 26
found target shape with similarity 62.8935% and angle: 150 degrees
21 of 26
found target shape with similarity 62.39% and angle: 330 degrees
22 of 26
23 of 26
24 of 26
25 of 26
Run Code Online (Sandbox Code Playground)
这是代码(带有一些用于保存图像等的脏助手):
int glob_counter = 0;
double contourMaskSimilarity(float angleDiff, float scale, cv::Point2f cont_center, std::vector<cv::Point> contour, cv::Mat img, cv::Point2f template_center, cv::Mat img_templ)
{
cv::Mat rotationMat = cv::getRotationMatrix2D(cont_center, angleDiff, scale);
cv::Mat rotationMatPersp = cv::Mat::eye(3, 3, CV_64FC1);
for (int y = 0; y < rotationMat.rows; ++y)
for (int x = 0; x < rotationMat.cols; ++x)
{
rotationMatPersp.at<double>(y, x) = rotationMat.at<double>(y, x);
}
//cv::Mat img_tmp = img_color.clone();
cv::Mat img_tmp_mask = cv::Mat::zeros(img.size(), img.type());
std::vector < std::vector<cv::Point> >contours_img;
contours_img.push_back(contour);
cv::drawContours(img_tmp_mask, contours_img, 0, cv::Scalar::all(255), -1);
//cv::circle(img_tmp, cont_center, 3, cv::Scalar(255, 0, 255), 2); // drawing
std::vector<cv::Point2f> points;
std::vector<cv::Point2f> warpedPoints;
points.push_back(cont_center);
cv::perspectiveTransform(points, warpedPoints, rotationMatPersp);
cv::Mat translation = cv::Mat::eye(3, 3, CV_64FC1);
translation.at<double>(0, 2) = template_center.x - warpedPoints[0].x; // x
translation.at<double>(1, 2) = template_center.y - warpedPoints[0].y; // x
cv::Mat transformation = translation * rotationMatPersp; // transformation after each other => 1. rotation 2. translation
cv::Mat imgBin = img.clone();
imgBin = imgBin & img_tmp_mask;
cv::Mat imgBinWarped;
// warp the image to same size and rotation as the template, according to angle and center
//cv::warpPerspective(imgBin, imgBinWarped, transformation, cv::Size(img.size().width * scale, img.size().height * scale));
cv::warpPerspective(imgBin, imgBinWarped, transformation, img_templ.size());
cv::Rect subImage = cv::Rect(0, 0, img_templ.cols, img_templ.rows);
cv::Mat imgSub = imgBinWarped(subImage);
cv::Mat imgMul = imgSub.mul(img_templ); // 255 everywhere where template and current image-region are non-zero. 0 everywhere else
double sum1 = cv::countNonZero(imgMul);
double sum2 = cv::countNonZero(img_templ);
double sum3 = cv::countNonZero(imgSub);
//std::cout << sum1 << " " << sum2 << " " << sum3 << std::endl;
// confidence similar to intersection over union.
// use a better shape-similarity here, like a chamfer matching or a mean-hausdorff-distance?
double conf = sum1 * sum1 / (sum2 * sum3);
//std::cout << conf * 100 << " %" << std::endl;
// TODO: remove!
if (conf > 0.5)
{
cv::imwrite("C:/data/StackOverflow/bone_shapes/out_sub_" + std::to_string(glob_counter) + "_" + std::to_string(conf) + ".png", imgSub);
cv::imwrite("C:/data/StackOverflow/bone_shapes/out_mul_" + std::to_string(glob_counter) + "_" + std::to_string(conf) + ".png", imgMul);
glob_counter++;
}
return conf;
}
/// Entry point: locates instances of a template bone shape in a binary image,
/// invariant to translation, rotation and scale.
///
/// Pipeline:
///  1. binarize template and search image
///  2. morphological closing merges the shape's parts into a single contour
///  3. extract outer contours of template and image
///  4. per contour: estimate scale from the rotated bounding box, then sweep a
///     range of rotations and keep the best template-overlap similarity
int main()
{
    cv::Mat img_templ = cv::imread("C:/data/StackOverflow/bone_shapes/bone_shape_template.png", cv::IMREAD_GRAYSCALE);
    // binarize the template (it is probably binary already)
    cv::Mat templ = img_templ > 0;
    cv::Mat img_shapes = cv::imread("C:/data/StackOverflow/bone_shapes/bones_set_blackBG.png", cv::IMREAD_GRAYSCALE);
    // binarize the image (there were grayscale gradients at the object borders)
    cv::Mat img = img_shapes > 200;

    // 1. close-operator to merge all the parts of a shape into a single
    //    contour. For other shapes some kind of clustering might be needed.
    cv::Mat img_closed = img.clone();
    int nDilations = 2;
    cv::dilate(img_closed, img_closed, cv::getStructuringElement(cv::MorphShapes::MORPH_RECT, cv::Size(3, 3), cv::Point(1, 1)), cv::Point(1, 1), nDilations);
    cv::erode(img_closed, img_closed, cv::getStructuringElement(cv::MorphShapes::MORPH_RECT, cv::Size(3, 3), cv::Point(1, 1)), cv::Point(1, 1), nDilations);
    cv::Mat template_closed = templ.clone();
    // Fixed: nDilationsTemplate was declared but the calls below used
    // nDilations, so the template's closing strength could not be tuned.
    int nDilationsTemplate = 2;
    cv::dilate(template_closed, template_closed, cv::getStructuringElement(cv::MorphShapes::MORPH_RECT, cv::Size(3, 3), cv::Point(1, 1)), cv::Point(1, 1), nDilationsTemplate);
    cv::erode(template_closed, template_closed, cv::getStructuringElement(cv::MorphShapes::MORPH_RECT, cv::Size(3, 3), cv::Point(1, 1)), cv::Point(1, 1), nDilationsTemplate);

    // 2. find outer contours (only necessary once for the template)
    std::vector<std::vector<cv::Point>> contour_template;
    cv::findContours(template_closed, contour_template, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_NONE);
    std::vector<std::vector<cv::Point>> contours_img;
    cv::findContours(img_closed, contours_img, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_NONE);

    // display the intermediate results for debugging/sanity checks only
    cv::Mat img_color, template_color;
    cv::cvtColor(img_shapes, img_color, cv::COLOR_GRAY2BGR);
    cv::cvtColor(img_templ, template_color, cv::COLOR_GRAY2BGR);
    for (size_t i = 0; i < contour_template.size(); ++i)
        cv::drawContours(template_color, contour_template, static_cast<int>(i), cv::Scalar(255, 0, 255), 4);
    cv::imshow("template color", template_color);
    for (size_t i = 0; i < contours_img.size(); ++i)
        cv::drawContours(img_color, contours_img, static_cast<int>(i), cv::Scalar(0, 0, 255), 2);
    cv::imshow("img color", img_color);

    // the matching below assumes exactly one template contour
    if (contour_template.size() != 1)
    {
        std::cout << "closed template doesnt consist of a single contour" << std::endl;
        return 1;
    }

    // 3. get size and orientation of the template shape from its rotated
    //    bounding box; the longer box side is used as the size measure.
    cv::RotatedRect template_orientation = cv::minAreaRect(contour_template[0]);
    cv::Point2f template_center = template_orientation.center;
    float template_size = (template_orientation.size.width > template_orientation.size.height) ? template_orientation.size.width : template_orientation.size.height;

    // 4. check every contour in the target image:
    for (size_t i = 0; i < contours_img.size(); ++i)
    {
        std::cout << i << " of " << contours_img.size() << std::endl;
        cv::RotatedRect cont_orientation = cv::minAreaRect(contours_img[i]);
        cv::Point2f cont_center = cont_orientation.center;
        float cont_size = (cont_orientation.size.width > cont_orientation.size.height) ? cont_orientation.size.width : cont_orientation.size.height;
        // initial angle difference according to the rotated bounding boxes —
        // currently unused; kept as a possible seed for a finer search
        float angleDiff = template_orientation.angle - cont_orientation.angle;
        (void)angleDiff;
        // scale according to rotated rectangle bounding boxes
        float scale = template_size / cont_size;

        double bestSimilarity = 0.0;
        float bestAngle = 0.0;
        float stepDegree = 15; // make smaller if a finer rotation resolution is needed
        // brute-force sweep over all rotations:
        for (float angle = 0; angle < 360.0f; angle += stepDegree)
        {
            // similarity computation is slow for small contours, maybe
            // because of the warping?
            double similarity = contourMaskSimilarity(angle, scale, cont_center, contours_img[i], img, template_center, templ);
            if (similarity > bestSimilarity)
            {
                bestSimilarity = similarity;
                bestAngle = angle;
            }
        }
        glob_counter++;
        if (bestSimilarity > 0.5)
        {
            cv::drawContours(img_color, contours_img, static_cast<int>(i), cv::Scalar(0, 255, 0), 2);
            std::cout << "found target shape with similarity " << 100 * bestSimilarity << "% and angle: " << bestAngle << " degrees" << std::endl;
        }
    }

    // show and save the final results
    cv::imshow("template", img_templ);
    cv::imshow("template binary", templ);
    cv::imshow("template closed", template_closed);
    cv::imshow("shapes", img_shapes);
    cv::imshow("shapes binary", img);
    cv::imshow("shapes closed", img_closed);
    cv::imshow("result", img_color);
    cv::imwrite("C:/data/StackOverflow/bone_shapes/out_template_color.png", template_color);
    cv::imwrite("C:/data/StackOverflow/bone_shapes/out_result.png", img_color);
    cv::waitKey(0);
}
Run Code Online (Sandbox Code Playground)
以下是扭曲图像和与模板相乘的二进制的一些示例: