OpenCV中的StereoBM类是否会对输入图像或帧进行校正?

Esw*_*war 6 c++ opencv video-capture image-processing video-processing

作为我项目的一部分,我使用StereoBM类进行立体视觉.我从2个网络摄像头获取输入帧,并在未经校正的灰度帧上直接运行立体块匹配计算.我得到的输出与真值(ground truth)相差甚远(非常不完整).我想知道,这是否是因为我没有对输入帧进行校正.另外,我选择的基线保持在20厘米.我使用的是OpenCV 3.2.0的C++版本.

我正在运行的代码如下.

#include <opencv2/core.hpp>
#include <opencv2/opencv.hpp>
#include </home/eswar/softwares/opencv_contrib-3.2.0/modules/contrib_world/include/opencv2/contrib_world.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/imgproc.hpp>
#include <stdio.h>
#include <iostream>

using namespace std;
using namespace cv;

int main()
{
 //initialize and allocate memory to load the video stream from camera 
 VideoCapture camera0(0);
 VideoCapture camera1(1);

 if( !camera0.isOpened() ) return 1;
 if( !camera1.isOpened() ) return 1;
 Mat frame0,frame1;
 Mat frame0gray,frame1gray;
 Mat dispbm,dispsgbm;
 Mat dispnorm_bm,dispnorm_sgbm;
 Mat falseColorsMap, sfalseColorsMap;
 int ndisparities = 16*5;   /**< Range of disparity */
 int SADWindowSize = 21; /**< Size of the block window. Must be odd */
 Ptr<StereoBM> sbm = StereoBM::create( ndisparities, SADWindowSize );
 Ptr<StereoSGBM> sgbm = StereoSGBM::create(0,    //int minDisparity
                                    96,     //int numDisparities
                                    5,      //int SADWindowSize
                                    600,    //int P1 = 0
                                    2400,   //int P2 = 0
                                    10,     //int disp12MaxDiff = 0
                                    16,     //int preFilterCap = 0
                                    2,      //int uniquenessRatio = 0
                                    20,    //int speckleWindowSize = 0
                                    30,     //int speckleRange = 0
                                    true);  //bool fullDP = false
//-- Check its extreme values
double minVal; double maxVal;
while(true) 
{
   //grab and retrieve each frames of the video sequentially 
   camera0 >> frame0;
   camera1 >> frame1;

   imshow("Video0", frame0);
   imshow("Video1", frame1);
   cvtColor(frame0,frame0gray,CV_BGR2GRAY);
   cvtColor(frame1,frame1gray,CV_BGR2GRAY);

   sbm->compute( frame0gray, frame1gray, dispbm );
   minMaxLoc( dispbm, &minVal, &maxVal );
   dispbm.convertTo( dispnorm_bm, CV_8UC1, 255/(maxVal - minVal));

   sgbm->compute(frame0gray, frame1gray, dispsgbm);
   minMaxLoc( dispsgbm, &minVal, &maxVal );
   dispsgbm.convertTo( dispnorm_sgbm, CV_8UC1, 255/(maxVal - minVal));

   imshow( "BM", dispnorm_bm);
   imshow( "SGBM",dispnorm_sgbm);

   //wait for 40 milliseconds
   int c = cvWaitKey(40);
   //exit the loop if user press "Esc" key  (ASCII value of "Esc" is 27) 
   if(27 == char(c)) break;
 }
 return 0;
}
Run Code Online (Sandbox Code Playgroud)

虽然在代码中你会看到也使用了块匹配(StereoBM),但请忽略它,因为它的输出更差.我发现SGBM的输出更接近真值,因此我决定改进它.不过,如果有任何关于如何改进块匹配结果的建议,那就太好了,我非常感谢.

SGBM技术输出的深度图像如下所示: 使用SGBM方法得到的深度图

Esw*_*war 4

如上所述,我尝试纠正框架。代码如下。

#include <opencv2/core.hpp>
#include <opencv2/opencv.hpp>
#include </home/eswar/softwares/opencv_contrib-3.2.0/modules/contrib_world  /include/opencv2/contrib_world.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/imgproc.hpp>
#include <stdio.h>
#include <iostream>
#include <opencv2/xfeatures2d/nonfree.hpp>

using namespace std;
using namespace cv;
using namespace cv::xfeatures2d;

int main()
{
//initialize and allocate memory to load the video stream from camera 
VideoCapture camera0(0);
VideoCapture camera1(1);
int count=0;
Mat loRes, hiRes;
if( !camera0.isOpened() ) return 1;
if( !camera1.isOpened() ) return 1;
camera0.set(CV_CAP_PROP_FRAME_WIDTH, 400);
camera0.set(CV_CAP_PROP_FRAME_HEIGHT, 400); 
camera1.set(CV_CAP_PROP_FRAME_WIDTH, 400);
camera1.set(CV_CAP_PROP_FRAME_HEIGHT, 400);
Mat frame0,frame1;
Mat frame0gray,frame1gray;
Mat dispbm,dispsgbm,disparity,disparity1;
Mat dispnorm_bm,dispnorm_sgbm;
Mat falseColorsMap, sfalseColorsMap,falsemap;
Mat img_matches;
Mat H1,H2;

int ndisparities = 96;   /**< Range of disparity */
int SADWindowSize = 7;
Ptr<StereoBM> sbm = StereoBM::create( ndisparities, SADWindowSize );

Ptr<StereoSGBM> sgbm = StereoSGBM::create(-3,    //int minDisparity
                                    96,     //int numDisparities
                                    7,      //int SADWindowSize
                                    60,    //int P1 = 0
                                    2400,   //int P2 = 0
                                    90,     //int disp12MaxDiff = 0
                                    16,     //int preFilterCap = 0
                                    1,      //int uniquenessRatio = 0
                                    60,    //int speckleWindowSize = 0
                                    20,     //int speckleRange = 0
                                    true);  //bool fullDP = false


 //-- Check its extreme values
 double minVal; double maxVal;
 double max_dist = 0;
 double min_dist = 100;
 int minHessian = 630;
 Ptr<Feature2D> f2d = SIFT::create();
 vector<KeyPoint> keypoints_1, keypoints_2;
 Ptr<Feature2D> fd = SIFT::create();
 Mat descriptors_1, descriptors_2;
 BFMatcher matcher(NORM_L2, true);   //BFMatcher matcher(NORM_L2);
 vector< DMatch > matches;
 vector< DMatch > good_matches;
 vector<Point2f>imgpts1,imgpts2;
 vector<uchar> status;
while(true) 
{
    //grab and retrieve each frames of the video sequentially 
    camera0 >> frame0;
    camera1 >> frame1;
    imshow("Video0", frame0);
    imshow("Video1", frame1);

    cvtColor(frame0,frame0gray,CV_BGR2GRAY);
    cvtColor(frame1,frame1gray,CV_BGR2GRAY); 

    sbm->compute( frame0gray, frame1gray, dispbm );
    minMaxLoc( dispbm, &minVal, &maxVal );
    dispbm.convertTo( dispnorm_bm, CV_8UC1, 255/(maxVal - minVal));

    sgbm->compute(frame0gray, frame1gray, dispsgbm);
    minMaxLoc( dispsgbm, &minVal, &maxVal );
    dispsgbm.convertTo( dispnorm_sgbm, CV_8UC1, 255/(maxVal - minVal));
    applyColorMap(dispnorm_bm, falseColorsMap, cv::COLORMAP_JET);
    applyColorMap(dispnorm_sgbm, sfalseColorsMap, cv::COLORMAP_JET);

    f2d->detect( frame0gray, keypoints_1 );
    f2d->detect( frame1gray, keypoints_2 );

    //-- Step 2: Calculate descriptors (feature vectors)
    fd->compute( frame0gray, keypoints_1, descriptors_1 );
    fd->compute( frame1gray, keypoints_2, descriptors_2 );

    //-- Step 3: Matching descriptor vectors with a brute force matcher

    matcher.match( descriptors_1, descriptors_2, matches );
    drawMatches(frame0gray, keypoints_1, frame1gray, keypoints_2, matches, img_matches);
    imshow("matches", img_matches);

    //-- Quick calculation of max and min distances between keypoints
    for( int i = 0; i < matches.size(); i++ )
    { double dist = matches[i].distance;
      if( dist < min_dist ) min_dist = dist;
      if( dist > max_dist ) max_dist = dist;
    } 

   for( int i = 0; i < matches.size(); i++ )
   {
     if( matches[i].distance <= max(4.5*min_dist, 0.02) ){
         good_matches.push_back( matches[i]);
         imgpts1.push_back(keypoints_1[matches[i].queryIdx].pt);
         imgpts2.push_back(keypoints_2[matches[i].trainIdx].pt);
      }

   }

   Mat F = findFundamentalMat(imgpts1, imgpts2, cv::FM_RANSAC, 3., 0.99, status);   //FM_RANSAC
   stereoRectifyUncalibrated(imgpts1, imgpts1, F, frame0gray.size(), H1, H2);
   Mat rectified1(frame0gray.size(), frame0gray.type());
   warpPerspective(frame0gray, rectified1, H1, frame0gray.size());

   Mat rectified2(frame1gray.size(), frame1gray.type());
   warpPerspective(frame1gray, rectified2, H2, frame1gray.size());

   sgbm->compute(rectified1, rectified2, disparity);
   minMaxLoc( disparity, &minVal, &maxVal );
   disparity.convertTo( disparity1, CV_8UC1, 255/(maxVal - minVal));
   applyColorMap(disparity1, falsemap, cv::COLORMAP_JET);
   imshow("disparity_rectified_color", falsemap);
   imshow( "BM", falseColorsMap);
   imshow( "CSGBM",sfalseColorsMap);

   //wait for 40 milliseconds
    int c = cvWaitKey(40);

    //exit the loop if user press "Esc" key  (ASCII value of "Esc" is 27) 
    if(27 == char(c)) break;
}

return 0;

}
Run Code Online (Sandbox Code Playgroud)

现在的输出再次不是那么好,但比上次有所改进。然而,似乎存在一个持续存在的问题,如上图所示。输出图像的左侧有一个全黑区域。事情不应该是这样的。如何解决这个问题呢?任何帮助表示赞赏。