OpenCV: resize and crop an image based on pixel values

cms*_*msl 0 c opencv image-processing

#include "iostream"
#include "cv.h"
#include "highgui.h"
#include "cvaux.h"
#include "cxmisc.h"
#include "math.h"

using namespace cv;
using namespace std;

int main(){

int height, width, x, y, i, minX, minY, maxX, maxY;
char imgFileName[100];


IplImage *origImage = cvLoadImage("BaybayinMark/b9.jpg", -1);
height = origImage->height;
width = origImage->width;

IplImage *grayImage = cvCreateImage(cvSize(width, height), 8, 1);
IplImage *binImage = cvCreateImage(cvSize(width, height), 8, 1);


//Pre-processing phase


cvCvtColor(origImage, grayImage, CV_BGR2GRAY);
cvDilate(grayImage, grayImage, NULL, 1);
cvSmooth(grayImage, grayImage, CV_GAUSSIAN, 21, 21, 0, 0);
cvThreshold(grayImage, binImage, 120, 255, CV_THRESH_BINARY);
cvNormalize(binImage,binImage,0,1,CV_MINMAX);

minX = width;
minY = height;
maxX = 0;
maxY = 0;


CvScalar s;


// Scan the binary image and track the bounding box of the foreground (value 1) pixels
for (x = 0; x < width - 1; x++){
    for (y = 0; y < height - 1; y++){
        s = cvGet2D(binImage, y, x);
        //printf("%f\n", s.val[0]);
        if (s.val[0] == 1){
            //printf("HELLO");
            minX = min(minX, x);
            minY = min(minY, y);
            maxX = max(maxX, x);
            maxY = max(maxY, y);
        }
    }
}

cvSetImageROI(binImage, cvRect(minX, minY, maxX-minX, maxY-minY));

IplImage *cropImage = cvCreateImage(cvGetSize(binImage), 8, 1);

cvCopy(binImage, cropImage, NULL);

cvSaveImage("crop/cropImage9.jpg", cropImage);
cvResetImageROI(binImage);

cvReleaseImage(&origImage);
cvReleaseImage(&binImage);

cvReleaseImage(&grayImage);
cvReleaseImage(&cropImage);

}

Hi! I just want to ask about this code. I am trying to detect the outermost edges of the image and crop the image based on them. After running it, all I get is a black image of the same size as the original. Am I going about this the wrong way? Please tell me, I am a beginner with OpenCV.

kar*_*lip 12

In the rush to find out what-the-heck the problem is, people often forget a more important question: how-the-heck do I find the problem?

With image processing applications, the how can be answered with the poor man's debugger in OpenCV: spreading cvSaveImage() calls throughout the code so you can visualize what every step is doing:

    //Pre-processing phase
    cvCvtColor(origImage, grayImage, CV_BGR2GRAY);
    cvSaveImage("cv_color.jpg", grayImage);

    cvDilate(grayImage, grayImage, NULL, 1);
    cvSaveImage("cv_dilate.jpg", grayImage);

    cvSmooth(grayImage, grayImage, CV_GAUSSIAN, 21, 21, 0, 0);
    cvSaveImage("cv_smooth.jpg", grayImage);

    cvThreshold(grayImage, binImage, 120, 255, CV_THRESH_BINARY);
    cvSaveImage("cv_threshold.jpg", binImage);

    cvNormalize(binImage,binImage,0,1,CV_MINMAX);
    cvSaveImage("cv_normalize.jpg", binImage);

This code reveals that the resulting image turns black even before your custom for loop, and that the call responsible is cvNormalize(). But does that make sense? You are converting pixels from the range [0..255] to the values 0 and 1.
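To see why that makes the saved file look black (this illustration is mine, not part of the original answer): on an 8-bit image, a value of 1 out of 255 is visually indistinguishable from 0, so anything written with cvSaveImage() after the normalization appears solid black even though the shape information is still there:

    // Illustrative check after cvNormalize(binImage, binImage, 0, 1, CV_MINMAX):
    // every pixel is now either 0 or 1, and 1/255 renders as (near) black on disk.
    CvScalar p = cvGet2D(binImage, height / 2, width / 2);
    printf("center pixel after normalize: %.0f\n", p.val[0]); // prints 0 or 1, never 255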

So the problem is that, at the end of the processing, when you save the resulting image to disk, you forget to normalize the values back to the original range:

    IplImage *cropImage = cvCreateImage(cvGetSize(binImage), 8, 1);
    cvCopy(binImage, cropImage, NULL);

    cvNormalize(cropImage, cropImage, 0, 255, CV_MINMAX);

    cvSaveImage("result.jpg", cropImage);

That solves the problem.
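
As a side note (my addition, not part of the accepted answer), the same crop-to-content idea can be written in a few lines with the C++ API available since OpenCV 2.x: threshold the grayscale image, take the bounding rectangle of the non-zero pixels, and copy that ROI out. A minimal sketch, assuming the same input and output paths as the question:

    #include <opencv2/opencv.hpp>
    #include <vector>

    int main(){
        // Same input path as in the question.
        cv::Mat orig = cv::imread("BaybayinMark/b9.jpg");
        if (orig.empty()) return 1;

        cv::Mat gray, bin;
        cv::cvtColor(orig, gray, cv::COLOR_BGR2GRAY);
        cv::threshold(gray, bin, 120, 255, cv::THRESH_BINARY);

        // Bounding box of every white pixel replaces the manual min/max loop.
        std::vector<cv::Point> whitePixels;
        cv::findNonZero(bin, whitePixels);
        if (whitePixels.empty()) return 1;   // nothing above the threshold
        cv::Rect box = cv::boundingRect(whitePixels);

        // Values are already in [0..255], so no renormalization is needed before saving.
        cv::imwrite("crop/cropImage9.jpg", bin(box).clone());
        return 0;
    }

Since the threshold step already writes 0 or 255, this version never needs a normalization to [0..1] at all, which is what caused the black output in the first place.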