在Python中训练OpenCV NormalBayesClassifier

Ale*_*son 1 python opencv

我正在尝试使用NormalBayesClassifier对Foscam 9821W网络摄像头生成的图像进行分类.它们是1280x720,最初是彩色的,但我将它们转换为灰度分类.

我有一些Python代码(在http://pastebin.com/YxYWRMGs上)试图迭代一组火腿/垃圾邮件图像来训练分类器,但每当我调用train()时,OpenCV会尝试分配大量的内存并引发异常.

mock@behemoth:~/OpenFos/code/experiments$ ./cvbayes.py --ham=../training/ham --spam=../training/spam
Image is a <type 'numpy.ndarray'> (720, 1280)
...
*** trying to train with 8 images
responses is [2, 2, 2, 2, 2, 2, 1, 1]
OpenCV Error: Insufficient memory (Failed to allocate 6794772480020 bytes) in OutOfMemoryError, file /build/buildd/opencv-2.3.1/modules/core/src/alloc.cpp, line 52
Traceback (most recent call last):
  File "./cvbayes.py", line 124, in <module>
    classifier = cw.train()
  File "./cvbayes.py", line 113, in train
    classifier.train(matrixData,matrixResp)
cv2.error: /build/buildd/opencv-2.3.1/modules/core/src/alloc.cpp:52: error: (-4) Failed to allocate 6794772480020 bytes in function OutOfMemoryError
Run Code Online (Sandbox Code Playgroud)

我对Python很有经验,但在OpenCV上是一个新手,所以我怀疑我错过了一些关键的预处理.

我想要使用的图像示例位于https://mocko.org.uk/choowoos/?m=20130515.我有大量的训练数据,但最初我只使用了8张图像.

有人可以告诉我,我做错了让NormalBayesClassifier爆炸吗?

Ale*_*son 6

最终发现了问题 - 我使用的是NormalBayesClassifier错误.它并不意味着直接输入数十个高清图像:首先应该使用OpenCV的其他算法来处理它们.

我最终执行了以下操作：(1) 将图像裁剪到可能包含目标对象的区域；(2) 将图像转换为灰度；(3) 使用cv2.goodFeaturesToTrack()从裁剪区域收集特征来训练分类器.

很少的特征就够我用了,也许是因为我把图像裁剪得恰到好处,而且幸运的是裁剪区域包含高对比度的对象,这些对象在其中一个类别中会被遮挡.

以下代码可以使95%的人口正确:

#!/usr/bin/env python
# -*- coding: utf-8 -*- 


import cv2
import sys, os.path, getopt
import numpy, random




def _usage():

    print
    print "cvbayes trainer"
    print
    print "Options:"
    print
    print "-m    --ham=     path to dir of ham images"
    print "-s    --spam=    path to dir of spam images"
    print "-h    --help     this help text"
    print "-v    --verbose  lots more output"
    print



def _parseOpts(argv):

    """
    Turn options + args into a dict of config we'll follow.  Merge in default conf.
    """

    try:
        opts, args = getopt.getopt(argv[1:], "hm:s:v", ["help", "ham=", 'spam=', 'verbose'])
    except getopt.GetoptError as err:
        print(err) # will print something like "option -a not recognized"
        _usage()
        sys.exit(2)

    optsDict = {}

    for o, a in opts:
        if o == "-v":
            optsDict['verbose'] = True
        elif o in ("-h", "--help"):
            _usage()
            sys.exit()
        elif o in ("-m", "--ham"):
            optsDict['ham'] = a
        elif o in ('-s', '--spam'):
            optsDict['spam'] = a
        else:
            assert False, "unhandled option"

    for mandatory_arg in ('ham', 'spam'):
        if mandatory_arg not in optsDict:
            print "Mandatory argument '%s' was missing; cannot continue" % mandatory_arg
            sys.exit(0)

    return optsDict     




class ClassifierWrapper(object):

    """
    Setup and encapsulate a naive bayes classifier based on OpenCV's
    NormalBayesClassifier.  Presently we do not use it intelligently,
    instead feeding in flattened arrays of B&W pixels.
    """

    def __init__(self):
        super(ClassifierWrapper, self).__init__()
        self.classifier     = cv2.NormalBayesClassifier()
        self.data           = []    # one flat float feature vector per image
        self.responses      = []    # integer class label per feature vector

    def _load_image_features(self, f):
        """Read image file *f* and return a flat feature vector for it.

        Crops to the fixed region of interest, converts to greyscale and
        collects up to 4 strong corners via cv2.goodFeaturesToTrack().
        """
        image_colour    = cv2.imread(f)
        image_crop      = image_colour[327:390, 784:926]        # Use the junction boxes, luke
        image_grey      = cv2.cvtColor(image_crop, cv2.COLOR_BGR2GRAY)
        # NOTE: this assignment was under-indented in the original post,
        # which raises IndentationError; it belongs inside this method.
        features        = cv2.goodFeaturesToTrack(image_grey, 4, 0.02, 3)
        return features.flatten()

    def train_from_file(self, f, cl):
        """Queue image file *f* with class label *cl* for the next train()."""
        features    = self._load_image_features(f)
        self.data.append(features)
        self.responses.append(cl)

    def train(self, update=False):
        """Train the classifier on all queued samples, then clear the queue.

        With update=True the existing model is updated instead of rebuilt.
        """
        matrix_data     = numpy.matrix( self.data ).astype('float32')
        matrix_resp     = numpy.matrix( self.responses ).astype('float32')
        self.classifier.train(matrix_data, matrix_resp, update=update)
        # Drop the queued samples so a later train(update=True) does not
        # feed the same data twice.
        self.data       = []
        self.responses  = []

    def predict_from_file(self, f):
        """Classify image file *f*; returns the classifier's results matrix."""
        features    = self._load_image_features(f)
        features_matrix = numpy.matrix( [ features ] ).astype('float32')
        retval, results = self.classifier.predict( features_matrix )
        return results




if __name__ == "__main__":

    opts = _parseOpts(sys.argv)

    cw = ClassifierWrapper()

    # Train on equally sized random samples of each class so that the
    # larger class does not dominate the classifier.
    ham     = os.listdir(opts['ham'])
    spam    = os.listdir(opts['spam'])
    n_training_samples = min([len(ham), len(spam)])
    print("Will train on %d samples for equal sets" % n_training_samples)

    for f in random.sample(ham, n_training_samples):
        img_path = os.path.join(opts['ham'], f)
        print("ham: %s" % img_path)
        cw.train_from_file(img_path, 2)

    for f in random.sample(spam, n_training_samples):
        img_path = os.path.join(opts['spam'], f)
        print("spam: %s" % img_path)
        cw.train_from_file(img_path, 1)

    cw.train()

    print("")
    print("")

    # spam dir much bigger so mostly unused, let's try predict() on all of it
    print("predicting on all spam...")
    n_wrong = 0
    n_files = len(os.listdir(opts['spam']))
    for f in os.listdir(opts['spam']):
        img_path = os.path.join(opts['spam'], f)
        result = cw.predict_from_file(img_path)
        print("%s\t%s" % (result, img_path))
        if result[0][0] == 2:   # predicted ham, but everything here is spam
            n_wrong += 1

    # Guard against an empty spam dir, which would otherwise divide by zero.
    pct_wrong = (float(n_wrong) / n_files * 100) if n_files else 0.0
    print("got %d of %d wrong = %.1f%%" % (n_wrong, n_files, pct_wrong))
Run Code Online (Sandbox Code Playgroud)

现在我用垃圾邮件的一个随机子集进行训练,仅仅因为垃圾邮件的数量多得多,而你应该为每个类别提供数量大致相同的训练数据.如果对数据做更好的筛选(例如,在光照不同时,总是包含黎明和黄昏的样本),准确率可能还会更高.

也许NormalBayesClassifier本身就不是合适的工具,我应该尝试跨连续帧的运动检测——但至少互联网上又多了一个可供参考的完整示例.