OpenCV / Python:用于实时面部识别的多线程

Sim*_*mon 4 python multithreading opencv face-detection dlib

我正在使用 OpenCv 和 Dlib 对来自网络摄像头的视频流实时执行带有地标的面部识别。语言是Python。它在我的 macbook 笔记本电脑上运行良好,但我需要它在台式计算机上 24/7 全天候运行。计算机是运行 Debian Jessie 的 PC Intel® Core™2 Quad CPU Q6600 @ 2.40GHz 32bit。性能下降得很厉害:由于处理延迟,有 10 秒的延迟!

因此,我研究了多线程以获得性能:

  1. 我首先尝试了 OpenCv 的示例代码,结果很棒!四个核心都达到了100%,性能要好得多。
  2. 然后我用我的代码替换了帧处理代码,它根本没有提高性能!只有一个核心达到 100%,其他核心保持在很低的水平。我甚至认为启用多线程后性能更糟。

我从 dlib 示例代码中得到了面部地标代码。我知道它可能可以优化,但我想了解为什么我不能通过多线程使用我的(旧)计算机的全部功能?

我会把我的代码放在下面,非常感谢阅读:)

from __future__ import print_function

import numpy as np
import cv2
import dlib

from multiprocessing.pool import ThreadPool
from collections import deque

from common import clock, draw_str, StatValue
import video

class DummyTask:
    """Synchronous stand-in for ``AsyncResult``.

    Wraps an already-computed value so the non-threaded code path can be
    consumed through the same ``ready()``/``get()`` protocol the thread
    pool's async results expose.
    """

    def __init__(self, data):
        # The result is computed eagerly, before construction.
        self._payload = data

    def ready(self):
        """A synchronous result is always available."""
        return True

    def get(self):
        """Return the wrapped result (mirrors ``AsyncResult.get``)."""
        return self._payload

if __name__ == '__main__':
    import sys

    print(__doc__)

    # Capture source: optional CLI argument (file name or camera index),
    # default to the first camera.
    try:
        fn = sys.argv[1]
    except IndexError:  # narrow except: only "no argument supplied"
        fn = 0
    cap = video.create_capture(fn)

    # Face detector (dlib's HOG-based frontal face detector).
    detector = dlib.get_frontal_face_detector()

    # 68-point facial landmark shape predictor.
    predictor = dlib.shape_predictor("landmarks/shape_predictor_68_face_landmarks.dat")

    # This is where the facial detection takes place
    def process_frame(frame, t0, detector, predictor):
        """Detect faces in *frame* and draw the 68 landmark points on it.

        Returns ``(annotated_frame, t0)`` so the consumer can compute
        end-to-end latency from the capture timestamp *t0*.

        NOTE(review): dlib's detector/predictor do their heavy work while
        holding the GIL, which is presumably why ThreadPool gave no
        speedup here — process-based parallelism (or a properly compiled
        dlib) is needed; confirm against the dlib build.
        """
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # CLAHE equalization improves detection under uneven lighting.
        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
        clahe_image = clahe.apply(gray)
        detections = detector(clahe_image, 1)  # 1 = upsample the image once
        for d in detections:
            shape = predictor(clahe_image, d)
            # There are 68 landmark points (indices 0..67) on each face;
            # the original range(1, 68) accidentally skipped point 0.
            for i in range(68):
                cv2.circle(frame, (shape.part(i).x, shape.part(i).y), 1,
                           (0, 0, 255), thickness=2)
        return frame, t0

    threadn = cv2.getNumberOfCPUs()
    pool = ThreadPool(processes=threadn)
    pending = deque()  # in-flight tasks, oldest first (preserves frame order)

    threaded_mode = True

    latency = StatValue()
    frame_interval = StatValue()
    last_frame_time = clock()
    while True:
        # Drain completed tasks in submission order so frames are shown
        # in capture order even though they finish out of order.
        while len(pending) > 0 and pending[0].ready():
            res, t0 = pending.popleft().get()
            latency.update(clock() - t0)
            draw_str(res, (20, 20), "threaded      :  " + str(threaded_mode))
            draw_str(res, (20, 40), "latency        :  %.1f ms" % (latency.value * 1000))
            draw_str(res, (20, 60), "frame interval :  %.1f ms" % (frame_interval.value * 1000))
            cv2.imshow('threaded video', res)
        # Keep at most one outstanding task per worker.
        if len(pending) < threadn:
            ret, frame = cap.read()
            if not ret:
                # Capture failed or the stream ended: stop cleanly instead
                # of crashing on frame.copy() with frame == None.
                break
            t = clock()
            frame_interval.update(t - last_frame_time)
            last_frame_time = t
            if threaded_mode:
                task = pool.apply_async(process_frame, (frame.copy(), t, detector, predictor))
            else:
                task = DummyTask(process_frame(frame, t, detector, predictor))
            pending.append(task)
        ch = cv2.waitKey(1)
        if ch == ord(' '):  # space toggles threaded mode on/off
            threaded_mode = not threaded_mode
        if ch == 27:  # ESC quits
            break
cv2.destroyAllWindows()
Run Code Online (Sandbox Code Playgroud)

Sim*_*mon 6

性能问题是由于 dlib 的错误编译造成的。不要使用 pip install dlib——出于某种原因它生成的版本运行非常缓慢——而应自行正确编译。通过这种方式,我的延迟从将近 10 秒降到了大约 2 秒。所以最后我不需要多线程/处理,但我仍在继续努力提高速度。谢谢您的帮助 :)