Mut*_*onk · Tags: python, pyqt, qml, python-3.x, pyqt5
I am using PyQt5. I want to send frames from OpenCV to QML using a QQuickPaintedItem. I wrote a sample implementation below, but I cannot figure out why the paint event is called only once, when the application loads. It paints just one frame from the camera onto the QML component, and self.update() never triggers the paint event again.
from OpenGL import GL
from PyQt5.QtQuick import QQuickPaintedItem, QQuickView
from PyQt5.QtGui import QPainter, QPixmap, QImage
from PyQt5.QtQml import qmlRegisterType
import sys
from PyQt5.QtGui import QColor
from PyQt5.QtCore import QUrl, QObject, pyqtSignal
import cv2.cv2 as cv2
from PyQt5.QtWidgets import QApplication


class ImageWriter(QQuickPaintedItem):
    cam_frame = None

    def __init__(self, *args, **kwargs):
        super(ImageWriter, self).__init__(*args, **kwargs)
        self.setRenderTarget(QQuickPaintedItem.FramebufferObject)

    def paint(self, painter):
        print(ImageWriter.cam_frame)
        painter.drawPixmap(0, 0, ImageWriter.cam_frame)

    def update_frame(self, frame):
        frame = cv2.resize(frame, (700, 500), cv2.INTER_AREA)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
        frame = QImage(frame, frame.shape[1], frame.shape[0], 17)
        ImageWriter.cam_frame = QPixmap.fromImage(frame)
        self.update()


def get_frames(app):
    cap = cv2.VideoCapture(0)
    num = 0
    imgw = ImageWriter()
    while True:
        while num != 30:
            _, bgframe = cap.read()
            num += 1
        _, frame = cap.read()
        imgw.update_frame(frame)
        print("get frames")
        app.processEvents()


if __name__ == '__main__':
    app = QApplication(sys.argv)
    qmlRegisterType(ImageWriter, "imageWriter", 1, 0, "ImageWriter")
    view = QQuickView()
    view.setSource(QUrl('test.qml'))
    rootObject = view.rootObject()
    view.show()
    get_frames(app)
    sys.exit(app.exec_())
Here is the QML I wrote for this:
import QtQuick 2.0
import imageWriter 1.0

Item {
    width: 800
    height: 600

    ImageWriter {
        id: imageWriter
        width: 800
        height: 600
    }
}
I completely fail to understand why self.update() does not invoke the paint event. I cannot use QWidgets; I have to use this approach. Am I missing something here?
The problem is caused by there being 2 ImageWriter objects: one created with imgw = ImageWriter() in the .py, and another created by QML. You can verify this by adding prints in the .py and in the .qml:
*.py
def get_frames(app):
    cap = cv2.VideoCapture(0)
    num = 0
    imgw = ImageWriter()
    print("Python:", imgw)
    ...
*.qml
...
Component.onCompleted: console.log("QML:", imageWriter)
...
Output:
qml: >>>> ImageWriter(0x55bf2927e770)
Python: <__main__.ImageWriter object at 0x7fce8e4ff798>
As you can see, there are 2 objects pointing to different memory addresses, so a possible solution is to make ImageWriter a singleton, using PyQt's wrapper metaclass as shown below:
from OpenGL import GL
import sys

from PyQt5 import QtCore, QtGui, QtQml, QtQuick
import cv2

try:
    from PyQt5.QtCore import pyqtWrapperType
except ImportError:
    from sip import wrappertype as pyqtWrapperType


class Singleton(pyqtWrapperType, type):
    def __init__(cls, name, bases, dict):
        super().__init__(name, bases, dict)
        cls.instance = None

    def __call__(cls, *args, **kw):
        if cls.instance is None:
            cls.instance = super().__call__(*args, **kw)
        return cls.instance


class ImageWriter(QtQuick.QQuickPaintedItem, metaclass=Singleton):
    def __init__(self, *args, **kwargs):
        super(ImageWriter, self).__init__(*args, **kwargs)
        self.setRenderTarget(QtQuick.QQuickPaintedItem.FramebufferObject)
        self.cam_frame = QtGui.QImage()

    def paint(self, painter):
        painter.drawImage(0, 0, self.cam_frame)

    def update_frame(self, frame):
        frame = cv2.resize(frame, (700, 500), cv2.INTER_AREA)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
        # 17 == QtGui.QImage.Format_RGBA8888
        frame = QtGui.QImage(frame, frame.shape[1], frame.shape[0], 17)
        self.cam_frame = frame.copy()
        self.update()


def get_frames(app):
    cap = cv2.VideoCapture(0)
    num = 0
    imgw = ImageWriter()
    while True:
        while num != 30:
            _, bgframe = cap.read()
            num += 1
        ret, frame = cap.read()
        if ret:
            imgw.update_frame(frame)
            # print("get frames")
        app.processEvents()


if __name__ == '__main__':
    app = QtGui.QGuiApplication(sys.argv)
    QtQml.qmlRegisterType(ImageWriter, "imageWriter", 1, 0, "ImageWriter")
    view = QtQuick.QQuickView()
    view.setSource(QtCore.QUrl('test.qml'))
    rootObject = view.rootObject()
    view.show()
    get_frames(app)
    sys.exit(app.exec_())
With the above, the image acquisition should work, but I think there is a better way; below I show a better option.
Using my previous answer as a base, I created a module that implements a camera handler with OpenCV, together with a viewer and a generic class that allows filters to be added. For this, the project must have the following structure:
├── main.py
├── main.qml
└── PyCVQML
    ├── cvcapture.py
    ├── cvitem.py
    └── __init__.py
PyCVQML/cvcapture.py
import numpy as np
import threading

import cv2

from PyQt5 import QtCore, QtGui, QtQml

gray_color_table = [QtGui.qRgb(i, i, i) for i in range(256)]


class CVAbstractFilter(QtCore.QObject):
    def process_image(self, src):
        dst = src
        return dst


class CVCapture(QtCore.QObject):
    started = QtCore.pyqtSignal()
    imageReady = QtCore.pyqtSignal()
    indexChanged = QtCore.pyqtSignal()

    def __init__(self, parent=None):
        super(CVCapture, self).__init__(parent)
        self._image = QtGui.QImage()
        self._index = 0
        self.m_videoCapture = cv2.VideoCapture()
        self.m_timer = QtCore.QBasicTimer()
        self.m_filters = []
        self.m_busy = False

    @QtCore.pyqtSlot()
    @QtCore.pyqtSlot(int)
    def start(self, *args):
        if args:
            self.setIndex(args[0])
        self.m_videoCapture.release()
        self.m_videoCapture = cv2.VideoCapture(self._index)
        if self.m_videoCapture.isOpened():
            self.m_timer.start(0, self)
            self.started.emit()

    @QtCore.pyqtSlot()
    def stop(self):
        self.m_timer.stop()

    def timerEvent(self, e):
        if e.timerId() != self.m_timer.timerId():
            return
        ret, frame = self.m_videoCapture.read()
        if not ret:
            self.m_timer.stop()
            return
        if not self.m_busy:
            threading.Thread(target=self.process_image, args=(np.copy(frame),)).start()

    @QtCore.pyqtSlot(np.ndarray)
    def process_image(self, frame):
        self.m_busy = True
        for f in self.m_filters:
            frame = f.process_image(frame)
        image = CVCapture.ToQImage(frame)
        self.m_busy = False
        QtCore.QMetaObject.invokeMethod(self,
                                        "setImage",
                                        QtCore.Qt.QueuedConnection,
                                        QtCore.Q_ARG(QtGui.QImage, image))

    @staticmethod
    def ToQImage(im):
        if im is None:
            return QtGui.QImage()
        if im.dtype == np.uint8:
            if len(im.shape) == 2:
                qim = QtGui.QImage(im.data, im.shape[1], im.shape[0], im.strides[0], QtGui.QImage.Format_Indexed8)
                qim.setColorTable(gray_color_table)
                return qim.copy()
            elif len(im.shape) == 3:
                if im.shape[2] == 3:
                    w, h, _ = im.shape
                    rgb_image = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
                    flip_image = cv2.flip(rgb_image, 1)
                    qim = QtGui.QImage(flip_image.data, h, w, QtGui.QImage.Format_RGB888)
                    return qim.copy()
        return QtGui.QImage()

    def image(self):
        return self._image

    @QtCore.pyqtSlot(QtGui.QImage)
    def setImage(self, image):
        if self._image == image:
            return
        self._image = image
        self.imageReady.emit()

    def index(self):
        return self._index

    def setIndex(self, index):
        if self._index == index:
            return
        self._index = index
        self.indexChanged.emit()

    @QtCore.pyqtProperty(QtQml.QQmlListProperty)
    def filters(self):
        return QtQml.QQmlListProperty(CVAbstractFilter, self, self.m_filters)

    image = QtCore.pyqtProperty(QtGui.QImage, fget=image, notify=imageReady)
    index = QtCore.pyqtProperty(int, fget=index, fset=setIndex, notify=indexChanged)
PyCVQML/cvitem.py
from PyQt5 import QtCore, QtGui, QtQuick


class CVItem(QtQuick.QQuickPaintedItem):
    imageChanged = QtCore.pyqtSignal()

    def __init__(self, parent=None):
        super(CVItem, self).__init__(parent)
        # self.setRenderTarget(QtQuick.QQuickPaintedItem.FramebufferObject)
        self.m_image = QtGui.QImage()

    def paint(self, painter):
        if self.m_image.isNull():
            return
        image = self.m_image.scaled(self.size().toSize())
        painter.drawImage(QtCore.QPoint(), image)

    def image(self):
        return self.m_image

    def setImage(self, image):
        if self.m_image == image:
            return
        self.m_image = image
        self.imageChanged.emit()
        self.update()

    image = QtCore.pyqtProperty(QtGui.QImage, fget=image, fset=setImage, notify=imageChanged)
PyCVQML/__init__.py
from PyQt5 import QtQml

from .cvcapture import CVCapture, CVAbstractFilter
from .cvitem import CVItem


def registerTypes(uri="PyCVQML"):
    QtQml.qmlRegisterType(CVCapture, uri, 1, 0, "CVCapture")
    QtQml.qmlRegisterType(CVItem, uri, 1, 0, "CVItem")
Then you use it in main.py. I added 2 example filters; CVCapture has a filters property, and the filters passed to it are executed in the order in which they are listed. To implement a new filter you must inherit from CVAbstractFilter and override the process_image() method, which receives the image as an np.ndarray and should return the filtered result (a further minimal sketch of this follows after the QML below).
main.py
import cv2
import numpy as np

from PyQt5 import QtGui, QtCore, QtQuick, QtQml

import PyCVQML


def max_rgb_filter(image):
    # split the image into its BGR components
    (B, G, R) = cv2.split(image)
    # find the maximum pixel intensity values for each
    # (x, y)-coordinate, then set all pixel values less
    # than M to zero
    M = np.maximum(np.maximum(R, G), B)
    R[R < M] = 0
    G[G < M] = 0
    B[B < M] = 0
    # merge the channels back together and return the image
    return cv2.merge([B, G, R])


def rgb_to_gray(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    return gray


class MaxRGBFilter(PyCVQML.CVAbstractFilter):
    def process_image(self, src):
        return max_rgb_filter(src)


class GrayFilter(PyCVQML.CVAbstractFilter):
    def process_image(self, src):
        return rgb_to_gray(src)


if __name__ == '__main__':
    import os
    import sys

    app = QtGui.QGuiApplication(sys.argv)
    PyCVQML.registerTypes()
    QtQml.qmlRegisterType(MaxRGBFilter, "Filters", 1, 0, "MaxRGBFilter")
    QtQml.qmlRegisterType(GrayFilter, "Filters", 1, 0, "GrayFilter")
    view = QtQuick.QQuickView()
    view.setTitle("PyCVQML Example")
    dir_path = os.path.dirname(os.path.realpath(__file__))
    view.setSource(QtCore.QUrl.fromLocalFile(QtCore.QDir(dir_path).absoluteFilePath("main.qml")))
    view.show()
    sys.exit(app.exec_())
main.qml
import QtQuick 2.0

import PyCVQML 1.0
import Filters 1.0

Item {
    width: 800
    height: 600

    CVItem {
        id: imageWriter
        anchors.fill: parent
        image: capture.image
    }

    MaxRGBFilter {
        id: max_rgb_filter
    }
    GrayFilter {
        id: gray_filter
    }

    CVCapture {
        id: capture
        index: 0
        filters: [max_rgb_filter, gray_filter]
        Component.onCompleted: capture.start()
    }
}
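As a further illustration of the filter API described above, here is a minimal sketch of one more filter. The BlurFilter class name and the 15x15 Gaussian kernel are my own choices for the example, not part of the original answer; the sketch only relies on the CVAbstractFilter.process_image() contract shown in cvcapture.py.

import cv2

import PyCVQML


class BlurFilter(PyCVQML.CVAbstractFilter):
    # Hypothetical extra filter: blurs each frame with a 15x15 Gaussian kernel.
    def process_image(self, src):
        # src arrives as an np.ndarray (the frame handed over by CVCapture);
        # the method must return the processed frame as an np.ndarray as well.
        return cv2.GaussianBlur(src, (15, 15), 0)


# Registered like the other filters in main.py, for example:
# QtQml.qmlRegisterType(BlurFilter, "Filters", 1, 0, "BlurFilter")

In QML it would then simply be appended to the same list, e.g. filters: [max_rgb_filter, blur_filter], and CVCapture applies it at that position in the chain.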