I am trying to convert a YCbCr file from 8 bpp to 10 bpp.
So far, my best approach is still a couple of orders of magnitude slower than the most basic C implementation.
The naive approach in C runs in about 8 seconds; making the code work on chunks instead brings the time down to well under a second.
I am curious what kind of performance can be had for handling binary files in standard Python. The example file is CIF resolution, i.e. "small" compared to 1080p content. Feel free to add numpy suggestions as well, although I am mainly interested in standard Python.
The test file can be downloaded from
http://trace.eas.asu.edu/yuv/foreman/foreman_cif.7z
The sha1sum of the correct 10-bit output is:
c511dabc793383f7fd0ed69b4bb9b9f89ef73b84
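To check a result against that digest, the output file can be hashed with the standard hashlib module (a small sketch, assuming the output file name used in the scripts below):

import hashlib

# Print the SHA-1 of the generated 10 bpp file; it should match the digest above.
with open('py_10bpp.yuv', 'rb') as f:
    print(hashlib.sha1(f.read()).hexdigest())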
Python:
#!/usr/bin/env python
import array

f_in = 'foreman_cif.yuv'
f_out = 'py_10bpp.yuv'

def bytesfromfile(f):
    while True:
        raw = array.array('B')
        raw.fromstring(f.read(8192))
        if not raw:
            break
        yield raw

with open(f_in, 'rb') as fd_in, \
        open(f_out, 'wb') as fd_out:

    for byte in bytesfromfile(fd_in):
        data = []
        for i in byte:
            i <<= 2
            data.append(i & 0xff)
            data.append((i >> 8) & 0xff)

        fd_out.write(array.array('B', data).tostring())
The naive C counterpart:
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char** argv)
{
    int c;
    int d[2];
    FILE* fd_in;
    FILE* fd_out;

    fd_in = fopen("foreman_cif.yuv", "rb");
    fd_out = fopen("c_10bpp.yuv", "wb");

    while ((c = fgetc(fd_in)) != EOF) {
        c <<= 2;
        d[0] = c & 0xff;
        d[1] = (c >> 8) & 0xff;
        fwrite(&d[0], 1, 1, fd_out);
        fwrite(&d[1], 1, 1, fd_out);
    }

    fclose(fd_in);
    fclose(fd_out);

    return EXIT_SUCCESS;
}
The code from the question takes 25 seconds on my machine; numpy takes 0.37 seconds:
import numpy as np
a_in = np.memmap('foreman_cif.yuv', mode='readonly')
a_out = np.memmap('py_10bpp.yuv', mode='write', shape=2*len(a_in))
a_out[::2] = a_in << 2
a_out[1::2] = a_in >> 6
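An equivalent numpy formulation without memmap (a sketch, not part of the original timings, assuming the whole file fits in memory) widens each sample to little-endian uint16 and shifts it into the 10-bit range:

import numpy as np

a = np.fromfile('foreman_cif.yuv', dtype=np.uint8)
# Widen to little-endian 16-bit, shift left by 2, and write the raw bytes out.
(a.astype('<u2') << 2).tofile('py_10bpp.yuv')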
cython -- 0.20 seconds:
from functools import partial

import pyximport; pyximport.install() # pip install cython

from bpp8to10 import convert # bpp8to10.pyx

f_in = 'foreman_cif.yuv'
f_out = 'py_10bpp.yuv'

def main():
    with open(f_in, 'rb') as fd_in, open(f_out, 'wb') as fd_out:
        for chunk in iter(partial(fd_in.read, 8192), b''):
            fd_out.write(convert(chunk))

main()
where bpp8to10.pyx is:
from cpython.bytes cimport PyBytes_FromStringAndSize

def convert(bytes chunk not None):
    cdef:
        bytes data = PyBytes_FromStringAndSize(NULL, len(chunk) * 2)
        char* buf = data # no copy
        Py_ssize_t j = 0
        unsigned char c

    for c in chunk:
        buf[j] = (c << 2)
        buf[j + 1] = (c >> 6)
        j += 2

    return data
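If pyximport is not convenient, the extension can also be built ahead of time with a minimal setup.py (a sketch, not part of the timed answer; module and file names as above):

# setup.py -- build with: python setup.py build_ext --inplace
from distutils.core import setup
from Cython.Build import cythonize

setup(ext_modules=cythonize('bpp8to10.pyx'))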
The biggest single speedup for the pure CPython version comes from moving the code from module level into a function (main()) -- local name lookups inside a function are faster than module-level globals. Together with the tweaks noted in the comments and a two-process Pool, it runs in 6.7 seconds (2 CPUs):
from functools import partial
from multiprocessing import Pool

f_in = 'foreman_cif.yuv'
f_out = 'py_10bpp.yuv'

def convert(chunk):
    data = bytearray() # [] -> bytearray(): 17 -> 15 seconds
    data_append = data.append # 15 -> 12 seconds
    for b in bytearray(chunk): # on Python 3: `for b in chunk:`
        data_append((b << 2) & 0xff) # low byte
        data_append((b >> 6) & 0xff) # high two bits
    return data

def main(): # put in main(): 25 -> 17 seconds
    pool = Pool(processes=2) # 12 -> 6.7 seconds
    with open(f_in, 'rb') as fd_in, open(f_out, 'wb') as fd_out:
        for data in pool.imap(convert, iter(partial(fd_in.read, 8192), b'')):
            fd_out.write(data)

main()
pypy -- 1.6 seconds:
f_in = 'foreman_cif.yuv'
f_out = 'py_10bpp.yuv'

def convert(chunk):
    data = bytearray() # 1.6 -> 1.5 seconds for preallocated data
    for b in bytearray(chunk):
        data.append((b << 2) & 0xff)
        data.append((b >> 6) & 0xff)
    return data

with open(f_in, 'rb') as fd_in, open(f_out, 'wb') as fd_out:
    while True:
        chunk = fd_in.read(8192)
        if not chunk:
            break
        fd_out.write(convert(chunk))
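One more standard-library idea, not among the timed variants above (a sketch, Python 3 only): precompute the two output bytes for every possible input byte once, so the inner loop does a single table lookup per byte:

f_in = 'foreman_cif.yuv'
f_out = 'py_10bpp.yuv'

# 256-entry table: input byte -> its two output bytes (low byte, high two bits).
TABLE = [bytes(((b << 2) & 0xff, b >> 6)) for b in range(256)]

with open(f_in, 'rb') as fd_in, open(f_out, 'wb') as fd_out:
    for chunk in iter(lambda: fd_in.read(8192), b''):
        fd_out.write(b''.join(TABLE[b] for b in chunk))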