在 Web Worker 中解码图像

And*_*and 3 javascript google-chrome web-worker webgl

在我们的 WebGL 应用程序中,我尝试在 Web Worker 中加载和解码纹理图像,以避免主线程中的渲染出现卡顿。在 worker 中使用 createImageBitmap 并将 ImageBitmap 传回主线程效果很好,但在 Chrome 中,这会用到三个或更多(可能取决于核心数量?)单独的内部线程(ThreadPoolForegroundWorker);加上主线程和我自己的 worker,总共会产生五个线程。

我猜这会干扰我四核机器上其余的渲染工作,因为我能在 Chrome 开发者工具的 Performance 面板中看到一些莫名其妙的长帧。

那么,我可以以某种方式限制 createImageBitmap 使用的线程数量吗?即使我把图像作为 Blob 或 ArrayBuffer 传回主线程并在那里调用 createImageBitmap,它的内部线程仍会与我自己的 worker 和主线程竞争。

我也尝试过在 worker 中创建常规 Image 对象而不显式解码,但 Image 在 worker 上下文中未定义;如果想把它们创建为元素,document 同样未定义。而且常规图像不是可传输对象(Transferable),所以在主线程创建后再传给 worker 似乎也行不通。

期待任何建议...

gma*_*man 6

没有理由在 worker 中使用 createImageBitmap(好吧,请参见底部)。浏览器已经在单独的线程中解码图像,在 worker 中做这件事不会带来任何好处。更大的问题是,ImageBitmap 无法知道你最终把图像交给 WebGL 时会如何使用它。如果你请求的格式与 ImageBitmap 解码出的格式不同,WebGL 就必须再次转换和/或解码,而你又无法向 createImageBitmap 提供足够的信息来指定你希望它解码成的格式。

除此之外,Chrome 中的 WebGL 必须将图像数据从渲染进程传输到 GPU 进程,这对于大图像来说是一个相对较大的副本(RGBA 的 1024x1024 为 4meg)

在我看来,更好的 API 可以让你告诉 ImageBitmap 你想要什么格式以及你想要它的位置(CPU、GPU)。这样浏览器就可以异步准备图像,并且完成后不需要繁重的工作。

无论如何,这里有一个测试。如果您取消选中“更新纹理”,那么它仍然会下载和解码纹理,但它只是不调用gl.texImage2D上传纹理。在这种情况下,我没有看到卡顿(不能证明这是问题所在,但我认为这就是问题所在)

// Grab the WebGL canvas (image display) and the 2D canvas (frame-time graph).
const m4 = twgl.m4;
const gl = document.querySelector('#webgl').getContext('webgl');
const ctx = document.querySelector('#graph').getContext('2d');

// When unchecked, images are still fetched and decoded but never uploaded,
// which isolates the cost of gl.texImage2D itself as the jank source.
let update = true;
document.querySelector('#update').addEventListener('change', function() {
  update = this.checked;
});

// Vertex shader: the unit quad's positions (0..1) double as texture
// coordinates, so no separate texcoord attribute is needed.
const vs = `
attribute vec4 position;
uniform mat4 matrix;
varying vec2 v_texcoord;
void main() {
  gl_Position = matrix * position;
  v_texcoord = position.xy;
}
`;

// Fragment shader: plain textured draw.
const fs = `
precision mediump float;
varying vec2 v_texcoord;
uniform sampler2D tex;
void main() {
  gl_FragColor = texture2D(tex, v_texcoord);
}
`;

const program = twgl.createProgram(gl, [vs, fs]);
const posLoc = gl.getAttribLocation(program, 'position');
const matLoc = gl.getUniformLocation(program, 'matrix');

// Unit quad (two triangles spanning 0..1) used for every draw.
const buf = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, buf);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
  0, 0,
  1, 0,
  0, 1,
  0, 1,
  1, 0,
  1, 1,
]), gl.STATIC_DRAW);

gl.enableVertexAttribArray(posLoc);
gl.vertexAttribPointer(posLoc, 2, gl.FLOAT, false, 0, 0);

// Single texture, initialized to one blue pixel so something renders
// before the first image arrives. Clamp + linear (no mips) keeps
// non-power-of-two image sizes legal in WebGL1.
const tex = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, tex);
gl.texImage2D(
    gl.TEXTURE_2D, 0, gl.RGBA, 1, 1, 0, gl.RGBA, gl.UNSIGNED_BYTE,
    new Uint8Array([0, 0, 255, 255]));
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);

const m = m4.identity();
let frameCount = 0;
let previousTime = 0;
let imgNdx = 0;          // index of the next URL to fetch
let imgAspect = 1;       // aspect ratio of the most recently uploaded image

// Test images cycled forever; loadNextImage appends a cache-buster so the
// browser re-downloads and re-decodes each time.
const imageUrls = [
  'https://i.imgur.com/KjUybBD.png',
  'https://i.imgur.com/AyOufBk.jpg',
  'https://i.imgur.com/UKBsvV0.jpg',
  'https://i.imgur.com/TSiyiJv.jpg',
];

// Endlessly cycles through imageUrls: fetch, decode via createImageBitmap,
// optionally upload to the texture, then repeat after one second.
async function loadNextImage() {
  // Cache-busting query forces a real download + decode every time.
  const nextUrl = `${imageUrls[imgNdx]}?cachebust=${performance.now()}`;
  imgNdx = (imgNdx + 1) % imageUrls.length;
  const response = await fetch(nextUrl, {mode: 'cors'});
  const imageBlob = await response.blob();
  // Ask for raw pixels: no alpha premultiply, no color-space conversion.
  const decodeOptions = {
    premultiplyAlpha: 'none',
    colorSpaceConversion: 'none',
  };
  const bitmap = await createImageBitmap(imageBlob, decodeOptions);
  if (update) {
    // This upload is the call being measured for jank.
    gl.bindTexture(gl.TEXTURE_2D, tex);
    gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, bitmap);
    imgAspect = bitmap.width / bitmap.height;
  }
  setTimeout(loadNextImage, 1000);
}
loadNextImage();

// Per-frame loop: plots the instantaneous frame rate on the 2D canvas,
// then draws the rotating textured quad with WebGL.
function render(currentTime) {
  const deltaTime = currentTime - previousTime;
  previousTime = currentTime;
  
  {
    // Frame-time graph: one 1px column per frame; column height encodes
    // FPS relative to 60 (y == height/2 when running at 60fps). The color
    // alternates each horizontal sweep so successive passes are visible.
    const {width, height} = ctx.canvas;
    const x = frameCount % width;
    const y = 1000 / deltaTime / 60 * height / 2;
    ctx.fillStyle = frameCount % (width * 2) < width ? 'red' : 'blue';
    ctx.clearRect(x, 0, 1, height);
    ctx.fillRect(x, y, 1, height);
    ctx.clearRect(0, 0, 30, 15);
    ctx.fillText((1000 / deltaTime).toFixed(1), 2, 10);
  }

  // Spin the unit quad, correcting for display and image aspect ratios,
  // and recenter it (the quad's vertices span 0..1).
  gl.useProgram(program);
  const dispAspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
  m4.scaling([1 / dispAspect, 1, 1], m);
  m4.rotateZ(m, currentTime * 0.001, m);
  m4.scale(m, [imgAspect, 1, 1], m);
  m4.translate(m, [-0.5, -0.5, 0], m);
  gl.uniformMatrix4fv(matLoc, false, m);
  gl.drawArrays(gl.TRIANGLES, 0, 6);
  
  ++frameCount;
  requestAnimationFrame(render);
}
requestAnimationFrame(render);
Run Code Online (Sandbox Code Playgroud)
canvas { border: 1px solid black; margin: 2px; }
#ui { position: absolute; }
Run Code Online (Sandbox Code Playgroud)
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<div id="ui"><input type="checkbox" id="update" checked><label for="update">Update Texture</label></div>
<canvas id="webgl"></canvas>
<canvas id="graph"></canvas>
Run Code Online (Sandbox Code Playgroud)

我很确定,唯一能保证没有卡顿的方法,是在 worker 中自己解码图像,把它作为 ArrayBuffer 传回主线程,然后用 gl.texSubImage2D 每帧只上传几行。

// Streamed-upload variant: decoding happens in a worker and raw pixels
// are uploaded a few rows per frame (see worker.onmessage below).
const m4 = twgl.m4;
const gl = document.querySelector('#webgl').getContext('webgl');
const ctx = document.querySelector('#graph').getContext('2d');

// Vertex shader: the unit quad's positions double as texture coordinates.
const vs = `
attribute vec4 position;
uniform mat4 matrix;
varying vec2 v_texcoord;
void main() {
  gl_Position = matrix * position;
  v_texcoord = position.xy;
}
`

// Fragment shader: plain textured draw.
const fs = `
precision mediump float;
varying vec2 v_texcoord;
uniform sampler2D tex;
void main() {
  gl_FragColor = texture2D(tex, v_texcoord);
}
`;

const program = twgl.createProgram(gl, [vs, fs]);
const posLoc = gl.getAttribLocation(program, 'position');
const matLoc = gl.getUniformLocation(program, 'matrix');

// Unit quad (two triangles spanning 0..1).
const buf = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, buf);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
  0, 0,
  1, 0,
  0, 1,
  0, 1,
  1, 0,
  1, 1,
]), gl.STATIC_DRAW);

gl.enableVertexAttribArray(posLoc);
gl.vertexAttribPointer(posLoc, 2, gl.FLOAT, false, 0, 0);

// Allocates a texture pre-filled with a single blue placeholder pixel so
// it is renderable before any image data arrives. Clamp-to-edge plus
// linear filtering (no mips) keeps non-power-of-two sizes legal in WebGL1.
function createTexture(gl) {
  const texture = gl.createTexture();
  gl.bindTexture(gl.TEXTURE_2D, texture);
  const bluePixel = new Uint8Array([0, 0, 255, 255]);
  gl.texImage2D(
      gl.TEXTURE_2D, 0, gl.RGBA, 1, 1, 0, gl.RGBA, gl.UNSIGNED_BYTE,
      bluePixel);
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
  return texture;
}

// Double buffering: render() samples drawingTex while new rows are
// streamed into loadingTex, then the two are swapped.
let drawingTex = createTexture(gl);
let loadingTex = createTexture(gl);

const m = m4.identity();
let frameCount = 0;
let previousTime = 0;

// Worker source, inlined as a string and loaded via a Blob URL below.
// The worker fetches an image, decodes it with createImageBitmap, draws
// the bitmap into an OffscreenCanvas to extract the raw RGBA bytes, and
// transfers that buffer to the main thread (zero-copy). It decodes one
// image per incoming message.
const workerScript = `
const ctx = new OffscreenCanvas(1, 1).getContext('2d');
let imgNdx = 0;
let imgAspect = 1;

const imageUrls = [
  'https://i.imgur.com/KjUybBD.png',
  'https://i.imgur.com/AyOufBk.jpg',
  'https://i.imgur.com/UKBsvV0.jpg',
  'https://i.imgur.com/TSiyiJv.jpg',
];

async function loadNextImage() {
  const url = \`\${imageUrls[imgNdx]}?cachebust=\${performance.now()}\`;
  imgNdx = (imgNdx + 1) % imageUrls.length;
  const res = await fetch(url, {mode: 'cors'});
  const blob = await res.blob();
  const bitmap = await createImageBitmap(blob, {
    premultiplyAlpha: 'none',
    colorSpaceConversion: 'none',
  });
  ctx.canvas.width = bitmap.width;
  ctx.canvas.height = bitmap.height;
  ctx.drawImage(bitmap, 0, 0);
  const imgData = ctx.getImageData(0, 0, ctx.canvas.width, ctx.canvas.height);
  const data = new Uint8Array(imgData.data);
  postMessage({
    width: imgData.width,
    height: imgData.height,
    data: data.buffer,
  }, [data.buffer]);
}

onmessage = loadNextImage;
`;
const blob = new Blob([workerScript], {type: 'application/javascript'});
const worker = new Worker(URL.createObjectURL(blob));
let imgAspect = 1;   // aspect ratio of the most recently finished image
// Streams each decoded image into loadingTex at most 20 rows per
// animation frame so no single frame pays for a full-image upload,
// swaps the textures, then requests the next image after 1 second.
worker.onmessage = async(e) => {
  const {width, height, data} = e.data;
  
  // Allocate storage for the whole image up front (pixel source null).
  gl.bindTexture(gl.TEXTURE_2D, loadingTex);
    gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, width, height, 0, gl.RGBA, gl.UNSIGNED_BYTE, null);  
  
  const maxRows = 20;
  for (let y = 0; y < height; y += maxRows) {
    const rows = Math.min(maxRows, height - y);
    gl.bindTexture(gl.TEXTURE_2D, loadingTex);
    // View into the transferred buffer: `rows` rows starting at row y.
    gl.texSubImage2D(gl.TEXTURE_2D, 0, 0, y, width, rows, gl.RGBA, gl.UNSIGNED_BYTE, new Uint8Array(data, y * width * 4, rows * width * 4));  
    await waitRAF();
  }
  // Swap so render() starts drawing the completed texture.
  const temp = loadingTex;
  loadingTex = drawingTex;
  drawingTex = temp;
  imgAspect = width / height;
  await waitMS(1000);
  worker.postMessage('');
};
worker.postMessage('');

// Resolves on the next animation frame (with the rAF timestamp).
function waitRAF() {
  return new Promise((resolve) => {
    requestAnimationFrame(resolve);
  });
}

// Resolves after roughly `ms` milliseconds (default: next macrotask).
function waitMS(ms = 0) {
  return new Promise((resolve) => {
    setTimeout(resolve, ms);
  });
}

// Per-frame loop: plots the instantaneous frame rate on the 2D canvas,
// then draws the rotating quad textured with the completed image.
function render(currentTime) {
  const deltaTime = currentTime - previousTime;
  previousTime = currentTime;
  
  {
    // Frame-time graph: one 1px column per frame; column height encodes
    // FPS relative to 60 (y == height/2 at 60fps); color alternates
    // each horizontal sweep.
    const {width, height} = ctx.canvas;
    const x = frameCount % width;
    const y = 1000 / deltaTime / 60 * height / 2;
    ctx.fillStyle = frameCount % (width * 2) < width ? 'red' : 'blue';
    ctx.clearRect(x, 0, 1, height);
    ctx.fillRect(x, y, 1, height);
    ctx.clearRect(0, 0, 30, 15);
    ctx.fillText((1000 / deltaTime).toFixed(1), 2, 10);
  }

  // Spin the unit quad, correcting for display and image aspect ratios.
  // drawingTex is only ever a fully-uploaded image (see onmessage swap).
  gl.useProgram(program);
  const dispAspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
  m4.scaling([1 / dispAspect, 1, 1], m);
  m4.rotateZ(m, currentTime * 0.001, m);
  m4.scale(m, [imgAspect, 1, 1], m);
  m4.translate(m, [-0.5, -0.5, 0], m);
  gl.bindTexture(gl.TEXTURE_2D, drawingTex);
  gl.uniformMatrix4fv(matLoc, false, m);
  gl.drawArrays(gl.TRIANGLES, 0, 6);
  
  ++frameCount;
  requestAnimationFrame(render);
}
requestAnimationFrame(render);
Run Code Online (Sandbox Code Playgroud)
canvas { border: 1px solid black; margin: 2px; }
Run Code Online (Sandbox Code Playgroud)
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas id="webgl"></canvas>
<canvas id="graph"></canvas>
Run Code Online (Sandbox Code Playgroud)

注意:我也不知道这是否有效。几个可怕的地方和浏览器实现定义

  1. 调整画布大小的性能问题是什么?该代码正在调整工作线程中 OffscreenCanvas 的大小。这可能是一个需要 GPU 重复执行的繁重操作。

  2. 将位图绘制到画布中的性能如何?同样,大 GPU 性能会出现问题,因为浏览器必须将图像传输到 GPU 才能将其绘制到 GPU 2D 画布中。

  3. getImageData 的性能如何?浏览器再次可能会冻结 GPU 来读取 GPU 内存以获取图像数据。

  4. 调整纹理大小可能会出现性能问题。

  5. 目前仅 Chrome 支持 OffscreenCanvas

1、2、3、5 都可以通过在 JavaScript 中自行解码 jpg/png 图像来解决。遗憾的是,尽管浏览器本身就有解码图像的代码,你却无法以任何有用的方式调用它。

对于 4,如果这是一个问题,可以通过分配最大的图像尺寸纹理然后将较小的纹理复制到矩形区域来解决。假设这是一个问题

// Pure-JS-decoder variant: same streamed upload as above, but the worker
// decodes jpg/png itself instead of using createImageBitmap.
const m4 = twgl.m4;
const gl = document.querySelector('#webgl').getContext('webgl');
const ctx = document.querySelector('#graph').getContext('2d');

// Vertex shader: the unit quad's positions double as texture coordinates.
const vs = `
attribute vec4 position;
uniform mat4 matrix;
varying vec2 v_texcoord;
void main() {
  gl_Position = matrix * position;
  v_texcoord = position.xy;
}
`

// Fragment shader: plain textured draw.
const fs = `
precision mediump float;
varying vec2 v_texcoord;
uniform sampler2D tex;
void main() {
  gl_FragColor = texture2D(tex, v_texcoord);
}
`;

const program = twgl.createProgram(gl, [vs, fs]);
const posLoc = gl.getAttribLocation(program, 'position');
const matLoc = gl.getUniformLocation(program, 'matrix');

// Unit quad (two triangles spanning 0..1).
const buf = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, buf);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
  0, 0,
  1, 0,
  0, 1,
  0, 1,
  1, 0,
  1, 1,
]), gl.STATIC_DRAW);

gl.enableVertexAttribArray(posLoc);
gl.vertexAttribPointer(posLoc, 2, gl.FLOAT, false, 0, 0);

// Creates a texture pre-filled with a single blue placeholder pixel so it
// is renderable before any image data arrives; clamp-to-edge + linear
// filtering (no mips) keeps non-power-of-two sizes legal in WebGL1.
function createTexture(gl) {
  const tex = gl.createTexture();
  gl.bindTexture(gl.TEXTURE_2D, tex);
  gl.texImage2D(
      gl.TEXTURE_2D, 0, gl.RGBA, 1, 1, 0, gl.RGBA, gl.UNSIGNED_BYTE,
      new Uint8Array([0, 0, 255, 255]));
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
  return tex;
}

// Double buffering: render() samples drawingTex while loadingTex fills.
let drawingTex = createTexture(gl);
let loadingTex = createTexture(gl);

const m = m4.identity();
let frameCount = 0;
let previousTime = 0;

// Worker source, inlined and loaded from a Blob URL below. This variant
// avoids createImageBitmap entirely: it fetches the compressed bytes and
// decodes them in pure JavaScript (jpeg-js / UPNG.js), so decoding cannot
// compete with the page for the browser's internal decode threads. The
// decoded RGBA buffer is transferred (zero-copy) to the main thread.
// Fix: bail out with an Error when the Content-Type has no registered
// decoder — the original only logged and then crashed with an unrelated
// TypeError by calling `decoder(arrayBuffer)` with decoder === undefined.
const workerScript = `
importScripts(
    // from https://github.com/eugeneware/jpeg-js
    'https://greggman.github.io/doodles/js/JPG-decoder.js',
    // from https://github.com/photopea/UPNG.js
    'https://greggman.github.io/doodles/js/UPNG.js',
);

let imgNdx = 0;
let imgAspect = 1;

const imageUrls = [
  'https://i.imgur.com/KjUybBD.png',
  'https://i.imgur.com/AyOufBk.jpg',
  'https://i.imgur.com/UKBsvV0.jpg',
  'https://i.imgur.com/TSiyiJv.jpg',
];

function decodePNG(arraybuffer) {
  return UPNG.decode(arraybuffer)
}

function decodeJPG(arrayBuffer) {
  return decode(new Uint8Array(arrayBuffer), true);
}

const decoders = {
  'image/png': decodePNG,
  'image/jpeg': decodeJPG,
  'image/jpg': decodeJPG,
};

async function loadNextImage() {
  const url = \`\${imageUrls[imgNdx]}?cachebust=\${performance.now()}\`;
  imgNdx = (imgNdx + 1) % imageUrls.length;
  const res = await fetch(url, {mode: 'cors'});
  const arrayBuffer = await res.arrayBuffer();
  const type = res.headers.get('Content-Type');
  let decoder = decoders[type];
  if (!decoder) {
    console.error('unknown image type:', type);
    throw new Error('unknown image type: ' + type);
  }
  const imgData = decoder(arrayBuffer);
  postMessage({
    width: imgData.width,
    height: imgData.height,
    arrayBuffer: imgData.data.buffer,
  }, [imgData.data.buffer]);
}

onmessage = loadNextImage;
`;
const blob = new Blob([workerScript], {type: 'application/javascript'});
const worker = new Worker(URL.createObjectURL(blob));
let imgAspect = 1;   // aspect ratio of the most recently finished image
// Streams each decoded image into loadingTex at most 20 rows per
// animation frame so no single frame pays for a full-image upload,
// swaps the textures, then requests the next image after 1 second.
worker.onmessage = async(e) => {
  const {width, height, arrayBuffer} = e.data;
  
  // Allocate storage for the whole image up front (pixel source null).
  gl.bindTexture(gl.TEXTURE_2D, loadingTex);
    gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, width, height, 0, gl.RGBA, gl.UNSIGNED_BYTE, null);  
  
  const maxRows = 20;
  for (let y = 0; y < height; y += maxRows) {
    const rows = Math.min(maxRows, height - y);
    gl.bindTexture(gl.TEXTURE_2D, loadingTex);
    // View into the transferred buffer: `rows` rows starting at row y.
    gl.texSubImage2D(gl.TEXTURE_2D, 0, 0, y, width, rows, gl.RGBA, gl.UNSIGNED_BYTE, new Uint8Array(arrayBuffer, y * width * 4, rows * width * 4));  
    await waitRAF();
  }
  // Swap so render() starts drawing the completed texture.
  const temp = loadingTex;
  loadingTex = drawingTex;
  drawingTex = temp;
  imgAspect = width / height;
  await waitMS(1000);
  worker.postMessage('');
};
worker.postMessage('');

// Resolves on the next animation frame (with the rAF timestamp).
function waitRAF() {
  return new Promise((resolve) => {
    requestAnimationFrame(resolve);
  });
}

// Resolves after roughly `ms` milliseconds (default: next macrotask).
function waitMS(ms = 0) {
  return new Promise((resolve) => {
    setTimeout(resolve, ms);
  });
}

// Per-frame loop: plots the instantaneous frame rate on the 2D canvas,
// then draws the rotating quad textured with the completed image.
function render(currentTime) {
  const deltaTime = currentTime - previousTime;
  previousTime = currentTime;
  
  {
    // Frame-time graph: one 1px column per frame; column height encodes
    // FPS relative to 60 (y == height/2 at 60fps); color alternates
    // each horizontal sweep.
    const {width, height} = ctx.canvas;
    const x = frameCount % width;
    const y = 1000 / deltaTime / 60 * height / 2;
    ctx.fillStyle = frameCount % (width * 2) < width ? 'red' : 'blue';
    ctx.clearRect(x, 0, 1, height);
    ctx.fillRect(x, y, 1, height);
    ctx.clearRect(0, 0, 30, 15);
    ctx.fillText((1000 / deltaTime).toFixed(1), 2, 10);
  }

  // Spin the unit quad, correcting for display and image aspect ratios.
  // drawingTex is only ever a fully-uploaded image (see onmessage swap).
  gl.useProgram(program);
  const dispAspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
  m4.scaling([1 / dispAspect, 1, 1], m);
  m4.rotateZ(m, currentTime * 0.001, m);
  m4.scale(m, [imgAspect, 1, 1], m);
  m4.translate(m, [-0.5, -0.5, 0], m);
  gl.bindTexture(gl.TEXTURE_2D, drawingTex);
  gl.uniformMatrix4fv(matLoc, false, m);
  gl.drawArrays(gl.TRIANGLES, 0, 6);
  
  ++frameCount;
  requestAnimationFrame(render);
}
requestAnimationFrame(render);
Run Code Online (Sandbox Code Playgroud)
canvas { border: 1px solid black; margin: 2px; }
Run Code Online (Sandbox Code Playgroud)
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas id="webgl"></canvas>
<canvas id="graph"></canvas>
Run Code Online (Sandbox Code Playgroud)

请注意 jpeg 解码器速度很慢。如果您发现或制作更快,请发表评论


更新

我只想说ImageBitmap 应该足够快,而且我上面关于没有足够信息的一些评论可能并不完全正确。

我目前的理解是,ImageBitmap 的意义就在于让上传更快。它应该这样工作:你给它一个 Blob,它异步地把该图像加载进 GPU;当你调用 texImage2D 时,浏览器就可以把该图像"blit"(用 GPU 渲染)到你的纹理里。我不知道第一个示例为什么会出现卡顿,但我大约每 6 张图像就会看到一次。

另一方面,虽然把图像上传到 GPU 正是 ImageBitmap 的重点,但浏览器并不是必须上传到 GPU——即使用户没有 GPU,ImageBitmap 仍然应该能工作。关键在于,如何实现该功能由浏览器决定,它是快是慢、有没有卡顿,完全取决于浏览器。