WebGPU Texture - Generate Mipmaps Speed Test
@import url(https://webgpufundamentals.org/webgpu/resources/webgpu-lesson.css);
html, body {
  margin: 0;     /* remove the default margin */
  height: 100%;  /* make the html,body fill the page */
}
#info {
  position: absolute;
  left: 0;
  top: 0;
  background-color: black;
  color: white;
  margin: 0;
  padding: 0.5em;
}
<canvas></canvas>
<pre id="info"></pre>
// WebGPU Texture - Generate Mipmaps Speed Test
// based on https://webgpufundamentals.org/webgpu/webgpu-simple-textured-quad-import-canvas.html
// see https://webgpufundamentals.org/webgpu/lessons/webgpu-utils.html#wgpu-matrix
import {mat4} from 'https://webgpufundamentals.org/3rdparty/wgpu-matrix.module.js';
import GUI from 'https://webgpufundamentals.org/3rdparty/muigui-0.x.module.js';
import RollingAverage from 'https://webgpufundamentals.org/webgpu/resources/js/rolling-average.js';
async function main() {
  const adapter = await navigator.gpu?.requestAdapter();
  const device = await adapter?.requestDevice();
  if (!device) {
    fail('need a browser that supports WebGPU');
    return;
  }
  device.addEventListener('uncapturederror', e => console.error(e.error.message));

  const fpsAverage = new RollingAverage();

  // Get a WebGPU context from the canvas and configure it
  const canvas = document.querySelector('canvas');
  const context = canvas.getContext('webgpu');
  const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
  context.configure({
    device,
    format: presentationFormat,
  });
  const module = device.createShaderModule({
    label: 'our hardcoded textured quad shaders',
    code: `
      struct OurVertexShaderOutput {
        @builtin(position) position: vec4f,
        @location(0) texcoord: vec2f,
      };

      struct Uniforms {
        matrix: mat4x4f,
      };

      @group(0) @binding(2) var<uniform> uni: Uniforms;

      @vertex fn vs(
        @builtin(vertex_index) vertexIndex : u32
      ) -> OurVertexShaderOutput {
        let pos = array(
          vec3f(-0.5, 0.5,-0.5),
          vec3f( 0.5, 0.5,-0.5),
          vec3f(-0.5, 0.5, 0.5),

          vec3f(-0.5, 0.5, 0.5),
          vec3f( 0.5, 0.5,-0.5),
          vec3f( 0.5, 0.5, 0.5),
        );

        var vsOutput: OurVertexShaderOutput;
        let xyz = pos[vertexIndex];
        vsOutput.position = uni.matrix * vec4f(xyz, 1.0);
        vsOutput.texcoord = (xyz.xz + 0.5) * vec2f(1, 50);
        return vsOutput;
      }

      @group(0) @binding(0) var ourSampler: sampler;
      @group(0) @binding(1) var ourTexture: texture_2d<f32>;

      @fragment fn fs(fsInput: OurVertexShaderOutput) -> @location(0) vec4f {
        return textureSample(ourTexture, ourSampler, fsInput.texcoord);
      }
    `,
  });
  const pipeline = device.createRenderPipeline({
    label: 'hardcoded textured quad pipeline',
    layout: 'auto',
    vertex: {
      module,
    },
    fragment: {
      module,
      targets: [{ format: presentationFormat }],
    },
  });
  const numMipLevels = (...sizes) => {
    const maxSize = Math.max(...sizes);
    return (1 + Math.log2(maxSize)) | 0;
  };
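  // e.g. numMipLevels(2048, 2048) === 12  (2048, 1024, ..., 2, 1)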
  // copies an ImageBitmap/canvas/video into mip level 0 of a texture
  // (unused in this test; kept from the original example)
  function copySourceToTexture(device, texture, source, {flipY} = {}) {
    device.queue.copyExternalImageToTexture(
      { source, flipY, },
      { texture },
      { width: source.width, height: source.height },
    );
    //if (texture.mipLevelCount > 1) {
    //  generateMips(device, texture);
    //}
  }
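  // Generate mipmaps on the GPU: each mip level is produced by rendering the
  // previous (larger) level into it through a linear-filtering sampler, which
  // amounts to a 2x2 box filter when downscaling by half. The shader module and
  // sampler are created lazily; one render pipeline is cached per texture format.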
  const generateMips = (() => {
    let sampler;
    let module;
    const pipelineByFormat = {};

    return function generateMips(device, texture) {
      if (!module) {
        module = device.createShaderModule({
          label: 'textured quad shaders for mip level generation',
          code: `
            struct VSOutput {
              @builtin(position) position: vec4f,
              @location(0) texcoord: vec2f,
            };

            @vertex fn vs(
              @builtin(vertex_index) vertexIndex : u32
            ) -> VSOutput {
              let pos = array(
                // 1st triangle
                vec2f( 0.0, 0.0),  // center
                vec2f( 1.0, 0.0),  // right, center
                vec2f( 0.0, 1.0),  // center, top

                // 2nd triangle
                vec2f( 0.0, 1.0),  // center, top
                vec2f( 1.0, 0.0),  // right, center
                vec2f( 1.0, 1.0),  // right, top
              );

              var vsOutput: VSOutput;
              let xy = pos[vertexIndex];
              vsOutput.position = vec4f(xy * 2.0 - 1.0, 0.0, 1.0);
              vsOutput.texcoord = vec2f(xy.x, 1.0 - xy.y);
              return vsOutput;
            }

            @group(0) @binding(0) var ourSampler: sampler;
            @group(0) @binding(1) var ourTexture: texture_2d<f32>;

            @fragment fn fs(fsInput: VSOutput) -> @location(0) vec4f {
              return textureSample(ourTexture, ourSampler, fsInput.texcoord);
            }
          `,
        });

        sampler = device.createSampler({
          minFilter: 'linear',
        });
      }
      const encoder = device.createCommandEncoder({
        label: 'mip gen encoder',
      });

      const format = texture.format;
      if (!pipelineByFormat[format]) {
        pipelineByFormat[format] = device.createRenderPipeline({
          label: 'mip level generator pipeline',
          layout: 'auto',
          vertex: {
            module,
          },
          fragment: {
            module,
            targets: [{ format }],
          },
        });
      }
      const pipeline = pipelineByFormat[format];

      // render mip level (n - 1) into mip level n, one level at a time
      for (let baseMipLevel = 1; baseMipLevel < texture.mipLevelCount; ++baseMipLevel) {
        const bindGroup = device.createBindGroup({
          layout: pipeline.getBindGroupLayout(0),
          entries: [
            { binding: 0, resource: sampler },
            { binding: 1, resource: texture.createView({baseMipLevel: baseMipLevel - 1, mipLevelCount: 1}) },
          ],
        });

        const renderPassDescriptor = {
          label: 'mip level generation renderPass',
          colorAttachments: [
            {
              view: texture.createView({baseMipLevel, mipLevelCount: 1}),
              loadOp: 'clear',
              storeOp: 'store',
            },
          ],
        };

        const pass = encoder.beginRenderPass(renderPassDescriptor);
        pass.setPipeline(pipeline);
        pass.setBindGroup(0, bindGroup);
        pass.draw(6);  // call our vertex shader 6 times
        pass.end();
      }

      const commandBuffer = encoder.finish();
      device.queue.submit([commandBuffer]);
    };
  })();
  const size = 2048;
  const half = size / 2;
  // create the 2048x2048 test texture with a full mip chain so generateMips
  // below actually has levels to fill
  const texture = device.createTexture({
    size: [size, size],
    mipLevelCount: numMipLevels(size, size),
    format: 'rgba8unorm',
    usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING,
  });
  // single-level view of mip 0, used as a color attachment when clearing the texture
  const view = texture.createView({baseMipLevel: 0, mipLevelCount: 1});
  // offsets to the various uniform values in float32 indices
  const kMatrixOffset = 0;

  const sampler = device.createSampler();

  // create a buffer for the uniform values
  const uniformBufferSize =
    16 * 4;  // matrix is 16 32bit floats (4bytes each)
  const uniformBuffer = device.createBuffer({
    label: 'uniforms for quad',
    size: uniformBufferSize,
    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
  });

  // create a typedarray to hold the values for the uniforms in JavaScript
  // note: this stripped-down test never writes uniformValues to the buffer,
  // so uni.matrix stays all zeros and the quad itself is not visible
  const uniformValues = new Float32Array(uniformBufferSize / 4);
  const matrix = uniformValues.subarray(kMatrixOffset, 16);
  const bindGroup = device.createBindGroup({
    layout: pipeline.getBindGroupLayout(0),
    entries: [
      { binding: 0, resource: sampler },
      { binding: 1, resource: texture.createView() },
      { binding: 2, resource: { buffer: uniformBuffer }},
    ],
  });
  const renderPassDescriptor = {
    label: 'our basic canvas renderPass',
    colorAttachments: [
      {
        // view: <- to be filled out when we render
        clearValue: [0.3, 0.3, 0.3, 1],
        loadOp: 'clear',
        storeOp: 'store',
      },
    ],
  };
  let then = 0;

  const info = document.querySelector('#info');

  const settings = {
    count: 1,
  };

  const gui = new GUI();
  gui.add(settings, 'count', 1, 1000, 1);
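  // 'count' is how many times per frame the loop below clears the texture,
  // regenerates its full mip chain, and draws the quad to the canvas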
  function render(time) {
    const deltaTime = time - then;
    then = time;
    fpsAverage.addSample(deltaTime);
    info.textContent = `fps: ${(1000 / fpsAverage.get()).toFixed(0)}`;

    // Get the current texture from the canvas context and
    // set it as the texture to render to.
    const canvasTexture = context.getCurrentTexture();
    renderPassDescriptor.colorAttachments[0].view = canvasTexture.createView();
    //renderPassDescriptor.colorAttachments[0].view = texture.createView();

    // vary the clear color each frame so it's obvious new frames are rendering
    renderPassDescriptor.colorAttachments[0].clearValue[0] = time % 1;

    for (let i = 0; i < settings.count; ++i) {
      // clear mip level 0 of the texture
      {
        const encoder = device.createCommandEncoder({});
        renderPassDescriptor.colorAttachments[0].view = view;
        const pass = encoder.beginRenderPass(renderPassDescriptor);
        pass.end();
        const commandBuffer = encoder.finish();
        device.queue.submit([commandBuffer]);
      }

      // regenerate the texture's mip levels
      generateMips(device, texture);

      // draw the textured quad to the canvas
      {
        const encoder = device.createCommandEncoder({});
        renderPassDescriptor.colorAttachments[0].view = canvasTexture.createView();
        const pass = encoder.beginRenderPass(renderPassDescriptor);
        pass.setPipeline(pipeline);
        pass.setBindGroup(0, bindGroup);
        pass.draw(6);  // call our vertex shader 6 times
        pass.end();
        const commandBuffer = encoder.finish();
        device.queue.submit([commandBuffer]);
      }
    }

    requestAnimationFrame(render);
  }
  requestAnimationFrame(render);
  const observer = new ResizeObserver(entries => {
    for (const entry of entries) {
      const canvas = entry.target;
      const width = entry.contentBoxSize[0].inlineSize;
      const height = entry.contentBoxSize[0].blockSize;
      canvas.width = Math.max(1, Math.min(width, device.limits.maxTextureDimension2D));
      canvas.height = Math.max(1, Math.min(height, device.limits.maxTextureDimension2D));
    }
  });
  observer.observe(canvas);
}

function fail(msg) {
  // eslint-disable-next-line no-alert
  alert(msg);
}

main();
{"name":"WebGPU Texture - Generate Mipmaps Speed Test","settings":{},"filenames":["index.html","index.css","index.js"]}