WebGPU Hello World
<html>
<head>
<title>WebGPU Hello World</title>
<script>
async function main() {
  // Bail out early if the browser does not expose the WebGPU API at all.
  if (!navigator.gpu) {
    console.log("WebGPU is not supported in this browser.");
    return;
  }
  const adapter = await navigator.gpu.requestAdapter();
  if (adapter === null) {
    console.log("No suitable GPU adapter found.");
    return;
  }
  const device = await adapter.requestDevice();
  const canvas = document.getElementById("gpu_canvas");
  const context = canvas.getContext("webgpu");
  if (context === null) {
    console.log("Canvas context not found - does your browser support WebGPU?");
    return;
  }
  // Connect the context to the rendering device.
  const presentation_format = navigator.gpu.getPreferredCanvasFormat();
  context.configure({
    device: device,
    format: presentation_format,
    alphaMode: "opaque"
  });
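  // getPreferredCanvasFormat() returns the swap-chain format the platform
  // composites most efficiently, typically "bgra8unorm" or "rgba8unorm".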
  // Shape data we will render.
  const verts = new Float32Array([
    -0.9,  0.9, 0,
     0.9,  0.9, 0,
     0.9, -0.9, 0,
    -0.9, -0.9, 0
  ]);
  const num_verts = verts.length / 3;
  const vertex_buffer = device.createBuffer({
    size: verts.byteLength,
    usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST
  });
  device.queue.writeBuffer(vertex_buffer, 0, verts);
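  // writeBuffer() enqueues a host-to-GPU copy; this is why the buffer was
  // created with the COPY_DST usage flag above.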
  // Index data: two triangles forming the quad.
  const indices = new Uint32Array([0, 1, 2, 0, 2, 3]);
  const num_indices = indices.length;
  const index_buffer = device.createBuffer({
    size: indices.byteLength,
    usage: GPUBufferUsage.INDEX | GPUBufferUsage.COPY_DST
  });
  device.queue.writeBuffer(index_buffer, 0, indices);
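  // Uint32Array pairs with the "uint32" index format passed to setIndexBuffer()
  // below; a Uint16Array with "uint16" would also work for a mesh this small.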
  // Shaders: vertex and fragment stages in a single WGSL module.
  const shaders = `
    struct VertIn {
      @location(0) position : vec4f,
    };

    struct VertOut {
      @builtin(position) position : vec4f,
      @location(0) uv : vec2f
    };

    @vertex
    fn vertex_main(input : VertIn,
                   @builtin(vertex_index) vertex_index : u32) -> VertOut
    {
      var output : VertOut;
      // Assign per-corner texture coordinates based on the vertex index.
      let uvs = array(vec2f(0,0), vec2f(1,0), vec2f(1,1), vec2f(0,1));
      output.uv = uvs[vertex_index];
      output.position = input.position;
      return output;
    }

    @group(0) @binding(0) var my_sampler : sampler;
    @group(0) @binding(1) var my_texture : texture_2d<f32>;

    @fragment
    fn fragment_main(input : VertOut) -> @location(0) vec4f
    {
      return textureSample(my_texture, my_sampler, input.uv);
    }
  `.trim();
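  // Note: the vertex buffer supplies float32x3 data for the vec4f position
  // attribute; WebGPU fills the missing components with defaults (w = 1.0).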
  const shader_module = device.createShaderModule({
    code: shaders
  });
  // Graphics pipeline definition. With layout: "auto", the bind group
  // layouts are inferred from the shader code; the vertex buffer layouts
  // are declared explicitly here.
  const pipeline_descriptor = {
    label: "Textured quad pipeline",
    layout: "auto",
    vertex: {
      module: shader_module,
      entryPoint: "vertex_main",
      buffers: [{ // vertex data; add colors, normals, etc. here
        arrayStride: 12, // 3 floats * 4 bytes per vertex
        attributes: [{
          shaderLocation: 0,
          format: "float32x3",
          offset: 0
        }]
      }]
    },
    fragment: {
      module: shader_module,
      entryPoint: "fragment_main",
      targets: [{
        format: presentation_format,
        blend: {
          color: { srcFactor: "src-alpha", dstFactor: "one-minus-src-alpha" },
          alpha: { } // defaults: srcFactor "one", dstFactor "zero", operation "add"
        }
      }]
    }
  };
  const pipeline = device.createRenderPipeline(pipeline_descriptor);
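  // The async variant, createRenderPipelineAsync(), returns a promise and
  // avoids blocking while the pipeline compiles.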
  // A texture and sampler. This assumes a file named "icon.png" is served
  // alongside this page.
  const response = await fetch("icon.png");
  const blob = await response.blob();
  const bitmap = await createImageBitmap(blob);
  console.log("icon.png: width=" + bitmap.width + " height=" + bitmap.height);
  const texture_descriptor = {
    size: { width: bitmap.width, height: bitmap.height },
    format: "rgba8unorm",
    // COPY_DST and RENDER_ATTACHMENT are required by copyExternalImageToTexture().
    usage: GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.COPY_DST | GPUTextureUsage.RENDER_ATTACHMENT
  };
  const texture = device.createTexture(texture_descriptor);
  device.queue.copyExternalImageToTexture(
    { source: bitmap },
    { texture: texture },
    texture_descriptor.size);
  const sampler = device.createSampler();
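  // createSampler() with no arguments defaults to nearest-neighbor filtering
  // and clamp-to-edge addressing; pass { magFilter: "linear", minFilter: "linear" }
  // for smoother sampling.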
  const texture_bind_group = device.createBindGroup({
    layout: pipeline.getBindGroupLayout(0),
    entries: [
      { binding: 0, resource: sampler },
      { binding: 1, resource: texture.createView() }
    ]
  });
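  // getBindGroupLayout(0) fetches the layout that "auto" generated for
  // @group(0) in the shader module, so the entries must match its bindings.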
  // Sets up the render pass, which will record draw commands.
  const render_pass_descriptor = {
    colorAttachments: [{
      clearValue: { r: 0.3, g: 0.3, b: 0.4, a: 1.0 },
      loadOp: 'clear',
      storeOp: 'store'
    }]
  };
  // Main render loop.
  requestAnimationFrame(function draw() {
    // We must acquire a fresh texture view from the canvas each frame.
    render_pass_descriptor.colorAttachments[0].view = context.getCurrentTexture().createView();
    // A command encoder records draw commands per render pass.
    const command_encoder = device.createCommandEncoder();
    // One render pass to draw our shape.
    const pass = command_encoder.beginRenderPass(render_pass_descriptor);
    pass.setPipeline(pipeline);
    pass.setVertexBuffer(0, vertex_buffer, 0, verts.byteLength);
    pass.setIndexBuffer(index_buffer, "uint32", 0, indices.byteLength);
    pass.setBindGroup(0, texture_bind_group);
    pass.drawIndexed(num_indices, 1, 0, 0, 0);
    pass.end();
    // Send all recorded commands to the GPU.
    device.queue.submit([command_encoder.finish()]);
    // Request the next frame.
    requestAnimationFrame(draw);
  });
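  // Note: the texture returned by getCurrentTexture() is only valid until the
  // frame is presented, which is why the attachment view is rebuilt in draw().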
}
// Bootstrap.
window.addEventListener('load', main);
</script>
</head>
<body>
<p>Hello, world.</p>
<canvas id="gpu_canvas" width="800" height="600">
</canvas>
</body>
</html>