<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<link rel="icon" type="image/svg+xml" href="/vite.svg" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Vite + TS</title>
</head>
<body>
<div id="app"></div>
<script type="module" src="/src/main.ts"></script>
</body>
</html>
{
"name": "webgpu_learn_typescript",
"private": true,
"version": "0.0.0",
"type": "module",
"scripts": {
"dev": "vite",
"build": "tsc && vite build",
"preview": "vite preview"
},
"devDependencies": {
"typescript": "^5.0.2",
"vite": "^4.3.2"
},
"dependencies": {
"@types/node": "^20.1.7",
"@webgpu/types": "^0.1.32",
"ts-shader-loader": "^2.0.2"
}
}
{
"compilerOptions": {
"target": "ES2020",
"useDefineForClassFields": true,
"module": "ESNext",
"lib": ["ES2020", "DOM", "DOM.Iterable"],
"skipLibCheck": true,
/* Bundler mode */
"moduleResolution": "bundler",
"allowImportingTsExtensions": true,
"resolveJsonModule": true,
"isolatedModules": true,
"noEmit": true,
/* Linting */
"strict": true,
"noUnusedLocals": true,
"noUnusedParameters": true,
"noFallthroughCasesInSwitch": true,
/* Types */
"types": ["@webgpu/types"],
/* JavaScript */
"allowJs": true
},
"include": ["src"]
}
/Users/song/Code/webgpu_learn/webgpu-for-beginners/webgpu_learn_typescript/src_04_渐变颜色的矩形/main.ts
async function main() {
const adapter = await navigator.gpu?.requestAdapter();
const device = await adapter?.requestDevice();
if (!device) {
console.log("need a browser that supports WebGPU");
return;
}
// Get a WebGPU context from the canvas and configure it
const canvas = document.createElement("canvas");
canvas.style.width = "500px";
canvas.style.height = "300px";
canvas.style.border = "1px solid red";
const context = canvas.getContext("webgpu")!;
const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
context.configure({
device,
format: presentationFormat,
});
//
const module = device.createShaderModule({
label: "our hardcoded rgb triangle shaders",
code: `
struct OurVertexShaderOutput {
@builtin(position) position: vec4f,
@location(0) color: vec4f,
};
@vertex fn vs(
@builtin(vertex_index) vertexIndex : u32
) -> OurVertexShaderOutput {
// positions: two triangles that together form the rectangle
var pos = array<vec2f, 6>(
// first triangle
vec2f(0.0, 0.0), // 0
vec2f(1.0, 0.0), // 1
vec2f(1.0, 1.0), // 2
// second triangle
vec2f(1.0, 1.0), // 3
vec2f(0.0, 1.0), // 4
vec2f(0.0, 0.0), // 5
);
// per-vertex colors
var color = array<vec4f, 6>(
vec4f(1, 0, 0, 1),
vec4f(1, 0, 0, 1),
vec4f(0, 1, 0, 1),
vec4f(0, 1, 0, 1),
vec4f(0, 1, 0, 1),
vec4f(1, 0, 0, 1),
);
var vsOutput: OurVertexShaderOutput;
vsOutput.position = vec4f(pos[vertexIndex], 0.0, 1.0);
// gradient-colored rectangle
vsOutput.color = color[vertexIndex];
return vsOutput;
}
@fragment fn fs(fsInput: OurVertexShaderOutput) -> @location(0) vec4f {
return fsInput.color;
}
`,
});
const pipeline = device.createRenderPipeline({
label: "hardcoded rgb triangle pipeline",
layout: "auto",
vertex: {
module,
entryPoint: "vs",
},
fragment: {
module,
entryPoint: "fs",
targets: [{ format: presentationFormat }],
},
});
const renderPassDescriptor = {
label: "our basic canvas renderPass",
colorAttachments: [
{
// view: <- to be filled out when we render
clearValue: [1.0, 1.0, 1.0, 1],
loadOp: "clear",
storeOp: "store",
},
],
};
function render() {
// Get the current texture from the canvas context and
// set it as the texture to render to.
renderPassDescriptor.colorAttachments[0].view = context
.getCurrentTexture()
.createView();
const encoder = device.createCommandEncoder({
label: "render triangle encoder",
});
const pass = encoder.beginRenderPass(
renderPassDescriptor as GPURenderPassDescriptor
);
pass.setPipeline(pipeline);
pass.draw(6); // call our vertex shader 6 times
pass.end();
const commandBuffer = encoder.finish();
device.queue.submit([commandBuffer]);
}
render();
document.body.appendChild(canvas);
}
main();
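Aside: the positions above span [0, 1], but clip space runs from -1 to +1 on both axes, which is why the rectangle fills only the upper-right quadrant of the canvas. A minimal sketch of the same six vertices remapped to cover the whole canvas (the `fullScreenQuadWGSL` name is made up for illustration; the rest of the file would stay unchanged):

// Hypothetical full-canvas variant of the position array above. Clip space
// spans [-1, 1] on both axes, so these six vertices cover the whole target.
const fullScreenQuadWGSL = /* wgsl */ `
var pos = array<vec2f, 6>(
vec2f(-1.0, -1.0), // first triangle
vec2f( 1.0, -1.0),
vec2f( 1.0,  1.0),
vec2f( 1.0,  1.0), // second triangle
vec2f(-1.0,  1.0),
vec2f(-1.0, -1.0),
);
`;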
/Users/song/Code/webgpu_learn/webgpu-for-beginners/webgpu_learn_typescript/src_02_三角形/main.ts
async function main() {
const adapter = await navigator.gpu?.requestAdapter();
const device = await adapter?.requestDevice();
if (!device) {
fail("need a browser that supports WebGPU");
return;
}
// Get a WebGPU context from the canvas and configure it
const canvas = document.createElement("canvas");
canvas.style.width = "500px";
canvas.style.height = "300px";
// const canvas = document.querySelector("canvas");
const context = canvas.getContext("webgpu")!;
const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
context.configure({
device,
format: presentationFormat,
});
//
const module = device.createShaderModule({
label: "our hardcoded rgb triangle shaders",
code: `
struct OurVertexShaderOutput {
@builtin(position) position: vec4f,
@location(0) color: vec4f,
};
@vertex fn vs(
@builtin(vertex_index) vertexIndex : u32
) -> OurVertexShaderOutput {
// positions (clip space)
var pos = array<vec2f, 3>(
vec2f(0.0, 0.0), // center
vec2f(1.0, 0.0), // middle right
vec2f(0.5, 1.0)  // top, right of center
);
// per-vertex colors
var color = array<vec4f, 3>(
vec4f(1, 0, 0, 1), // red
vec4f(0, 1, 0, 1), // green
vec4f(0, 0, 1, 1), // blue
);
var vsOutput: OurVertexShaderOutput;
vsOutput.position = vec4f(pos[vertexIndex], 0.0, 1.0);
vsOutput.color = color[vertexIndex];
return vsOutput;
}
@fragment fn fs(fsInput: OurVertexShaderOutput) -> @location(0) vec4f {
return fsInput.color;
}
`,
});
const pipeline = device.createRenderPipeline({
label: "hardcoded rgb triangle pipeline",
layout: "auto",
vertex: {
module,
entryPoint: "vs",
},
fragment: {
module,
entryPoint: "fs",
targets: [{ format: presentationFormat }],
},
});
const renderPassDescriptor = {
label: "our basic canvas renderPass",
colorAttachments: [
{
// view: <- to be filled out when we render
clearValue: [0.3, 0.3, 0.3, 1],
loadOp: "clear",
storeOp: "store",
},
],
};
function render() {
// Get the current texture from the canvas context and
// set it as the texture to render to.
renderPassDescriptor.colorAttachments[0].view = context
.getCurrentTexture()
.createView();
const encoder = device.createCommandEncoder({
label: "render triangle encoder",
});
const pass = encoder.beginRenderPass(
renderPassDescriptor as GPURenderPassDescriptor
);
pass.setPipeline(pipeline);
pass.draw(3); // call our vertex shader 3 times
pass.end();
const commandBuffer = encoder.finish();
device.queue.submit([commandBuffer]);
}
const observer = new ResizeObserver((entries) => {
for (const entry of entries) {
const canvas = entry.target;
const width = entry.contentBoxSize[0].inlineSize;
const height = entry.contentBoxSize[0].blockSize;
(canvas as HTMLCanvasElement).width = Math.min(
width,
device.limits.maxTextureDimension2D
);
(canvas as HTMLCanvasElement).height = Math.min(
height,
device.limits.maxTextureDimension2D
);
// re-render
render();
}
});
observer.observe(canvas);
document.body.appendChild(canvas);
}
function fail(msg: string) {
// eslint-disable-next-line no-alert
alert(msg);
}
main();
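Aside: the observer above sizes the drawing buffer in CSS pixels, so on high-DPI displays the triangle renders at a lower resolution than the screen and looks soft. A sketch of a devicePixelRatio-aware variant, assuming the same `device` and `render` from the file above are in scope:

// Hypothetical variant: scale the drawing buffer by devicePixelRatio so the
// output stays sharp on high-DPI displays.
const observer = new ResizeObserver((entries) => {
for (const entry of entries) {
const canvas = entry.target as HTMLCanvasElement;
const dpr = window.devicePixelRatio || 1;
const width = entry.contentBoxSize[0].inlineSize * dpr;
const height = entry.contentBoxSize[0].blockSize * dpr;
canvas.width = Math.min(Math.round(width), device.limits.maxTextureDimension2D);
canvas.height = Math.min(Math.round(height), device.limits.maxTextureDimension2D);
render(); // re-render at the new resolution
}
});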
/Users/song/Code/webgpu_learn/webgpu-for-beginners/webgpu_learn_typescript/src_06_红色三角形_郭隆帮老师/main.ts
import { vertexShader, fragmentShader } from "./shader";
async function main() {
// request the adapter
const adapter = await navigator.gpu?.requestAdapter();
// request the GPU device object
const device = await adapter?.requestDevice();
if (!device) {
fail("need a browser that supports WebGPU");
return;
}
// Get a WebGPU context from the canvas and configure it
// create a canvas element and configure the GPU context so this element serves as the WebGPU drawing surface
const canvas = document.createElement("canvas");
canvas.style.width = "500px";
canvas.style.height = "300px";
const context = canvas.getContext("webgpu")!;
const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
context.configure({
device, // the GPU device object
format: presentationFormat, // color format the GPU renders in (e.g. rgba, bgra); the preferred format is fine here
});
const vertexArray = new Float32Array([
// x, y, z for each of the triangle's three vertices
0.0, 0.0, 0.0, // vertex 1
1.0, 0.0, 0.0, // vertex 2
0.0, 1.0, 0.0, // vertex 3
]);
// vertex buffer
const vertexBuffer = device.createBuffer({
size: vertexArray.byteLength, // byte length of the vertex data
// usage declares what the buffer is for (vertex buffer | destination for writes)
usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST,
});
// write the vertex data into the buffer
device.queue.writeBuffer(vertexBuffer, 0, vertexArray);
const pipeline = device.createRenderPipeline({
layout: "auto",
vertex: {
// vertex stage configuration
buffers: [
// layouts for every vertex buffer the pipeline uses
{
// layout of this one vertex buffer
arrayStride: 3 * 4, // bytes per vertex: x, y, z as three 4-byte floats
attributes: [
{
// one attribute within the vertex
shaderLocation: 0, // attribute slot; matches @location(0) in the vertex shader
format: "float32x3", // three 32-bit floats per vertex
offset: 0, // byte offset of this attribute within a vertex; 0 unless interleaving
},
],
},
],
module: device.createShaderModule({
label: "triangle vertex",
code: vertexShader,
}),
entryPoint: "main",
},
fragment: {
module: device.createShaderModule({
label: "fragment vertex",
code: fragmentShader,
}),
entryPoint: "main", //指定入口函数
targets: [
{
format: presentationFormat, // must match the color format configured on the WebGPU context
},
],
},
primitive: {
topology: "triangle-list", // draw triangles
// topology: "point-list", // draw points
// topology: "line-list", // draw lines
},
});
// create the GPU command encoder
const commandEncoder = device.createCommandEncoder();
const renderPass = commandEncoder.beginRenderPass({
label: "our basic canvas renderPass",
// attach the color buffer this render pass writes into
colorAttachments: [
{
// texture view for the canvas (the canvas's color buffer);
// pixels produced by this render pass are stored into it
view: context.getCurrentTexture().createView(),
storeOp: "store", //像素数据写入颜色缓冲区
loadOp: "clear",
clearValue: { r: 0.5, g: 0.5, b: 0.5, a: 1.0 }, // background color
},
],
});
// bind the vertex buffer and render pipeline to the pass
renderPass.setVertexBuffer(0, vertexBuffer);
renderPass.setPipeline(pipeline);
renderPass.draw(3); // call our vertex shader 3 times
renderPass.end();
// finish encoding into a command buffer
const commandBuffer = commandEncoder.finish();
device.queue.submit([commandBuffer]);
document.body.appendChild(canvas);
}
function fail(msg: string) {
// eslint-disable-next-line no-alert
alert(msg);
}
main();
// vertex shader code
const vertexShader = /* wgsl */ `
@vertex
fn main(@location(0) pos: vec3<f32>) -> @builtin(position) vec4<f32> {
return vec4<f32>(pos, 1.0);
}
`;
// fragment shader code
const fragmentShader = /* wgsl */ `
@fragment
fn main() -> @location(0) vec4<f32> {
return vec4<f32>(1.0, 0.0, 0.0, 1.0); // every fragment is red
}
`;
export { vertexShader, fragmentShader };
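Aside: `@location(0)` here is what `shaderLocation: 0` in the pipeline's buffer layout binds to, so the two files must agree. A hypothetical sketch (not part of this repo) of how both sides would grow if a per-vertex color were interleaved after the position:

// Hypothetical interleaved layout: x, y, z, r, g, b per vertex.
const buffers: GPUVertexBufferLayout[] = [
{
arrayStride: 6 * 4, // six 4-byte floats per vertex
attributes: [
{ shaderLocation: 0, format: "float32x3", offset: 0 }, // position
{ shaderLocation: 1, format: "float32x3", offset: 3 * 4 }, // color
],
},
];
// Matching vertex entry point signature:
// fn main(@location(0) pos: vec3<f32>, @location(1) color: vec3<f32>) -> ...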
/Users/song/Code/webgpu_learn/webgpu-for-beginners/webgpu_learn_typescript/src_01_测试是否支持webgpu/main.ts
const oApp = document.getElementById("app")!;
if (navigator.gpu) {
oApp.innerHTML = "web gpu ok";
} else {
oApp.innerHTML = "web gpu not ok";
}
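Aside: the presence of `navigator.gpu` only means the API is exposed; `requestAdapter()` can still resolve to null (e.g. a blocklisted GPU). A slightly stricter probe against the same `#app` element; the `checkWebGPU` helper is hypothetical:

// Hypothetical stricter check: probe for an adapter, not just the API object.
async function checkWebGPU(el: HTMLElement) {
if (!navigator.gpu) {
el.innerHTML = "web gpu not ok";
return;
}
const adapter = await navigator.gpu.requestAdapter();
el.innerHTML = adapter ? "web gpu ok" : "web gpu not ok (no adapter)";
}
checkWebGPU(document.getElementById("app")!);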
/Users/song/Code/webgpu_learn/webgpu-for-beginners/webgpu_learn_typescript/src_05_两倍数组/main.ts
async function main() {
const adapter = await navigator.gpu?.requestAdapter();
const device = await adapter?.requestDevice();
if (!device) {
console.log("need a browser that supports WebGPU");
return;
}
const module = device.createShaderModule({
label: "doubling compute module",
code: `
@group(0) @binding(0) var<storage, read_write> data: array<f32>;
@compute @workgroup_size(1) fn computeSomething(
@builtin(global_invocation_id) id: vec3<u32>
) {
let i = id.x;
data[i] = data[i] * 2.0;
}
`,
});
const pipeline = device.createComputePipeline({
label: "doubling compute pipeline",
layout: "auto",
compute: {
module,
entryPoint: "computeSomething",
},
});
const input = new Float32Array([1, 3, 5]);
// create a buffer on the GPU to hold our computation
// input and output
const workBuffer = device.createBuffer({
label: "work buffer",
size: input.byteLength,
usage:
GPUBufferUsage.STORAGE |
GPUBufferUsage.COPY_SRC |
GPUBufferUsage.COPY_DST,
});
// Copy our input data to that buffer
device.queue.writeBuffer(workBuffer, 0, input);
// create a buffer on the GPU to get a copy of the results
const resultBuffer = device.createBuffer({
label: "result buffer",
size: input.byteLength,
usage: GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST,
});
// Setup a bindGroup to tell the shader which
// buffer to use for the computation
const bindGroup = device.createBindGroup({
label: "bindGroup for work buffer",
layout: pipeline.getBindGroupLayout(0),
entries: [{ binding: 0, resource: { buffer: workBuffer } }],
});
// Encode commands to do the computation
const encoder = device.createCommandEncoder({
label: "doubling encoder",
});
const pass = encoder.beginComputePass({
label: "doubling compute pass",
});
pass.setPipeline(pipeline);
pass.setBindGroup(0, bindGroup);
pass.dispatchWorkgroups(input.length);
pass.end();
// Encode a command to copy the results to a mappable buffer.
encoder.copyBufferToBuffer(workBuffer, 0, resultBuffer, 0, resultBuffer.size);
// Finish encoding and submit the commands
const commandBuffer = encoder.finish();
device.queue.submit([commandBuffer]);
// Read the results
await resultBuffer.mapAsync(GPUMapMode.READ);
const result = new Float32Array(resultBuffer.getMappedRange().slice(0));
resultBuffer.unmap();
console.log("input", input);
console.log("result", result);
}
main();
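Aside: `@workgroup_size(1)` runs one thread per workgroup, the simplest mapping but one that leaves most of the GPU idle. The usual pattern is a larger workgroup plus `ceil(n / size)` dispatches and a bounds check; a sketch under those assumptions (only these parts of the file would change):

// Hypothetical variant: 64 threads per workgroup. The bounds check matters
// because the dispatch count is rounded up past the array length.
const WORKGROUP_SIZE = 64;
const code = /* wgsl */ `
@group(0) @binding(0) var<storage, read_write> data: array<f32>;
@compute @workgroup_size(${WORKGROUP_SIZE}) fn computeSomething(
@builtin(global_invocation_id) id: vec3<u32>
) {
let i = id.x;
if (i < arrayLength(&data)) {
data[i] = data[i] * 2.0;
}
}
`;
// ...and at dispatch time:
// pass.dispatchWorkgroups(Math.ceil(input.length / WORKGROUP_SIZE));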
/Users/song/Code/webgpu_learn/webgpu-for-beginners/webgpu_learn_typescript/src_03_纯红色三角形/main.ts
async function main() {
const adapter = await navigator.gpu?.requestAdapter();
const device = await adapter?.requestDevice();
if (!device) {
fail("need a browser that supports WebGPU");
return;
}
// Get a WebGPU context from the canvas and configure it
const canvas = document.createElement("canvas");
canvas.style.width = "500px";
canvas.style.height = "300px";
canvas.style.border = "1px solid red";
// const canvas = document.querySelector("canvas");
const context = canvas.getContext("webgpu")!;
const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
context.configure({
device,
format: presentationFormat,
});
//
const module = device.createShaderModule({
label: "our hardcoded rgb triangle shaders",
code: `
struct OurVertexShaderOutput {
@builtin(position) position: vec4f,
@location(0) color: vec4f,
};
@vertex fn vs(
@builtin(vertex_index) vertexIndex : u32
) -> OurVertexShaderOutput {
// positions (clip space)
var pos = array<vec2f, 3>(
vec2f(0.0, 0.0), // center
vec2f(1.0, 0.0), // middle right
vec2f(0.0, 1.0)  // top center
);
// per-vertex colors (kept for reference; unused below)
var color = array<vec4f, 3>(
vec4f(1, 0, 0, 1), // red
vec4f(0, 1, 0, 1), // green
vec4f(0, 0, 1, 1), // blue
);
var vsOutput: OurVertexShaderOutput;
vsOutput.position = vec4f(pos[vertexIndex], 0.0, 1.0);
// solid red triangle
// vsOutput.color = color[vertexIndex];
vsOutput.color = vec4f(1, 0, 0, 0.5);
return vsOutput;
}
@fragment fn fs(fsInput: OurVertexShaderOutput) -> @location(0) vec4f {
return fsInput.color;
}
`,
});
const pipeline = device.createRenderPipeline({
label: "hardcoded rgb triangle pipeline",
layout: "auto",
vertex: {
module,
entryPoint: "vs",
},
fragment: {
module,
entryPoint: "fs",
targets: [{ format: presentationFormat }],
},
});
const renderPassDescriptor = {
label: "our basic canvas renderPass",
colorAttachments: [
{
// view: <- to be filled out when we render
clearValue: [1.0, 1.0, 1.0, 1],
loadOp: "clear",
storeOp: "store",
},
],
};
function render() {
// Get the current texture from the canvas context and
// set it as the texture to render to.
renderPassDescriptor.colorAttachments[0].view = context
.getCurrentTexture()
.createView();
const encoder = device.createCommandEncoder({
label: "render triangle encoder",
});
const pass = encoder.beginRenderPass(
renderPassDescriptor as GPURenderPassDescriptor
);
pass.setPipeline(pipeline);
pass.draw(3); // call our vertex shader 3 times
pass.end();
const commandBuffer = encoder.finish();
device.queue.submit([commandBuffer]);
}
const observer = new ResizeObserver((entries) => {
for (const entry of entries) {
const canvas = entry.target;
const width = entry.contentBoxSize[0].inlineSize;
const height = entry.contentBoxSize[0].blockSize;
(canvas as HTMLCanvasElement).width = Math.min(
width,
device.limits.maxTextureDimension2D
);
(canvas as HTMLCanvasElement).height = Math.min(
height,
device.limits.maxTextureDimension2D
);
// re-render
render();
}
});
observer.observe(canvas);
document.body.appendChild(canvas);
}
function fail(msg: string) {
// eslint-disable-next-line no-alert
alert(msg);
}
main();
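Aside: the 0.5 alpha in `vsOutput.color` has no visible effect because the pipeline declares no blend state, so the fragment color is written as-is. If translucency were actually wanted, the color target would need blending enabled; a hypothetical sketch using the same `presentationFormat`:

// Hypothetical: source-over alpha blending on the color target.
const targets: GPUColorTargetState[] = [
{
format: presentationFormat,
blend: {
color: {
srcFactor: "src-alpha",
dstFactor: "one-minus-src-alpha",
operation: "add",
},
alpha: {
srcFactor: "one",
dstFactor: "one-minus-src-alpha",
operation: "add",
},
},
},
];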
/Users/song/Code/webgpu_learn/webgpu-for-beginners/webgpu_learn_typescript/src/main.ts
(identical to src_06_红色三角形_郭隆帮老师/main.ts and the shader.ts that follows it above)