Code walkthrough:
Parts of this code were already explained in the previous example; please refer to Rotating Cube.
import { mat4, vec3 } from 'wgpu-matrix';
import { makeSample, SampleInit } from '../../components/SampleLayout';
import {
cubeVertexArray,
cubeVertexSize,
cubeUVOffset,
cubePositionOffset,
cubeVertexCount,
} from '../../meshes/cube';
import basicVertWGSL from '../../shaders/basic.vert.wgsl';
import vertexPositionColorWGSL from '../../shaders/vertexPositionColor.frag.wgsl';
const init: SampleInit = async ({ canvas, pageState }) => {
const adapter = await navigator.gpu.requestAdapter();
const device = await adapter.requestDevice();
if (!pageState.active) return;
const context = canvas.getContext('webgpu') as GPUCanvasContext;
const devicePixelRatio = window.devicePixelRatio;
canvas.width = canvas.clientWidth * devicePixelRatio;
canvas.height = canvas.clientHeight * devicePixelRatio;
const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
context.configure({
device,
format: presentationFormat,
alphaMode: 'premultiplied',
});
// Create a vertex buffer from the cube data.
const verticesBuffer = device.createBuffer({
size: cubeVertexArray.byteLength,
usage: GPUBufferUsage.VERTEX,
mappedAtCreation: true,
});
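// mappedAtCreation hands back CPU-writable memory right away; fill it, then unmap before the GPU uses it.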
new Float32Array(verticesBuffer.getMappedRange()).set(cubeVertexArray);
verticesBuffer.unmap();
const pipeline = device.createRenderPipeline({
layout: 'auto',
vertex: {
module: device.createShaderModule({
code: basicVertWGSL,
}),
entryPoint: 'main',
buffers: [
{
arrayStride: cubeVertexSize,
attributes: [
{
// position
shaderLocation: 0,
offset: cubePositionOffset,
format: 'float32x4',
},
{
// uv
shaderLocation: 1,
offset: cubeUVOffset,
format: 'float32x2',
},
],
},
],
},
fragment: {
module: device.createShaderModule({
code: vertexPositionColorWGSL,
}),
entryPoint: 'main',
targets: [
{
format: presentationFormat,
},
],
},
primitive: {
topology: 'triangle-list',
// Backface culling since the cube is solid piece of geometry.
// Faces pointing away from the camera will be occluded by faces
// pointing toward the camera.
cullMode: 'back',
},
// Enable depth testing so that the fragment closest to the camera
// is rendered in front.
depthStencil: {
depthWriteEnabled: true,
depthCompare: 'less',
format: 'depth24plus',
},
});
const depthTexture = device.createTexture({
size: [canvas.width, canvas.height],
format: 'depth24plus',
usage: GPUTextureUsage.RENDER_ATTACHMENT,
});
const matrixSize = 4 * 16; // 4x4 matrix
const offset = 256; // uniformBindGroup offset must be 256-byte aligned
const uniformBufferSize = offset + matrixSize;
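// The single uniform buffer holds both matrices: [MVP1 at offset 0 | padding | MVP2 at offset 256].
// 256 is the default value of the WebGPU limit minUniformBufferOffsetAlignment; a more robust
// version could query device.limits.minUniformBufferOffsetAlignment instead of hard-coding it.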
const uniformBuffer = device.createBuffer({
size: uniformBufferSize,
usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
});
// Create the first uniform bind group; it reads matrixSize bytes at offset 0 of the shared buffer.
const uniformBindGroup1 = device.createBindGroup({
layout: pipeline.getBindGroupLayout(0),
entries: [
{
binding: 0,
resource: {
buffer: uniformBuffer,
offset: 0,
size: matrixSize,
},
},
],
});
// Create the second uniform bind group; it reads matrixSize bytes at the 256-byte-aligned offset.
const uniformBindGroup2 = device.createBindGroup({
layout: pipeline.getBindGroupLayout(0),
entries: [
{
binding: 0,
resource: {
buffer: uniformBuffer,
offset: offset,
size: matrixSize,
},
},
],
});
const renderPassDescriptor: GPURenderPassDescriptor = {
colorAttachments: [
{
view: undefined, // Assigned later
clearValue: { r: 0.5, g: 0.5, b: 0.5, a: 1.0 },
loadOp: 'clear',
storeOp: 'store',
},
],
depthStencilAttachment: {
view: depthTexture.createView(),
depthClearValue: 1.0,
depthLoadOp: 'clear',
depthStoreOp: 'store',
},
};
const aspect = canvas.width / canvas.height;
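// Perspective projection with a 72° (2π/5 rad) vertical field of view, near plane 1, far plane 100.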
const projectionMatrix = mat4.perspective(
(2 * Math.PI) / 5,
aspect,
1,
100.0
);
// Create the model matrix for the first cube (translated to the left).
const modelMatrix1 = mat4.translation(vec3.create(-2, 0, 0));
// Create the model matrix for the second cube (translated to the right).
const modelMatrix2 = mat4.translation(vec3.create(2, 0, 0));
// Create the MVP matrix for the first cube.
const modelViewProjectionMatrix1 = mat4.create() as Float32Array;
// Create the MVP matrix for the second cube.
const modelViewProjectionMatrix2 = mat4.create() as Float32Array;
// Create the view matrix (camera pulled back 7 units along -Z).
const viewMatrix = mat4.translation(vec3.fromValues(0, 0, -7));
const tmpMat41 = mat4.create();
const tmpMat42 = mat4.create();
function updateTransformationMatrix() {
const now = Date.now() / 1000;
// Rotate model matrix 1 around a time-varying axis.
mat4.rotate(
modelMatrix1,
vec3.fromValues(Math.sin(now), Math.cos(now), 0),
1,
tmpMat41
);
// Rotate model matrix 2 around a different time-varying axis.
mat4.rotate(
modelMatrix2,
vec3.fromValues(Math.cos(now), Math.sin(now), 0),
1,
tmpMat42
);
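// Compose MVP = projection * view * model for each cube; wgpu-matrix's
// mat4.multiply(a, b, dst) computes a * b and stores the result in dst.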
mat4.multiply(viewMatrix, tmpMat41, modelViewProjectionMatrix1);
mat4.multiply(
projectionMatrix,
modelViewProjectionMatrix1,
modelViewProjectionMatrix1
);
mat4.multiply(viewMatrix, tmpMat42, modelViewProjectionMatrix2);
mat4.multiply(
projectionMatrix,
modelViewProjectionMatrix2,
modelViewProjectionMatrix2
);
}
function frame() {
// Sample is no longer the active page.
if (!pageState.active) return;
// Update both MVP matrices.
updateTransformationMatrix();
// Write MVP 1 into the uniform buffer at offset 0.
device.queue.writeBuffer(
uniformBuffer,
0,
modelViewProjectionMatrix1.buffer,
modelViewProjectionMatrix1.byteOffset,
modelViewProjectionMatrix1.byteLength
);
// Write MVP 2 into the uniform buffer at the aligned offset.
device.queue.writeBuffer(
uniformBuffer,
offset,
modelViewProjectionMatrix2.buffer,
modelViewProjectionMatrix2.byteOffset,
modelViewProjectionMatrix2.byteLength
);
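// The canvas's current texture changes every frame, so its view must be reacquired here.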
renderPassDescriptor.colorAttachments[0].view = context
.getCurrentTexture()
.createView();
const commandEncoder = device.createCommandEncoder();
const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
passEncoder.setPipeline(pipeline);
// Set the vertex buffer shared by both cubes.
passEncoder.setVertexBuffer(0, verticesBuffer);
// Bind the bind group (with the transformation matrix) for
// each cube, and draw.
// Set bind group 1 and draw the first cube.
passEncoder.setBindGroup(0, uniformBindGroup1);
passEncoder.draw(cubeVertexCount);
// Set bind group 2 and draw the second cube.
passEncoder.setBindGroup(0, uniformBindGroup2);
passEncoder.draw(cubeVertexCount);
passEncoder.end();
device.queue.submit([commandEncoder.finish()]);
requestAnimationFrame(frame);
}
requestAnimationFrame(frame);
};
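Though omitted above, the file presumably ends by handing init to the imported makeSample helper. A minimal sketch, assuming the SampleLayout API from the webgpu-samples repository (the field values here are illustrative, and fields such as sources are elided):
// Sketch: wiring init into the page framework (field values assumed).
const TwoCubes = () =>
makeSample({
name: 'Two Cubes', // assumed title
description: 'Renders two rotating cubes driven by one uniform buffer.', // assumed text
init,
});
export default TwoCubes;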
Vertex shader (the same as in the previous example, so not explained in detail):
struct Uniforms {
modelViewProjectionMatrix : mat4x4<f32>,
}
@binding(0) @group(0) var<uniform> uniforms : Uniforms;
struct VertexOutput {
@builtin(position) Position : vec4<f32>,
@location(0) fragUV : vec2<f32>,
@location(1) fragPosition: vec4<f32>,
}
@vertex
fn main(
@location(0) position : vec4<f32>,
@location(1) uv : vec2<f32>
) -> VertexOutput {
var output : VertexOutput;
output.Position = uniforms.modelViewProjectionMatrix * position;
output.fragUV = uv;
output.fragPosition = 0.5 * (position + vec4(1.0, 1.0, 1.0, 1.0));
return output;
}
Fragment shader (the same as in the previous example, so not explained in detail):
@fragment
fn main(
@location(0) fragUV: vec2<f32>,
@location(1) fragPosition: vec4<f32>
) -> @location(0) vec4<f32> {
return fragPosition;
}
The vertex data is also the same as in the previous example; a sketch of its layout follows.
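For reference, here is a minimal sketch of what the imported meshes/cube module might export. The concrete numbers are assumptions inferred from the attribute formats configured above (float32x4 position at cubePositionOffset, float32x2 uv at cubeUVOffset); consult meshes/cube.ts in the sample repository for the actual data.
// Hypothetical sketch of meshes/cube.ts; values are assumed, not copied from the sample.
// Each vertex is packed as: position (4 floats), color (4 floats), uv (2 floats).
export const cubeVertexSize = 4 * 10; // byte stride per vertex (10 floats, assumed)
export const cubePositionOffset = 0; // position starts at byte 0
export const cubeUVOffset = 4 * 8; // uv follows position and color (assumed)
export const cubeVertexCount = 36; // 6 faces * 2 triangles * 3 vertices
export const cubeVertexArray = new Float32Array([
// x, y, z, w,  r, g, b, a,  u, v  (36 vertices, data elided here)
]);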
Summary of steps:
- Render two cubes using the same render pipeline.
- Create two different uniform bind groups from the same layout; they share one uniform buffer and differ only in their buffer offset.
- Define a different modelMatrix and a different MVP matrix for each cube, and update each model matrix's rotation every frame.
- Call device.queue.writeBuffer twice to write the two MVP matrices into the buffer.
- On the pass encoder, set the uniform bind group twice and issue a draw after each:
passEncoder.setBindGroup(0, uniformBindGroup1);
passEncoder.draw(cubeVertexCount);
passEncoder.setBindGroup(0, uniformBindGroup2);
passEncoder.draw(cubeVertexCount);
- Finally, as before, call
passEncoder.end()
device.queue.submit([commandEncoder.finish()])
to render.
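As a side note, the same result can be achieved with a single bind group plus dynamic offsets instead of two bind groups. A minimal sketch, assuming an explicitly created bind group layout with hasDynamicOffset enabled (which the sample above does not use, since it relies on layout: 'auto'):
// Sketch: one bind group, selecting the matrix per draw via a dynamic offset.
const bindGroupLayout = device.createBindGroupLayout({
entries: [
{
binding: 0,
visibility: GPUShaderStage.VERTEX,
buffer: { type: 'uniform', hasDynamicOffset: true, minBindingSize: matrixSize },
},
],
});
const dynamicBindGroup = device.createBindGroup({
layout: bindGroupLayout,
entries: [
{ binding: 0, resource: { buffer: uniformBuffer, size: matrixSize } },
],
});
// The pipeline would then need an explicit layout instead of 'auto', e.g.
// layout: device.createPipelineLayout({ bindGroupLayouts: [bindGroupLayout] }).
// In the render loop, the one bind group serves both cubes:
passEncoder.setBindGroup(0, dynamicBindGroup, [0]); // first cube, offset 0
passEncoder.draw(cubeVertexCount);
passEncoder.setBindGroup(0, dynamicBindGroup, [offset]); // second cube, offset 256
passEncoder.draw(cubeVertexCount);
This avoids creating one bind group per object, which becomes convenient as the number of objects grows.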