参考自《Unity Shader入门精要》
原理梳理
- 存储上一帧渲染效果的成像矩阵和当前帧成像逆矩阵
- 通过深度值获得当前NDC坐标
- 通过NDC坐标和当前帧逆矩阵获得顶点世界坐标
- 顶点世界坐标与上一帧成像矩阵获取上一帧NDC坐标,求取同一顶点在两帧内NDC距离的差值
- 根据距离计算叠加上一帧uv和像素值
- 片元返回叠加后采样效果,即该帧显示两帧效果,实现运动模糊
实现效果
C#代码
void OnEnable()
{
    // Request a camera depth texture so the shader can sample
    // _CameraDepthTexture. The original `camera.Main` does not compile
    // (Unity's static property is `Camera.main`); use the same `camera_`
    // field that OnRenderImage reads. `|=` preserves any other
    // DepthTextureMode flags already set on the camera.
    camera_.depthTextureMode |= DepthTextureMode.Depth;
}
/*
OnRenderImage为MonoBehaviour内模板函数,类似于Awake等
通过该方法获取屏幕成像前的RenderTexture
该方法是后期屏幕效果处理的基础
*/
void OnRenderImage(RenderTexture src, RenderTexture dest)
{
    // Without a material there is nothing to post-process: copy through.
    if (material == null)
    {
        Graphics.Blit(src, dest);
        return;
    }

    material.SetFloat("_BlurSize", blurSize);
    // Last frame's view-projection matrix, cached at the end of the
    // previous call; the shader re-projects world positions with it.
    material.SetMatrix("_PreviousViewProjectionMatrix", previousViewProjectionMatrix);

    // Current VP = projection * view. The world-space form is used so the
    // shader can reconstruct world positions from depth.
    Matrix4x4 viewProj = camera_.projectionMatrix * camera_.worldToCameraMatrix;
    // Its inverse maps NDC + depth back to world space in the shader.
    Matrix4x4 viewProjInverse = viewProj.inverse;
    material.SetMatrix("_CurrentViewProjectionInverseMatrix", viewProjInverse);

    // Remember this frame's VP matrix for the next frame's re-projection.
    previousViewProjectionMatrix = viewProj;

    // Run the material's shader over the source image into dest.
    Graphics.Blit(src, dest, material);
}
Shader 代码
Shader "Youcai/Chapter13/MotionBlurDepth"
{
    Properties
    {
        _MainTex("Base (RGB)", 2D) = "white"{}
        _BlurSize("BlurSize", float) = 0.5
    }
    SubShader
    {
        CGINCLUDE
        #include "UnityCG.cginc"

        sampler2D _MainTex;
        // Texel size of _MainTex; a negative y signals that the platform
        // (Direct3D-style, UV origin at top) has vertically flipped the image.
        half4 _MainTex_TexelSize;
        // Camera depth texture, provided by Unity once the camera's
        // depthTextureMode includes DepthTextureMode.Depth.
        sampler2D _CameraDepthTexture;
        // Inverse of this frame's view-projection matrix (NDC -> world).
        float4x4 _CurrentViewProjectionInverseMatrix;
        // Last frame's view-projection matrix (world -> last frame's NDC).
        float4x4 _PreviousViewProjectionMatrix;
        half _BlurSize;

        struct v2f
        {
            float4 pos : SV_POSITION;
            half2 uv : TEXCOORD0;
            half2 uv_depth : TEXCOORD1;
        };

        v2f vert(appdata_img v)
        {
            v2f o;
            o.pos = UnityObjectToClipPos(v.vertex);
            o.uv = v.texcoord;
            o.uv_depth = v.texcoord;
            // Platform UV-origin difference: flip the depth UV only when the
            // main texture was actually flipped (texel size y < 0). The
            // original code flipped unconditionally, which inverts the depth
            // lookup whenever no flip occurred, and left _MainTex_TexelSize
            // unused.
            #if UNITY_UV_STARTS_AT_TOP
            if (_MainTex_TexelSize.y < 0)
                o.uv_depth.y = 1 - o.uv_depth.y;
            #endif
            return o;
        }

        fixed4 frag(v2f i) : SV_Target
        {
            // Non-linear depth of this pixel from the camera depth texture.
            float d = SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture, i.uv_depth);
            // Rebuild the NDC position: uv and depth remapped to [-1, 1].
            float4 H = float4(i.uv.x * 2 - 1, i.uv.y * 2 - 1, d * 2 - 1, 1);
            // NDC -> world via the inverse VP matrix; the divide by w
            // completes the inverse perspective projection.
            float4 D = mul(_CurrentViewProjectionInverseMatrix, H);
            float4 worldPos = D / D.w;
            float4 currentPos = H;
            // Re-project the world position with last frame's VP matrix to
            // get the same point's NDC position one frame ago.
            float4 previousPos = mul(_PreviousViewProjectionMatrix, worldPos);
            previousPos /= previousPos.w;
            // Screen-space velocity = NDC offset between the two frames.
            float2 velocity = (currentPos.xy - previousPos.xy);
            float2 uv = i.uv;
            float4 c = tex2D(_MainTex, uv);
            // Second sample offset along the velocity, scaled by _BlurSize.
            uv += velocity * _BlurSize;
            c += tex2D(_MainTex, uv);
            // Average the two samples to produce the blurred result.
            c /= 2;
            return fixed4(c.rgb, 1.0);
        }
        ENDCG

        Pass
        {
            ZTest Always Cull Off ZWrite Off

            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            ENDCG
        }
    }
    Fallback Off
}
网友评论