unity全局雾处理
全屏后处理全局雾
网上找到的基本是要计算摄影机边界的4条射线,然后标准化插值后射线方向,利用深度还原场景的世界坐标,代码比较繁琐,要向shader多传递4条射线参数
如下图
其实有更简化的处理方式, 用unity shader自带的全局变量与函数,加上场景深度,可以直接在shader里面还原世界坐标,然后完成雾效处理
mtx_view_inv:观察(视图)矩阵的逆 cs脚本中绑定,值为:Camera.worldToCameraMatrix.inverse
mtx_proj_inv:投影矩阵的逆 cs脚本中绑定,值为 Camera.projectionMatrix.inverse
_CameraDepthTexture:场景深度纹理,shader的自带全局变量,通过SAMPLE_DEPTH_TEXTURE宏采样
SAMPLE_DEPTH_TEXTURE:宏处理CameraDepthTexture采样
LinearEyeDepth:根据(0,1)之间的深度纹理还原视深度,即摄影机空间的z坐标
还原世界坐标代码如下:
有了世界坐标,距离雾和高度雾就可以处理了
对应的.cs脚本就可以不用繁琐的计算射线,只要绑定上述对应的两个投影与摄影机变换逆矩阵,
注意:这两个矩阵必须由外部绑定,在Post全屏路径处理下,
unity内部自带的两个矩阵unity_CameraInvProjection unity_CameraToWorld是不能用的,
它们的值会被更改成为画全屏四边形对应的逆矩阵,而不是camera对象的投影与变换矩阵的逆
如下图,fogMaterial是对应的shader材质Material对象
// Per-frame post-process hook: binds the inverse view/projection matrices
// to the fog material, then blits the rendered image through it.
void OnRenderImage(RenderTexture src, RenderTexture dest)
{
Camera cam = GetComponent<Camera>();
// Inverse of the view matrix: camera space -> world space.
var mtx_view_inv = cam.worldToCameraMatrix.inverse;
// Inverse of the projection matrix: clip space -> camera space.
var mtx_proj_inv = cam.projectionMatrix.inverse;
// NOTE(review): world-to-clip is projectionMatrix * worldToCameraMatrix,
// so clip-to-world should be (proj * view).inverse; this computes
// (view * proj).inverse instead. Harmless only because the shader never
// reads _mtx_clip_to_world.
Matrix4x4 temp = cam.worldToCameraMatrix * cam.projectionMatrix;
Matrix4x4 mtx_clip_to_world = temp.inverse;
fogMaterial.SetMatrix("_mtx_clip_to_world", mtx_clip_to_world);
fogMaterial.SetMatrix("_mtx_view_inv", mtx_view_inv);
fogMaterial.SetMatrix("_mtx_proj_inv", mtx_proj_inv);
Graphics.Blit(src, dest, fogMaterial);
}
以下贴出完整的shader代码,
// Full-screen post-process fog: reconstructs the world-space position of each
// pixel from the camera depth texture plus the externally bound inverse
// view/projection matrices, then blends a fog colour by distance and height.
Shader "lsc/test_post_fog"
{
Properties
{
_MainTex ("Texture", 2D) = "white" {}
}
SubShader
{
// No culling or depth
Cull Off ZWrite Off ZTest Always
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
// Scene depth texture; Unity provides it when the camera's
// depthTextureMode includes DepthTextureMode.Depth.
sampler2D _CameraDepthTexture;
// Inverse view matrix (camera -> world), bound from the C# script
// (Camera.worldToCameraMatrix.inverse). Must be bound externally: the
// built-in unity_CameraToWorld is overwritten during the blit pass.
float4x4 _mtx_view_inv;
// Inverse projection matrix (clip -> camera), bound from the C# script
// (Camera.projectionMatrix.inverse); same caveat as above.
float4x4 _mtx_proj_inv;
// NOTE(review): bound by the script but never read in this shader.
float4x4 _mtx_clip_to_world;
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float2 uv : TEXCOORD0;
float4 vertex : SV_POSITION;
float4 screen_pos : TEXCOORD1;
// Full-screen uv remapped to [-1,1]; used as the NDC xy in frag.
float2 ndc_pos : TEXCOORD2;
};
v2f vert (appdata v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.uv = v.uv;
o.screen_pos = ComputeScreenPos(o.vertex);
// Map uv from [0,1] to NDC range [-1,1].
o.ndc_pos = (v.uv) * 2.0 - 1.0;
return o;
}
sampler2D _MainTex;
fixed4 frag(v2f i) : SV_Target
{
float4 view_pos;
float3 world_pos;
float depth01 = SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture, i.uv);
//lsc recover the linear depth, i.e. the z coordinate in camera (view) space
float linearDepthZ = LinearEyeDepth(depth01);
//lsc assemble an NDC-space position from the remapped uv and the raw depth
float4 screen_pos = float4(i.ndc_pos.x, i.ndc_pos.y, depth01, 1);
//lsc scale by the linear eye depth to form homogeneous clip coordinates
screen_pos = screen_pos * linearDepthZ;
//lsc recover the camera (view) space position
view_pos = mul(_mtx_proj_inv, screen_pos);
//lsc and from there the world-space position
world_pos = mul(_mtx_view_inv, fixed4(view_pos.xyz, 1));
// Height factor: h_percent ramps 0->1 as world y goes 0->20 (clamped),
// so fac_h falls from 1 to exp(-1) with altitude.
float h_percent = saturate(((world_pos.y - 0.0f) / 20.0f));
float fac_h = exp(-h_percent * h_percent);
// Distance factor: d_percent is 1 for dis <= 100 and 0 for dis >= 200,
// so fac_d rises from exp(-1) near the camera to 1 in the far range.
float dis = length(world_pos.xyz - _WorldSpaceCameraPos.xyz);
float d_percent = 1 - ((dis - 100.0) / 100.0f);
d_percent = saturate(d_percent);
float fac_d = exp(-d_percent * d_percent);
fixed4 final_col;
final_col.w = 1.0;
fixed4 col = tex2D(_MainTex, i.uv);
// Blend the scene colour toward the fog colour (purple, easy to spot while testing).
final_col.rgb = lerp(col.rgb, fixed3(0.5, 0.0, 0.5), fac_d * fac_h);
return final_col;
}
ENDCG
}
}
}
对应的.cs代码
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
// Full-screen fog post effect. Attach to a Camera; each frame it binds the
// inverse view/projection matrices so the fog shader can reconstruct
// world-space positions from the camera depth texture (_CameraDepthTexture).
public class cam_postprocess : MonoBehaviour
{
    //public Shader fogShader;
    public Material fogMaterial = null;

    // Cached Camera component; avoids a GetComponent call on every rendered frame.
    private Camera _cam;

    void Awake()
    {
        _cam = GetComponent<Camera>();
    }

    void Start()
    {
        // The fog shader only samples _CameraDepthTexture, so request Depth alone.
        // (DepthNormals / MotionVectors cost extra render passes and were unused.)
        _cam.depthTextureMode |= DepthTextureMode.Depth;
    }

    // Per-frame post-process hook: bind the matrices and blit the image
    // through the fog material.
    void OnRenderImage(RenderTexture src, RenderTexture dest)
    {
        if (fogMaterial == null)
        {
            // No material assigned: pass the image through unchanged.
            Graphics.Blit(src, dest);
            return;
        }

        Camera cam = _cam != null ? _cam : GetComponent<Camera>();

        // Inverse view matrix: camera space -> world space.
        var mtx_view_inv = cam.worldToCameraMatrix.inverse;
        // Inverse projection matrix: clip space -> camera space.
        var mtx_proj_inv = cam.projectionMatrix.inverse;

        // World-to-clip is projection * view, so clip-to-world is the inverse of
        // that product. (The original multiplied view * projection — wrong order;
        // the shader does not currently read _mtx_clip_to_world, but keep it correct.)
        Matrix4x4 mtx_clip_to_world = (cam.projectionMatrix * cam.worldToCameraMatrix).inverse;

        fogMaterial.SetMatrix("_mtx_clip_to_world", mtx_clip_to_world);
        fogMaterial.SetMatrix("_mtx_view_inv", mtx_view_inv);
        fogMaterial.SetMatrix("_mtx_proj_inv", mtx_proj_inv);
        Graphics.Blit(src, dest, fogMaterial);
    }
}
运行效果:
author: 飞天大蟾蜍 14141029@qq.com