UnityShader 屏幕后处理
亮度,饱和度,对比度调整
要对屏幕进行后处理,首先需要获取屏幕的画面。在Unity中,使用Graphics.Blit(source, destination, material)来处理屏幕画面,source为原纹理,destination是目标纹理,material为材质,该函数会使用material.shader来处理原纹理,然后返回目标纹理。对原纹理的处理与对贴图纹理的处理相同。
挂载在Camera上的脚本:
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
public class BrightnessSaturationAndContrast : PostEffectsBase
{
    // Shader that implements the brightness/saturation/contrast adjustment.
    public Shader mshader;
    private Material mat;

    // Lazily builds (and caches) the material from mshader; returns null
    // if the shader is missing or unsupported (handled by the base class).
    public Material material
    {
        get
        {
            mat = CheckShaderAndCreateMaterial(mshader, mat);
            return mat;
        }
    }

    // 1.0 means "unchanged" for all three parameters.
    [Range(0.0f, 3.0f)]
    public float brightness = 1.0f;
    [Range(0.0f, 3.0f)]
    public float saturation = 1.0f;
    [Range(0.0f, 3.0f)]
    public float contrast = 1.0f;

    // Unity callback: runs after the camera renders; pushes the current
    // parameters to the material and blits the screen through it.
    private void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        if (material != null)
        {
            material.SetFloat("_Brightness", brightness);
            material.SetFloat("_Saturation", saturation);
            material.SetFloat("_Contrast", contrast);
            Graphics.Blit(source, destination, material);
        }
        else
        {
            // No usable material: pass the image through untouched.
            Graphics.Blit(source, destination);
        }
    }
}
脚本使用的Shader:
// Screen post-effect: brightness / saturation / contrast adjustment.
// Driven by BrightnessSaturationAndContrast.cs via Graphics.Blit.
Shader "Unlit/BSCPostEffectMat"
{
Properties
{
// _MainTex receives the screen image from Graphics.Blit.
_MainTex ("Texture", 2D) = "white" {}
// 1 = unchanged for all three parameters.
_Brightness("Brightness", float) = 1
_Saturation("Saturation", float) = 1
_Contrast("Contrast", float) = 1
}
SubShader
{
Tags { "RenderType"="Opaque" }
Pass
{
// Standard render states for screen post-processing: the full-screen
// quad must always pass the depth test and must not write depth.
ZWrite Off
ZTest Always
Cull Off
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
float _Brightness;
float _Saturation;
float _Contrast;
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float2 uv : TEXCOORD0;
float4 vertex : SV_POSITION;
};
sampler2D _MainTex;
float4 _MainTex_ST;
// Pass-through vertex shader for the blit quad.
v2f vert (appdata v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.uv = TRANSFORM_TEX(v.uv, _MainTex);
return o;
}
fixed4 frag (v2f i) : SV_Target
{
fixed4 col = tex2D(_MainTex, i.uv);
// Brightness: plain scale of the color.
fixed3 finalColor = col.rgb * _Brightness;
// Rec.709-style luma used as the fully-desaturated (grayscale) color.
fixed luminance = 0.2125 * col.r + 0.7154 * col.g + 0.0721 * col.b;
fixed3 luminanceColor = fixed3(luminance, luminance, luminance);
// Saturation: 0 = grayscale, 1 = unchanged, >1 = oversaturated.
finalColor = lerp(luminanceColor, finalColor, _Saturation);
// Contrast: interpolate away from mid-gray; 0 = flat gray, 1 = unchanged.
fixed3 avgColor = fixed3(0.5, 0.5, 0.5);
finalColor = lerp(avgColor,finalColor, _Contrast);
return fixed4(finalColor, 1);
}
ENDCG
}
}
}
luminance = 0.2125 * col.r + 0.7154 * col.g + 0.0721 * col.b;
以上为去色公式,用于将颜色饱和度变为0,即黑白图像。以下为调整过的屏幕图像与屏幕原图像。
边缘检测
在屏幕后处理过程中,很多时候我们需要获得描边效果。显然可知,需要描边的像素点都是位于色彩分界线处的像素。所以只需要使用边缘检测算子卷积计算各像素点是否在色彩分界线上即可。
一般是将垂直方向检测的算子和水平方向检测的算子的绝对值相加作为描边程度的判断依据。
挂载在Camera上的脚本:
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
public class EdgeDetection : PostEffectsBase
{
    // Shader implementing Sobel edge detection.
    public Shader mshader;
    private Material mat;

    // Lazily builds (and caches) the material from mshader.
    public Material material
    {
        get
        {
            mat = CheckShaderAndCreateMaterial(mshader, mat);
            return mat;
        }
    }

    // Color used to draw detected edges.
    public Color EdgeColor;
    // 0 = edges drawn over the scene, 1 = edges drawn over the background color.
    [Range(0.0f, 1.0f)]
    public float EdgeOnly = 1.0f;
    // Scales the Sobel response (edge intensity).
    [Range(0.0f, 1.0f)]
    public float EdgeWidth = 1.0f;

    // Unity callback: feeds the current settings to the material and
    // runs the edge-detection pass over the screen image.
    private void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        if (material == null)
        {
            // No usable material: pass the image through untouched.
            Graphics.Blit(source, destination);
            return;
        }

        material.SetColor("_EdgeColor", EdgeColor);
        material.SetFloat("_EdgeOnly", EdgeOnly);
        material.SetFloat("_EdgeWidth", EdgeWidth);
        Graphics.Blit(source, destination, material);
    }
}
脚本使用的shader:
// Screen post-effect: Sobel edge detection. Driven by EdgeDetection.cs.
Shader "Unlit/EdgeDetectionMat"
{
    Properties
    {
        _MainTex ("Base (RGB)", 2D) = "white" {}
        // 0 = edges over the scene, 1 = edges over _BackgroundColor only.
        _EdgeOnly ("Edge Only", Float) = 1.0
        _EdgeColor ("Edge Color", Color) = (0, 0, 0, 1)
        // Scales the Sobel response.
        _EdgeWidth("EdgeWidth", float) = 1
        _BackgroundColor ("Background Color", Color) = (1, 1, 1, 1)
    }
    SubShader
    {
        Pass
        {
            // Standard post-processing render states.
            ZWrite Off
            ZTest Always
            Cull Off
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment fragSobel
            #include "UnityCG.cginc"
            struct v2f
            {
                // uv[0..8]: UVs of the 3x3 neighborhood around the pixel,
                // precomputed in the vertex shader (interpolation is linear,
                // so computing them per-vertex is safe and cheaper).
                float2 uv[9] : TEXCOORD0;
                float4 pos : SV_POSITION;
            };
            sampler2D _MainTex;
            // xy = (1/width, 1/height): the size of one texel in UV space.
            float4 _MainTex_TexelSize;
            float _EdgeOnly;
            float4 _EdgeColor;
            float4 _BackgroundColor;
            float _EdgeWidth;
            v2f vert(appdata_img v) {
                v2f o;
                o.pos = UnityObjectToClipPos(v.vertex);
                half2 uv = v.texcoord;
                // 3x3 neighborhood, row by row, offset by one texel each way.
                o.uv[0] = uv + _MainTex_TexelSize.xy * half2(-1, -1);
                o.uv[1] = uv + _MainTex_TexelSize.xy * half2(0, -1);
                o.uv[2] = uv + _MainTex_TexelSize.xy * half2(1, -1);
                o.uv[3] = uv + _MainTex_TexelSize.xy * half2(-1, 0);
                o.uv[4] = uv + _MainTex_TexelSize.xy * half2(0, 0);
                o.uv[5] = uv + _MainTex_TexelSize.xy * half2(1, 0);
                o.uv[6] = uv + _MainTex_TexelSize.xy * half2(-1, 1);
                o.uv[7] = uv + _MainTex_TexelSize.xy * half2(0, 1);
                o.uv[8] = uv + _MainTex_TexelSize.xy * half2(1, 1);
                return o;
            }
            // Rec.709-style luma; Sobel runs on this single channel.
            // FIX: green coefficient was 0.7145 — corrected to 0.7154 to match
            // the luma formula used by the other shaders in this project.
            fixed luminance(fixed4 color){
                return 0.2125 * color.r + 0.7154 * color.g + 0.0721 * color.b;
            }
            // Returns the combined horizontal+vertical Sobel gradient
            // magnitude (|Gx| + |Gy| approximation) at this pixel.
            half Sobel(v2f i) {
                const half Gx[9] = {-1, 0, 1,
                                    -2, 0, 2,
                                    -1, 0, 1};
                const half Gy[9] = {-1, -2, -1,
                                     0,  0,  0,
                                     1,  2,  1};
                half texColor;
                half edgeX = 0;
                half edgeY = 0;
                for (int it = 0; it < 9; it++) {
                    texColor = luminance(tex2D(_MainTex, i.uv[it]));
                    edgeX += texColor * Gx[it];
                    edgeY += texColor * Gy[it];
                }
                half edge = abs(edgeX) + abs(edgeY);
                return edge;
            }
            fixed4 fragSobel(v2f i) : SV_Target
            {
                half edge = Sobel(i);
                // Scale the edge response; lerp(0, edge, w) == edge * w.
                edge = lerp(0, edge, _EdgeWidth);
                // Edge color over the original image (uv[4] = center pixel).
                fixed4 withEdgeColor = lerp(tex2D(_MainTex, i.uv[4]), _EdgeColor, edge);
                // Edge color over a flat background.
                fixed4 onlyEdgeColor = lerp(_BackgroundColor, _EdgeColor, edge);
                return lerp(withEdgeColor, onlyEdgeColor, _EdgeOnly);
            }
            ENDCG
        }
    }
    FallBack Off
}
本次使用Sobel算子作为边缘检测算子。
_MainTex_TexelSize.xy可以返回屏幕分辨率下每个像素的大小,比如分辨率为512*256的屏幕,_MainTex_TexelSize.xy的值为(1/512, 1/256)。通过_MainTex_TexelSize.xy可以求得顶点周围各点在UV上的坐标。
注意这里必须使用_MainTex_TexelSize,而不能使用_MainTex_ST:_MainTex_ST存储的是纹理的缩放和偏移,无法用来计算周围像素的正确UV坐标。
多个屏幕后处理脚本可以在一个Camera上同时使用。
高斯模糊
高斯模糊的具体实现,是通过将各片元周围片元的颜色进行卷积作为当前片元的颜色输出,原理与边缘检测相同,高斯模糊一般是用高斯核进行卷积。
但仅计算一个5×5大小的高斯核,就需要得到25个片元的颜色参数。一般不使用这种方法进行卷积计算,而是用5×1和1×5两个矩阵在水平和竖直方向各进行一次计算来近似高斯核卷积的结果。
又因为矩阵中有多个参数重复出现,所以我们只需要记录三个参数即可。
同时可以设置高斯模糊的迭代次数,来得到不同的模糊效果。
挂载在Camera上的脚本:
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
public class GaussianBlur : PostEffectsBase
{
    // Shader providing the two separable blur passes (0 = vertical, 1 = horizontal).
    public Shader mshader;
    private Material mat = null;

    // Lazily builds (and caches) the blur material.
    public Material material
    {
        get
        {
            mat = CheckShaderAndCreateMaterial(mshader, mat);
            return mat;
        }
    }

    // Spacing multiplier between blur samples (larger = blurrier).
    [Range(0, 3)]
    public float blurspread = 1;
    // How many vertical+horizontal blur rounds to run.
    [Range(1, 8)]
    public int iterations = 1;
    // Resolution divisor for the temporary buffers (larger = cheaper & blurrier).
    [Range(1, 8)]
    public int downsample = 1;

    // Unity callback: downsamples the screen, ping-pongs it through the
    // vertical and horizontal blur passes, then blits the result out.
    private void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        if (material == null)
        {
            // No usable material: pass the image through untouched.
            Graphics.Blit(source, destination);
            return;
        }

        int rtHeight = source.height / downsample;
        int rtWidth = source.width / downsample;
        material.SetFloat("_BlurSize", blurspread);

        RenderTexture current = RenderTexture.GetTemporary(rtWidth, rtHeight, 0);
        // Bilinear filtering smooths the downsample itself.
        current.filterMode = FilterMode.Bilinear;
        Graphics.Blit(source, current);

        for (int round = 0; round < iterations; round++)
        {
            // Shader pass 0 = vertical blur, pass 1 = horizontal blur.
            for (int shaderPass = 0; shaderPass <= 1; shaderPass++)
            {
                RenderTexture next = RenderTexture.GetTemporary(rtWidth, rtHeight, 0);
                Graphics.Blit(current, next, material, shaderPass);
                RenderTexture.ReleaseTemporary(current);
                current = next;
            }
        }

        Graphics.Blit(current, destination);
        RenderTexture.ReleaseTemporary(current);
    }
}
挂载在脚本上的Shader:
// Upgrade NOTE: replaced 'mul(UNITY_MATRIX_MVP,*)' with 'UnityObjectToClipPos(*)'
Shader "Unlit/GaussianBlurMat"
{
Properties
{
_MainTex ("Texture", 2D) = "white" {}
_BlurSize ("BlurSize", int) = 1
}
SubShader
{
CGINCLUDE
sampler2D _MainTex;
float4 _MainTex_TexelSize;
int _BlurSize;
struct appdata
{
float2 texcoord : TEXCOORD0;
float4 vertex : POSITION;
};
struct v2f
{
float2 uv[5] : TEXCOORD0;
float4 vertex : SV_POSITION;
};
v2f vertBlurVertial(appdata i){
v2f o;
o.vertex = UnityObjectToClipPos(i.vertex);
half2 midPos = i.texcoord;
o.uv[0] = midPos;
o.uv[1] = midPos + float2(0.0, _MainTex_TexelSize.y * 1.0) * _BlurSize;
o.uv[2] = midPos - float2(0.0, _MainTex_TexelSize.y * 1.0) * _BlurSize;
o.uv[3] = midPos + float2(0.0, _MainTex_TexelSize.y * 2.0) * _BlurSize;
o.uv[4] = midPos - float2(0.0, _MainTex_TexelSize.y * 2.0) * _BlurSize;
return o;
}
v2f vertBlurHorizontal(appdata i){
v2f o;
o.vertex = UnityObjectToClipPos(i.vertex);
half2 midPos = i.texcoord;
o.uv[0] = midPos;
o.uv[1] = midPos + float2(_MainTex_TexelSize.x * 1.0, 0.0) * _BlurSize;
o.uv[2] = midPos - float2(_MainTex_TexelSize.x * 1.0, 0.0) * _BlurSize;
o.uv[3] = midPos + float2(_MainTex_TexelSize.x * 2.0, 0.0) * _BlurSize;
o.uv[4] = midPos - float2(_MainTex_TexelSize.x * 2.0, 0.0) * _BlurSize;
return o;
}
fixed4 fragmentBlur(v2f i) : SV_Target{
float weight[3] = {0.4026, 0.2442, 0.0545};
fixed3 sum = tex2D(_MainTex, i.uv[0]).rgb * weight[0];
for (int it = 1; it < 3; it++) {
sum += tex2D(_MainTex, i.uv[it*2-1]).rgb * weight[it];
sum += tex2D(_MainTex, i.uv[it*2]).rgb * weight[it];
}
return fixed4(sum, 1.0);
}
ENDCG
Tags { "RenderType"="Opaque" }
Pass
{
NAME "BLUR_VERTIAL"
CGPROGRAM
#pragma vertex vertBlurVertial
#pragma fragment fragmentBlur
ENDCG
}
Pass
{
NAME "BLUR_HORIZONTAL"
CGPROGRAM
#pragma vertex vertBlurHorizontal
#pragma fragment fragmentBlur
ENDCG
}
}
FallBack Off
}
泛光效果(Bloom)
所谓泛光效果,就是画面中较亮的区域向周围扩散、形成光晕的效果。
具体实现很简单:首先需要获得屏幕画面的亮部,然后对提取的亮部纹理进行模糊,最后叠加到原纹理上,就实现了Bloom效果。
获得亮部纹理的方法:在片元着色器中获得该片元的颜色,然后得到对片元去色的结果,减去亮部阈值(超过判断为亮部)并saturate为0到1之间,将最终结果与原片元颜色相乘,就得到了只剩下亮部的纹理。再用高斯模糊的相同方法对亮部纹理模糊迭代,最后与原纹理相加,就得到了Bloom效果。
挂载在Camera上的脚本:
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
public class Bloom : PostEffectsBase
{
    // Shader with 4 passes: 0/1 = blur (via UsePass), 2 = extract brights, 3 = combine.
    public Shader mshader;
    private Material mat = null;

    // Lazily builds (and caches) the bloom material.
    public Material material
    {
        get
        {
            mat = CheckShaderAndCreateMaterial(mshader, mat);
            return mat;
        }
    }

    // Spacing multiplier between blur samples.
    [Range(0, 3)]
    public float blurspread = 1;
    // How many vertical+horizontal blur rounds to run on the bright areas.
    [Range(1, 8)]
    public int iterations = 1;
    // Resolution divisor for the temporary buffers.
    [Range(1, 8)]
    public int downsample = 1;
    // Brightness threshold above which a pixel contributes to bloom.
    [Range(0, 3)]
    public float Luminance = 0.5f;

    // Unity callback: extract brights -> blur them -> add back onto the scene.
    private void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        if (material == null)
        {
            // No usable material: pass the image through untouched.
            Graphics.Blit(source, destination);
            return;
        }

        int rtHeight = source.height / downsample;
        int rtWidth = source.width / downsample;
        material.SetFloat("_BlurSize", blurspread);
        material.SetFloat("_Luminance", Luminance);

        RenderTexture current = RenderTexture.GetTemporary(rtWidth, rtHeight, 0);
        current.filterMode = FilterMode.Bilinear;
        // Pass 2: keep only the bright areas of the screen.
        Graphics.Blit(source, current, material, 2);

        for (int round = 0; round < iterations; round++)
        {
            // Shader pass 0 = vertical blur, pass 1 = horizontal blur.
            for (int shaderPass = 0; shaderPass <= 1; shaderPass++)
            {
                RenderTexture next = RenderTexture.GetTemporary(rtWidth, rtHeight, 0);
                Graphics.Blit(current, next, material, shaderPass);
                RenderTexture.ReleaseTemporary(current);
                current = next;
            }
        }

        // Pass 3: add the blurred brights (_Bloom) onto the original image.
        material.SetTexture("_Bloom", current);
        Graphics.Blit(source, destination, material, 3);
        RenderTexture.ReleaseTemporary(current);
    }
}
挂载在脚本上的Shader:
// Upgrade NOTE: replaced 'mul(UNITY_MATRIX_MVP,*)' with 'UnityObjectToClipPos(*)'
// Upgrade NOTE: replaced 'mul(UNITY_MATRIX_MVP,*)' with 'UnityObjectToClipPos(*)'
// Bloom post-effect. Pass layout (indices used by Bloom.cs):
//   0/1: vertical/horizontal Gaussian blur (reused from GaussianBlurMat)
//   2:   extract bright areas above the _Luminance threshold
//   3:   add the blurred brights (_Bloom) back onto the scene (_MainTex)
Shader "Unlit/BloomMat"
{
Properties
{
_MainTex ("Texture", 2D) = "white" {}
// Blurred bright-area texture, set from script before the combine pass.
_Bloom ("Bloom", 2D) = "white" {}
// NOTE(review): declared as int but the script sets a fractional float;
// only the UsePass blur passes actually read _BlurSize (from their own
// shader's declaration), so this local int is effectively unused — confirm.
_BlurSize ("BlurSize", int) = 1
// Brightness threshold for the extraction pass.
_Luminance("Luminance", float) = 0.5
}
SubShader
{
CGINCLUDE
sampler2D _MainTex;
float4 _MainTex_TexelSize;
sampler2D _Bloom;
float4 _Bloom_TexelSize;
int _BlurSize;
float _Luminance;
struct appdata
{
float2 texcoord : TEXCOORD0;
float4 vertex : POSITION;
};
struct v2f
{
float2 uv : TEXCOORD0;
float4 vertex : SV_POSITION;
};
// Pass-through vertex shader for the blit quad.
v2f vertexBloom(appdata i){
v2f o;
o.vertex = UnityObjectToClipPos(i.vertex);
o.uv = i.texcoord;
return o;
}
// Rec.709-style luma.
fixed Getluminance(fixed4 col){
return 0.2125 * col.r + 0.7154 * col.g + 0.0721 * col.b;
}
// Extraction pass: scales the color by how far its luma exceeds the
// threshold; pixels at or below _Luminance go to black.
fixed4 fragmentBloom(v2f i) : SV_Target{
fixed4 col = tex2D(_MainTex, i.uv);
fixed val = saturate(Getluminance(col) - _Luminance);
// NOTE(review): extra boost term beyond the standard "col * val" formula;
// it divides by (1 - _Luminance) and so blows up as _Luminance -> 1
// (inspector range allows up to 3) — confirm this is intentional.
val = val + _Luminance * val / (1.0 - _Luminance);
return col * val;
}
// Combine pass: additively blends the blurred brights onto the scene.
fixed4 fragmentMixed(v2f i) : SV_Target{
fixed4 col = tex2D(_MainTex, i.uv);
fixed4 bloomCol = tex2D(_Bloom, i.uv);
return col + bloomCol;
}
ENDCG
// Standard post-processing render states, shared by all passes below.
ZTest Always ZWrite Off Cull Off
Tags { "RenderType"="Opaque" }
// Passes 0 and 1: reuse the separable blur from GaussianBlurMat.
UsePass "Unlit/GaussianBlurMat/BLUR_VERTIAL"
UsePass "Unlit/GaussianBlurMat/BLUR_HORIZONTAL"
Pass
{
CGPROGRAM
#pragma vertex vertexBloom
#pragma fragment fragmentBloom
ENDCG
}
Pass
{
CGPROGRAM
#pragma vertex vertexBloom
#pragma fragment fragmentMixed
ENDCG
}
}
FallBack Off
}
运动模糊(MotionBlur)
对于运动模糊的实现有两种方法,一种是累计缓存(Accumulation buffer),一种是速度缓存(velocity buffer),本例中使用累计缓存实现运动模糊。我们需要保存之前的渲染结果,不断的把当前渲染的图像叠加到之前渲染的图像中,从而产生一种运动轨迹的视觉效果。
具体我们需要使用一个RenderTexture,来存储每次屏幕渲染的结果,然后使用MarkRestoreExpected()方法,恢复该变量之前存储的图像。再直接将当前屏幕图像渲染到该变量中(在Graphics.Blit中使用了非null的输出,会依据material的渲染方式将当前渲染的结果与输出的结果相混合,如果不在material的Pass中设置混合模式,则原纹理的渲染结果会直接覆盖输出纹理的内容)。
挂载在Camera上的脚本:
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
public class MotionBlur : PostEffectsBase
{
    // Shader that alpha-blends the current frame over the accumulated history.
    public Shader mshader;
    private Material mat = null;

    // Lazily builds (and caches) the motion-blur material.
    public Material material
    {
        get
        {
            mat = CheckShaderAndCreateMaterial(mshader, mat);
            return mat;
        }
    }

    // Blend weight of the current frame; higher values give shorter trails.
    [Range(0.0f, 0.9f)]
    public float bluralpha;

    // Accumulation buffer holding the blended history of previous frames.
    private RenderTexture accumulationTexture;

    // FIX: release the history buffer when the effect is disabled, so the
    // manually-created RenderTexture does not leak and stale frames are not
    // blended in when the component is re-enabled.
    private void OnDisable()
    {
        DestroyImmediate(accumulationTexture);
        accumulationTexture = null;
    }

    // Unity callback: blend the current frame into the accumulation buffer
    // (shader pass uses SrcAlpha/OneMinusSrcAlpha), then show the buffer.
    private void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        if (material != null)
        {
            // (Re)create the buffer on first use or after a resolution change,
            // seeding it with the current frame so there is no black flash.
            if (accumulationTexture == null || accumulationTexture.width != source.width || accumulationTexture.height != source.height)
            {
                DestroyImmediate(accumulationTexture);
                accumulationTexture = new RenderTexture(source.width, source.height, 0);
                accumulationTexture.hideFlags = HideFlags.HideAndDontSave;
                Graphics.Blit(source, accumulationTexture);
            }
            // We intentionally render on top of the previous contents without
            // clearing; this call suppresses the restore-performance warning.
            accumulationTexture.MarkRestoreExpected();
            material.SetFloat("_BlurAlpha", bluralpha);
            Graphics.Blit(source, accumulationTexture, material);
            Graphics.Blit(accumulationTexture, destination);
        }
        else
        {
            // No usable material: pass the image through untouched.
            Graphics.Blit(source, destination);
        }
    }
}
挂载在脚本上的Shader:
// Upgrade NOTE: replaced 'mul(UNITY_MATRIX_MVP,*)' with 'UnityObjectToClipPos(*)'
// Upgrade NOTE: replaced 'mul(UNITY_MATRIX_MVP,*)' with 'UnityObjectToClipPos(*)'
// Accumulation-buffer motion blur: outputs the current frame with a constant
// alpha so the pass's blend mode mixes it over the stored history.
Shader "Unlit/MotionBlurMat"
{
    Properties
    {
        _MainTex ("Texture", 2D) = "white" {}
        // Blend weight of the current frame (set from MotionBlur.cs).
        _BlurAlpha("MotionAlpha", float) = 0.9
    }
    SubShader
    {
        CGINCLUDE
        struct appdata{
            float4 vertex : POSITION;
            float2 texcoord : TEXCOORD0;
        };
        struct v2f{
            float4 vertex : SV_POSITION;
            float2 uv : TEXCOORD0;
        };
        sampler2D _MainTex;
        // FIX: removed "float4 _MainTex_TS;" — a misnamed (Unity's convention
        // is _MainTex_ST / _MainTex_TexelSize), never-read uniform.
        float _BlurAlpha;
        // Pass-through vertex shader for the blit quad.
        v2f vertexMotion(appdata i){
            v2f o;
            o.vertex = UnityObjectToClipPos(i.vertex);
            o.uv = i.texcoord;
            return o;
        }
        // Emits the current frame's RGB with _BlurAlpha as alpha; the
        // SrcAlpha/OneMinusSrcAlpha blend below does the accumulation.
        // NOTE(review): alpha is blended into the history buffer as well;
        // a separate ColorMask A pass would keep stored alpha intact —
        // confirm whether that matters for this use.
        fixed4 fragMotion(v2f i) : SV_Target{
            fixed4 col = tex2D(_MainTex, i.uv);
            return fixed4(col.rgb, _BlurAlpha);
        }
        ENDCG
        Pass{
            // Standard post-processing render states plus the blend that
            // implements the accumulation.
            ZWrite Off ZTest Always Cull Off
            Blend SrcAlpha OneMinusSrcAlpha
            CGPROGRAM
            #pragma vertex vertexMotion
            #pragma fragment fragMotion
            ENDCG
        }
    }
}
通过设置的Alpha值,确定每一次混合的程度。