描边shader
法线外扩法
法线外扩是用剔除来实现的,在一个pass里面剔除正面(Cull Front),让物体只有背面被绘制,这一面让它输出的片元颜色=描边颜色,但是顶点需要沿法线进行放大,
再用另一个PASS再绘制一遍覆盖,这样背面只有描边显示出来了.
代码:
Shader "Custom/OutLine1"
{
    Properties
    {
        _Color ("Color", Color) = (1,1,1,1)
        _MainTex ("Albedo (RGB)", 2D) = "white" {}
        _Glossiness ("Smoothness", Range(0,1)) = 0.5
        _Metallic ("Metallic", Range(0,1)) = 0.0
        _OutlineWidth("Outline Width", range(0,5)) = 0.5
        _OutlineColor("Outline Color", Color) =(0,0,0,1)
    }
    SubShader
    {
        Tags { "RenderType"="Opaque" }
        LOD 200

        // Outline pass: cull the FRONT faces so only back faces are drawn,
        // inflate each vertex along its object-space normal and output a
        // flat outline color. The surface pass below then draws the model
        // on top, so only the inflated rim of this pass remains visible.
        Pass{
            Cull Front

            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            #include "UnityCG.cginc"

            float _OutlineWidth;    // inspector range 0-5, rescaled by 0.1 below
            fixed4 _OutlineColor;   // flat, unlit outline color

            struct v2f
            {
                float4 pos : SV_POSITION;
            };

            // BUGFIX: the function previously declared ":SV_POSITION" as a
            // return semantic. That is invalid for a struct return type —
            // the v2f member already carries SV_POSITION.
            v2f vert(appdata_base v){
                v2f o;
                // Push the vertex outward along its object-space normal
                // before projecting; 0.1 rescales the 0-5 inspector range
                // to a usable object-space width.
                float4 expanded = float4(v.vertex.xyz + v.normal * _OutlineWidth * 0.1, 1);
                o.pos = UnityObjectToClipPos(expanded);
                return o;
            }

            // Every fragment of the back-face pass is just the outline color.
            fixed4 frag(v2f i) : SV_TARGET{
                return _OutlineColor;
            }
            ENDCG
        }

        // Main pass: standard PBR surface shader, drawn over the outline pass.
        CGPROGRAM
        // Physically based Standard lighting model, and enable shadows on all light types
        #pragma surface surf Standard fullforwardshadows
        // Use shader model 3.0 target, to get nicer looking lighting
        #pragma target 3.0

        sampler2D _MainTex;

        struct Input
        {
            float2 uv_MainTex;
        };

        half _Glossiness;
        half _Metallic;
        fixed4 _Color;

        UNITY_INSTANCING_BUFFER_START(Props)
        // put more per-instance properties here
        UNITY_INSTANCING_BUFFER_END(Props)

        void surf (Input IN, inout SurfaceOutputStandard o)
        {
            // Albedo comes from a texture tinted by color
            fixed4 c = tex2D (_MainTex, IN.uv_MainTex) * _Color;
            o.Albedo = c.rgb;
            // Metallic and smoothness come from slider variables
            o.Metallic = _Metallic;
            o.Smoothness = _Glossiness;
            o.Alpha = c.a;
        }
        ENDCG
    }
    FallBack "Diffuse"
}
效果:
对正方体(立方体)这种有硬边的物体效果是不太好的,解决方法之一是把法线外扩用的normal换成预先平滑处理过、存进切线里的数据:https://zhuanlan.zhihu.com/p/109101851
用后处理描边
这个是照着Unity Shader入门精要搞的
边缘检测重点是卷积算法,怎么在shader里写卷积计算梯度是一个小的难点.
卷积和信号在games101的lecture 6里面有讲,简单理解就是:输出信号中任意一个位置的值,等于以该位置为中心的一段输入信号与滤波器数组做点积(加权求和).
这个意思就是取某一个像素,与它周围3x3个做平均操作
shader入门精要里写的是用卷积核求梯度G=|Gx|+|Gy|来判断边缘,G越大,越有可能是边缘 .
先不看camera代码,来看shader里怎么写卷积算子,再写camera脚本好理解点
Sobel算子卷积要对一个像素周围的3x3像素采样一遍,因此一套uv就不够了
顶点到片元中输出的uv就得改成大小为9的数组,放9个uv坐标.
// Vertex-to-fragment struct for the edge-detection post effect:
// pos is the clip-space position; uv holds the 3x3 neighborhood of
// texture coordinates that the Sobel convolution will sample.
struct v2f {
float4 pos : SV_POSITION;
half2 uv[9] : TEXCOORD0;
};
// Vertex shader: precompute the nine sample coordinates here instead of
// in the fragment shader (interpolation is linear, so this is equivalent
// and moves the work to the cheaper stage).
v2f vert(appdata_img v) {
v2f o;
o.pos = UnityObjectToClipPos(v.vertex);
half2 uv = v.texcoord;
// Offsets of the 3x3 Sobel sampling grid, one texel apart in each axis;
// _MainTex_TexelSize.xy is the size of one texel in uv space
// (declared elsewhere in the shader).
o.uv[0] = uv + _MainTex_TexelSize.xy * half2(-1, -1);
o.uv[1] = uv + _MainTex_TexelSize.xy * half2(0, -1);
o.uv[2] = uv + _MainTex_TexelSize.xy * half2(1, -1);
o.uv[3] = uv + _MainTex_TexelSize.xy * half2(-1, 0);
o.uv[4] = uv + _MainTex_TexelSize.xy * half2(0, 0);
o.uv[5] = uv + _MainTex_TexelSize.xy * half2(1, 0);
o.uv[6] = uv + _MainTex_TexelSize.xy * half2(-1, 1);
o.uv[7] = uv + _MainTex_TexelSize.xy * half2(0, 1);
o.uv[8] = uv + _MainTex_TexelSize.xy * half2(1, 1);
return o;
}
改uv要用到_MainTex_TexelSize,这东西就是它字面意思,一个像素有多大,如果是512x1024那么x=1/512,y=1/1024
_MainTex_TexelSize应该乘的系数是3x3采样点相对中心的偏移(-1/0/1),而Gx,Gy是之后求梯度时各采样点对应的卷积核权重
对于其中算子其中的一个卷积的求法:
// Sample one pixel, then accumulate its weighted gradient contribution.
// NOTE(review): assigning the fixed4 sample to a half truncates it to a
// single channel — the book's version converts to luminance first; verify.
texColor = tex2D(_MainTex, i.uv[it]);
edgeX += texColor * Gx[it];
edgeY += texColor * Gy[it];
3x3个像素采样求梯度:
// Runs a 3x3 Sobel convolution around the current pixel and returns an
// "edge" value: 1 - (|Gx| + |Gy|). The SMALLER the return value, the
// stronger the gradient and the more likely the pixel lies on an edge.
// i.uv[0..8] are the nine neighborhood coordinates set up in the vertex
// shader (uv[4] is the center tap).
half Sobel(v2f i) {
    // Horizontal gradient kernel.
    const half Gx[9] = {-1, 0, 1,
                        -2, 0, 2,
                        -1, 0, 1};
    // Vertical gradient kernel.
    const half Gy[9] = {-1, -2, -1,
                         0,  0,  0,
                         1,  2,  1};
    half texColor;
    half edgeX = 0;
    half edgeY = 0;
    for (int it = 0; it < 9; it++) {
        // BUGFIX: assigning the fixed4 sample straight to a half silently
        // truncated it to one channel; convert to luminance explicitly
        // (Rec. 709-style weights, as in the book's original code).
        texColor = dot(tex2D(_MainTex, i.uv[it]).rgb, half3(0.2125, 0.7154, 0.0721));
        edgeX += texColor * Gx[it];
        edgeY += texColor * Gy[it];
    }
    // Smaller edge => more likely an edge pixel.
    half edge = 1 - abs(edgeX) - abs(edgeY);
    return edge;
}
有sobel算出的梯度和edge值,片元着色器就好写了
// Fragment shader: blends the outline color with the scene color.
// Sobel() shrinks toward 0 on strong gradients, so the lerp favors
// _EdgeColor on edges and the original pixel everywhere else.
fixed4 fragSobel(v2f i) : SV_Target {
    half edge = Sobel(i);
    // uv[4] is the unoffset center tap of the 3x3 neighborhood.
    fixed4 sceneColor = tex2D(_MainTex, i.uv[4]);
    fixed4 withEdgeColor = lerp(_EdgeColor, sceneColor, edge);
    //fixed4 onlyEdgeColor = lerp(_EdgeColor, _BackgroundColor, edge);
    //return lerp(withEdgeColor, onlyEdgeColor, _EdgeOnly);
    return withEdgeColor;
}
先不看onlyEdgeColor这里,其实这个片元着色器的做的事就是用描边的颜色和采样主纹理的颜色做混合,混合的系数由edge决定,edge越小,边缘越明显
到此为止已经有了基本的效果了
深度和法线描边
原理:从摄像机获取深度和法线纹理,比较深度和法线的差值,如果超过阈值则认为是边缘
//camera的c#脚本设置:
GetComponent<Camera>().depthTextureMode |= DepthTextureMode.DepthNormals;
//shader里面声明:
_CameraDepthNormalsTexture
只获取深度
depthTextureMode |= DepthTextureMode.Depth;
_CameraDepthTexture
顶点着色器中用Roberts算子时用4个UV坐标就够了
// Roberts cross operator only compares the two diagonal neighbor pairs,
// so four sample coordinates are enough. _SampleDistance scales the
// offsets to widen or thin the detected stroke.
// (presumably o.uv[0] holds the unoffset center uv — confirm against the
// full shader, which is not shown here)
o.uv[1] = uv + _MainTex_TexelSize.xy * half2(1,1) * _SampleDistance;
o.uv[2] = uv + _MainTex_TexelSize.xy * half2(-1,-1) * _SampleDistance;
o.uv[3] = uv + _MainTex_TexelSize.xy * half2(-1,1) * _SampleDistance;
o.uv[4] = uv + _MainTex_TexelSize.xy * half2(1,-1) * _SampleDistance;
最重要的是片元着色中的深度和法线计算过程,采样深度和法线纹理:
// Sample the camera's packed depth+normals texture at the four diagonal
// taps. Per the notes below, xy holds the encoded view-space normal and
// zw holds the depth packed into two channels (decoded via DecodeFloatRG).
half4 sample1 = tex2D(_CameraDepthNormalsTexture, i.uv[1]);
half4 sample2 = tex2D(_CameraDepthNormalsTexture, i.uv[2]);
half4 sample3 = tex2D(_CameraDepthNormalsTexture, i.uv[3]);
half4 sample4 = tex2D(_CameraDepthNormalsTexture, i.uv[4]);
检查差值的过程就是两个采样纹理xy,zw值相减取绝对值,xy不是真正的解码后法线,但法线与xy有关
// Inside CheckSame: compare the ENCODED normals directly — xy is not the
// decoded normal, but differences in xy track differences in the normal,
// which is all edge detection needs. _Sensitivity.x scales the threshold.
half2 centerNormal = center.xy;
half2 sampleNormal = sample.xy;
half2 diffNormal = abs(centerNormal - sampleNormal) * _Sensitivity.x;
// Compare depths: zw packs the depth into two 8-bit channels; decode it
// before differencing. _Sensitivity.y scales the depth threshold.
float centerDepth = DecodeFloatRG(center.zw);
float sampleDepth = DecodeFloatRG(sample.zw);
float diffDepth = abs(centerDepth - sampleDepth) * _Sensitivity.y;
// Back in the fragment shader: combine both diagonal pair comparisons
// multiplicatively (presumably CheckSame returns 0 for "different" /
// 1 for "same" — confirm against the full shader).
edge *= CheckSame(sample1, sample2);
edge *= CheckSame(sample3, sample4);