I'm trying to implement Oren-Nayar lighting in the fragment shader, as shown here.
However, as seen in the image below, I'm getting some strange lighting artifacts on the terrain.
I'm sending the shader a "view direction" uniform set to the camera's "front" vector. I don't know if this is correct, as moving the camera changes the artifacts.
Multiplying the "front" vector by the MVP matrix gives a better result, but the artifacts are still very noticeable when viewing the terrain from certain angles. They are especially apparent in dark areas and around the edges of the screen.
What could be causing this effect?
Example of the artifacts
How the scene should look
Vertex shader
#version 450

layout(location = 0) in vec3 position;
layout(location = 1) in vec3 normal;

out VS_OUT
{
    vec3 normal;
} vert_out;

void main()
{
    vert_out.normal = normal;
    gl_Position = vec4(position, 1.0);
}
Tessellation control shader
#version 450

layout(vertices = 3) out;

in VS_OUT
{
    vec3 normal;
} tesc_in[];

out TESC_OUT
{
    vec3 normal;
} tesc_out[];

void main()
{
    if (gl_InvocationID == 0)
    {
        gl_TessLevelInner[0] = 1.0;
        gl_TessLevelInner[1] = 1.0;
        gl_TessLevelOuter[0] = 1.0;
        gl_TessLevelOuter[1] = 1.0;
        gl_TessLevelOuter[2] = 1.0;
        gl_TessLevelOuter[3] = 1.0;
    }

    tesc_out[gl_InvocationID].normal = tesc_in[gl_InvocationID].normal;
    gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;
}
Tessellation evaluation shader
#version 450

layout(triangles, equal_spacing) in;

in TESC_OUT
{
    vec3 normal;
} tesc_in[];

out TESE_OUT
{
    vec3 normal;
    float height;
    vec4 shadow_position;
} tesc_out;

uniform mat4 model_view;
uniform mat4 model_view_perspective;
uniform mat3 normal_matrix;
uniform mat4 depth_matrix;

vec3 lerp(vec3 v0, vec3 v1, vec3 v2)
{
    return (
        (vec3(gl_TessCoord.x) * v0) +
        (vec3(gl_TessCoord.y) * v1) +
        (vec3(gl_TessCoord.z) * v2)
    );
}

vec4 lerp(vec4 v0, vec4 v1, vec4 v2)
{
    return (
        (vec4(gl_TessCoord.x) * v0) +
        (vec4(gl_TessCoord.y) * v1) +
        (vec4(gl_TessCoord.z) * v2)
    );
}

void main()
{
    gl_Position = lerp(
        gl_in[0].gl_Position, gl_in[1].gl_Position, gl_in[2].gl_Position
    );
    tesc_out.normal = normal_matrix * lerp(
        tesc_in[0].normal, tesc_in[1].normal, tesc_in[2].normal
    );
    tesc_out.height = gl_Position.y;
    tesc_out.shadow_position = depth_matrix * gl_Position;

    gl_Position = model_view_perspective * gl_Position;
}
Fragment shader
#version 450

in TESE_OUT
{
    vec3 normal;
    float height;
    vec4 shadow_position;
} frag_in;

out vec4 colour;

uniform vec3 view_direction;
uniform vec3 light_position;

#define PI 3.141592653589793

void main()
{
    const vec3 ambient = vec3(0.1, 0.1, 0.1);
    const float roughness = 0.8;

    const vec4 water = vec4(0.0, 0.0, 0.8, 1.0);
    const vec4 sand = vec4(0.93, 0.87, 0.51, 1.0);
    const vec4 grass = vec4(0.0, 1.0, 0.0, 1.0);
    const vec4 ground = vec4(0.49, 0.27, 0.08, 1.0);
    const vec4 snow = vec4(0.9, 0.9, 0.9, 1.0);

    if (frag_in.height == 0.0)
    {
        colour = water;
    }
    else if (frag_in.height < 0.2)
    {
        colour = sand;
    }
    else if (frag_in.height < 0.575)
    {
        colour = grass;
    }
    else if (frag_in.height < 0.8)
    {
        colour = ground;
    }
    else
    {
        colour = snow;
    }

    vec3 normal = normalize(frag_in.normal);
    vec3 view_dir = normalize(view_direction);
    vec3 light_dir = normalize(light_position);

    float NdotL = dot(normal, light_dir);
    float NdotV = dot(normal, view_dir);

    float angleVN = acos(NdotV);
    float angleLN = acos(NdotL);

    float alpha = max(angleVN, angleLN);
    float beta = min(angleVN, angleLN);
    float gamma = dot(
        view_dir - normal * dot(view_dir, normal),
        light_dir - normal * dot(light_dir, normal)
    );

    float roughnessSquared = roughness * roughness;
    float roughnessSquared9 = (roughnessSquared / (roughnessSquared + 0.09));

    // calculate C1, C2 and C3
    float C1 = 1.0 - 0.5 * (roughnessSquared / (roughnessSquared + 0.33));
    float C2 = 0.45 * roughnessSquared9;

    if (gamma >= 0.0)
    {
        C2 *= sin(alpha);
    }
    else
    {
        C2 *= (sin(alpha) - pow((2.0 * beta) / PI, 3.0));
    }

    float powValue = (4.0 * alpha * beta) / (PI * PI);
    float C3 = 0.125 * roughnessSquared9 * powValue * powValue;

    // now calculate both main parts of the formula
    float A = gamma * C2 * tan(beta);
    float B = (1.0 - abs(gamma)) * C3 * tan((alpha + beta) / 2.0);

    // put it all together
    float L1 = max(0.0, NdotL) * (C1 + A + B);

    // also calculate interreflection
    float twoBetaPi = 2.0 * beta / PI;
    float L2 = 0.17 * max(0.0, NdotL) * (roughnessSquared / (roughnessSquared + 0.13)) *
               (1.0 - gamma * twoBetaPi * twoBetaPi);

    colour = vec4(colour.xyz * (L1 + L2), 1.0);
}
Solution
First of all, I plugged your fragment shader into my renderer with my own view/normal/light vectors, and it works perfectly. So the problem must be in the way you compute those vectors.
Next, you say that you set view_dir to your camera's "front" vector. I assume you mean the camera's front vector in world space, which would be incorrect. Since you compute the dot products with vectors in camera space, view_dir must be in camera space too. That is, vec3(0, 0, 1) would be an easy way to test this. If it works, we have found your problem.
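As a minimal sketch of that check (a temporary hard-coded value, assuming the usual OpenGL convention that the camera looks down -Z in camera space), you could replace the uniform in the fragment shader with:

// Temporary sanity check, not a final fix: in camera space the
// camera sits at the origin looking down -Z, so the direction
// from any on-screen fragment towards the camera is roughly +Z.
vec3 view_dir = vec3(0.0, 0.0, 1.0);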
However, using (0, 0, 1) as the view direction is not strictly correct when you use perspective projection, because the direction from the fragment to the camera depends on the fragment's position on the screen. The correct formula then is view_dir = normalize(-pos), where pos is the fragment's position in camera space (that is, with the model-view matrix applied, but without the projection). Moreover, this quantity now depends only on the fragment's location on the screen, so you can calculate it as:
view_dir = normalize(vec3(-(gl_FragCoord.xy - frame_size/2) / (frame_width/2), flen))
Here flen is the focal length of your camera, which you can calculate as flen = cot(fovx / 2).
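Putting that together, here is a minimal sketch of the per-fragment version, assuming the application uploads the viewport size and focal length as uniforms (the names frame_size and flen follow the formula above and are not part of your existing code):

// Uniforms the application must supply: viewport size in pixels,
// and the focal length flen = cot(fovx / 2). Names are illustrative.
uniform vec2 frame_size;
uniform float flen;

vec3 camera_space_view_dir()
{
    // Fragment offset from the screen centre, scaled so that the
    // horizontal extent maps to [-1, 1] (frame_width = frame_size.x).
    vec2 p = (gl_FragCoord.xy - frame_size / 2.0) / (frame_size.x / 2.0);

    // Direction from the fragment towards the camera. The camera
    // looks down -Z in camera space, so this vector points into +Z.
    return normalize(vec3(-p, flen));
}

Alternatively, you can pass the camera-space position (model_view * gl_Position, before projection) from the tessellation evaluation shader down to the fragment shader and use view_dir = normalize(-pos) directly.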