Contents
Task
Implementation
Precomputing E(µ)
Precomputing Eavg
Bonus 1: Importance Sampling
Using the Precomputed Data in Real-Time Rendering
Results
Task
1. Precompute E(µ).
2. Precompute Eavg.
3. Use the precomputed data in real-time rendering.
Bonus 1: Use importance sampling.
Implementation
The F, D, G terms used by the assignment framework (formulas reconstructed below):
The F term uses the Schlick approximation.
The D term uses the GGX normal distribution.
The G term uses the Smith model matched to the GGX normal distribution.
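For reference, the corresponding formulas, reconstructed here to match the framework code shown later in this post (α stands for the roughness parameter; note that the precompute code and the shader remap roughness to α slightly differently, and k = α²/2 is the remapping used by the framework's Schlick-GGX G1):

$$F(v,h)=F_0+(1-F_0)\bigl(1-(v\cdot h)\bigr)^5$$

$$D(h)=\frac{\alpha^2}{\pi\bigl((n\cdot h)^2(\alpha^2-1)+1\bigr)^2}$$

$$G(l,v)=G_1(l)\,G_1(v),\qquad G_1(v)=\frac{n\cdot v}{(n\cdot v)(1-k)+k},\qquad k=\frac{\alpha^2}{2}$$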
Precomputing E(µ)
This follows the reference mentioned in the framework: Revisiting Physically Based Shading at Imageworks, SIGGRAPH 2017 course, by Kulla and Conty.
When we use a microfacet model, the white furnace test shows that more and more energy is lost as the material's roughness increases. The microfacet BRDF models only a single bounce of light; on a very rough surface a ray is likely to interact with the microsurface several times, and since the model ignores those extra bounces, rough objects render somewhat too dark. The Kulla-Conty approximation fixes this.
Treat the incident light as radiance 1 arriving from every direction and integrate the reflected energy over all incident directions; this gives the formula below (shown as an image in the original, reconstructed here).
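Reconstructed from the Kulla-Conty course notes, with µ = cosθ:

$$E(\mu_o)=\int_0^{2\pi}\!\!\int_0^{1} f_r(\mu_o,\mu_i,\phi)\,\mu_i\,\mathrm{d}\mu_i\,\mathrm{d}\phi$$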
This form expands dω into sinθ dθ dφ and folds the cosine into the differential: substituting µ = cosθ turns cosθ sinθ dθ into µ dµ, which yields the formula above. The benefit is that φ no longer needs any special attention; only µ remains as the parameter.
Assume the incident energy is 1; the lost energy is then 1 − E(µ). Because the BRDF is reciprocal, both the incident and outgoing directions have to appear, so the compensation lobe should be proportional to (1 − E(µ_i))(1 − E(µ_o)). But 1 − E(µ) is less than 1, so multiplying two such factors makes the value even smaller and the recovered energy would be wrong; a normalization term has to be added, which gives the formula below.
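Reconstructed from the Kulla-Conty formulation (the original showed it as an image); the normalization uses the average albedo Eavg:

$$f_{ms}(\mu_o,\mu_i)=\frac{\bigl(1-E(\mu_o)\bigr)\bigl(1-E(\mu_i)\bigr)}{\pi\bigl(1-E_{avg}\bigr)},\qquad E_{avg}=2\int_0^1 E(\mu)\,\mu\,\mathrm{d}\mu$$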
The verification below shows that the formula is consistent: the compensation lobe integrates to exactly the missing energy.
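A sketch of that check (my reconstruction of the missing derivation):

$$\int_0^{2\pi}\!\!\int_0^1 f_{ms}(\mu_o,\mu_i)\,\mu_i\,\mathrm{d}\mu_i\,\mathrm{d}\phi=\frac{1-E(\mu_o)}{\pi(1-E_{avg})}\cdot 2\pi\int_0^1\bigl(1-E(\mu_i)\bigr)\mu_i\,\mathrm{d}\mu_i=\frac{1-E(\mu_o)}{\pi(1-E_{avg})}\cdot\pi\bigl(1-E_{avg}\bigr)=1-E(\mu_o)$$

so single scattering plus compensation sums to E(µ_o) + (1 − E(µ_o)) = 1, passing the white furnace test.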
So for any microfacet BRDF, knowing 1 − E(µ) and 1 − Eavg is enough to compute the energy that needs to be compensated. E(µ) depends on three parameters: the angle µ, the roughness α, and the index of refraction. Tabulating all three would take a lot of storage, so the Fresnel term is simply treated as 1 for now, and the precomputed table can then be indexed by µ and α alone.
For colored materials, however, the surface itself absorbs light, so there is an additional energy loss.
The compensation therefore also has to be scaled by a color term that accounts for this loss, and the final BRDF is given below.
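Reconstructed from the Imageworks course material: F_avg is the cosine-averaged Fresnel term, the color term sums the energy that survives repeated bounces, and the full BRDF adds the compensation lobe to the single-scattering microfacet lobe:

$$F_{avg}=2\int_0^1 F(\mu)\,\mu\,\mathrm{d}\mu,\qquad f_{add}=\frac{F_{avg}E_{avg}}{1-F_{avg}\bigl(1-E_{avg}\bigr)},\qquad f_r=f_{micro}+f_{add}\,f_{ms}$$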
//Emu_MC.cpp
float DistributionGGX(Vec3f N, Vec3f H, float roughness)
{
    float a = roughness * roughness;
    float a2 = a * a;
    float NdotH = std::max(dot(N, H), 0.0f);
    float NdotH2 = NdotH * NdotH;

    float nom = a2;
    float denom = (NdotH2 * (a2 - 1.0) + 1.0);
    denom = PI * denom * denom;

    return nom / std::max(denom, 0.0001f);
}

float GeometrySchlickGGX(float NdotV, float roughness) {
    float a = roughness;
    float k = (a * a) / 2.0f;

    float nom = NdotV;
    float denom = NdotV * (1.0f - k) + k;

    return nom / denom;
}

float GeometrySmith(float roughness, float NoV, float NoL) {
    float ggx2 = GeometrySchlickGGX(NoV, roughness);
    float ggx1 = GeometrySchlickGGX(NoL, roughness);

    return ggx1 * ggx2;
}

Vec3f IntegrateBRDF(Vec3f V, float roughness, float NdotV) {
    float A = 0.0;
    float B = 0.0;
    float C = 0.0;
    const int sample_count = 1024;
    Vec3f N = Vec3f(0.0, 0.0, 1.0);
    samplePoints sampleList = squareToCosineHemisphere(sample_count);
    for (int i = 0; i < sample_count; i++) {
        // TODO: To calculate (fr * ni) / p_o here
        Vec3f L = normalize(sampleList.directions[i]);
        float pdf = sampleList.PDFs[i];
        Vec3f H = normalize(L + V);
        float NdotL = dot(N, L);

        // Fresnel is treated as 1 in the precomputation, so only D and G matter here.
        float F = 1.0;
        float G = GeometrySmith(roughness, NdotV, NdotL);
        float D = DistributionGGX(N, H, roughness);

        // Monte Carlo estimate of E(mu): accumulate f_r * cos(theta_i) / pdf.
        float denominator = 4 * NdotL * NdotV;
        float result = F * G * D / denominator * NdotL / pdf;

        A += result;
        B += result;
        C += result;
    }
    return {A / sample_count, B / sample_count, C / sample_count};
}
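For completeness, a minimal sketch of how IntegrateBRDF could be driven to fill the 2D table over (µ, α). This is only an illustration, not the framework's exact main(): the global resolution and the helper setRGB are assumptions borrowed from the Eavg code further below.

// Sketch only: assumes a global `int resolution` (e.g. 128) and a helper
// setRGB(row, col, Vec3f value, uint8_t *data) as used in the Eavg code below.
#include <vector>

int main() {
    std::vector<uint8_t> data(resolution * resolution * 3);
    float step = 1.0f / resolution;
    for (int i = 0; i < resolution; i++) {
        // one row per roughness value, sampled at texel centers
        float roughness = step * (static_cast<float>(i) + 0.5f);
        for (int j = 0; j < resolution; j++) {
            // one column per mu = N.V value
            float NdotV = step * (static_cast<float>(j) + 0.5f);
            // view direction in the local frame where N = (0, 0, 1)
            Vec3f V = Vec3f(std::sqrt(1.f - NdotV * NdotV), 0.f, NdotV);
            Vec3f E = IntegrateBRDF(V, roughness, NdotV);
            setRGB(i, j, E, data.data());
        }
    }
    stbi_write_png("GGX_E_MC_LUT.png", resolution, resolution, 3, data.data(), resolution * 3);
    return 0;
}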
Precomputing Eavg
Given the assignment's requirements, no sampling is actually needed here: just accumulate over µ in main and take the average; the discrete form is written out below.
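Concretely, the loop below computes (my reading of the code, with Δµ = 1/resolution and µ_j sampled at texel centers):

$$E_{avg}=2\int_0^1 E(\mu)\,\mu\,\mathrm{d}\mu\;\approx\;2\sum_{j}E(\mu_j)\,\mu_j\,\Delta\mu$$

which corresponds to the line Eavg += Ei * NdotV * 2.0 * step.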
int main() {
    unsigned char *Edata = stbi_load("./GGX_E_MC_LUT.png", &resolution, &resolution, &channel, 3);
    if (Edata == NULL) {
        std::cout << "ERROR_FILE_NOT_LOAD" << std::endl;
        return -1;
    }
    else {
        std::cout << resolution << " " << resolution << " " << channel << std::endl;

        // | -----> mu(j)
        // |
        // | rough(i)
        // flip it if you want to write the data on picture
        uint8_t data[resolution * resolution * 3];
        float step = 1.0 / resolution;
        Vec3f Eavg = Vec3f(0.0);

        for (int i = 0; i < resolution; i++) {
            float roughness = step * (static_cast<float>(i) + 0.5f);
            for (int j = 0; j < resolution; j++) {
                float NdotV = step * (static_cast<float>(j) + 0.5f);
                Vec3f V = Vec3f(std::sqrt(1.f - NdotV * NdotV), 0.f, NdotV);

                Vec3f Ei = getEmu((resolution - 1 - i), j, 0, Edata, NdotV, roughness);
                // Eavg += IntegrateEmu(V, roughness, NdotV, Ei) * step;
                // Riemann sum of 2 * E(mu) * mu * dmu over the row
                Eavg += Ei * NdotV * 2.0 * step;
                setRGB(i, j, 0.0, data);
            }

            // Eavg is constant along mu, so write the same value across the whole row
            for (int k = 0; k < resolution; k++) {
                setRGB(i, k, Eavg, data);
            }
            Eavg = Vec3f(0.0);
        }

        // stbi_flip_vertically_on_write(true);
        stbi_write_png("GGX_Eavg_LUT.png", resolution, resolution, channel, data, 0);
    }
    stbi_image_free(Edata);
    return 0;
}
Bonus 1: Importance Sampling
The formulas given in the assignment document are used directly here.
The microfacet normal (half-vector) is sampled first, and the incident light direction is then obtained by reflecting the view direction about it.
Three pieces are needed: the sampling of the normal, the computation of the pdf, and the final per-sample weight. The original showed these as images; a reconstruction follows below.
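My reconstruction of those formulas for GGX importance sampling (with ξ₁, ξ₂ the Hammersley random numbers and α the roughness as the code uses it); the last line matches the weight computed in the code below:

$$\theta_h=\arctan\!\left(\frac{\alpha\sqrt{\xi_1}}{\sqrt{1-\xi_1}}\right),\qquad \phi_h=2\pi\,\xi_2$$

$$\mathrm{pdf}(h)=D(h)\,(n\cdot h),\qquad \mathrm{pdf}(l)=\frac{D(h)\,(n\cdot h)}{4\,(v\cdot h)}$$

$$\text{weight}=\frac{f_r\,(n\cdot l)}{\mathrm{pdf}(l)}=\frac{F\,G\,(v\cdot h)}{(n\cdot v)\,(n\cdot h)}\;\overset{F=1}{=}\;\frac{G\,(v\cdot h)}{(n\cdot v)\,(n\cdot h)}$$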
//Emu_IS.cpp
Vec3f ImportanceSampleGGX(Vec2f Xi, Vec3f N, float roughness) {
    float a = roughness * roughness;

    //TODO: in spherical space - Bonus 1
    float theta = atan(a * sqrt(Xi.x) / sqrt(1.0 - Xi.x));
    float phi = 2.0 * PI * Xi.y;

    //TODO: from spherical space to cartesian space - Bonus 1
    Vec3f H = Vec3f(cos(phi) * sin(theta), sin(phi) * sin(theta), cos(theta));

    //TODO: tangent coordinates - Bonus 1
    // build an orthonormal basis around N (pick a helper axis that is not parallel to N)
    Vec3f temp = Vec3f(0.0, 0.0, 1.0);
    if (abs(N.z) > 0.999)
        temp = Vec3f(1.0, 0.0, 0.0);
    Vec3f tangent = normalize(cross(temp, N));
    Vec3f bitangent = normalize(cross(N, tangent));

    //TODO: transform H to tangent space - Bonus 1
    Vec3f sample = tangent * H.x + bitangent * H.y + N * H.z;
    return normalize(sample);
}

//Emu_IS.cpp
Vec3f IntegrateBRDF(Vec3f V, float roughness) {
    const int sample_count = 1024;
    Vec3f N = Vec3f(0.0, 0.0, 1.0);
    Vec3f Emu = Vec3f(0.0);
    for (int i = 0; i < sample_count; i++) {
        Vec2f Xi = Hammersley(i, sample_count);
        Vec3f H = ImportanceSampleGGX(Xi, N, roughness);
        // reflect V about the sampled half-vector H to get the light direction L
        Vec3f L = normalize(H * 2.0f * dot(V, H) - V);

        float NoL = std::max(L.z, 0.0f);
        float NoH = std::max(H.z, 0.0f);
        float VoH = std::max(dot(V, H), 0.0f);
        float NoV = std::max(dot(N, V), 0.0f);

        // TODO: To calculate (fr * ni) / p_o here - Bonus 1
        float G = GeometrySmith(roughness, NoV, NoL);
        float weight = VoH * G / NoV / NoH;
        Emu += Vec3f(1.0) * weight;

        // Split Sum - Bonus 2
    }
    std::cout << Emu.x << " " << Emu.y << " " << Emu.z << std::endl;
    return Emu / sample_count;
}
Using the Precomputed Data in Real-Time Rendering
//KullaContyFragment.glsl
#ifdef GL_ES
precision mediump float;
#endif

uniform vec3 uLightPos;
uniform vec3 uCameraPos;
uniform vec3 uLightRadiance;
uniform vec3 uLightDir;

uniform sampler2D uAlbedoMap;
uniform float uMetallic;
uniform float uRoughness;
uniform sampler2D uBRDFLut;
uniform sampler2D uEavgLut;
uniform samplerCube uCubeTexture;

varying highp vec2 vTextureCoord;
varying highp vec3 vFragPos;
varying highp vec3 vNormal;

const float PI = 3.14159265359;

float DistributionGGX(vec3 N, vec3 H, float roughness)
{
    // TODO: To calculate GGX NDF here
    float a2 = roughness * roughness;
    float NdotH = max(dot(N, H), 0.0);
    float NdotH2 = NdotH * NdotH;

    float nom = a2;
    float denom = (NdotH2 * (a2 - 1.0) + 1.0);
    denom = PI * denom * denom;

    return nom / denom;
}

float GeometrySchlickGGX(float NdotV, float roughness)
{
    // TODO: To calculate Schlick G1 here
    float a = roughness;
    float k = (a * a) / 2.0;

    float nom = NdotV;
    float denom = NdotV * (1.0 - k) + k;

    return nom / denom;
}

float GeometrySmith(vec3 N, vec3 V, vec3 L, float roughness)
{
    // TODO: To calculate Smith G here
    float NdotV = max(dot(N, V), 0.0);
    float NdotL = max(dot(N, L), 0.0);
    float ggx2 = GeometrySchlickGGX(NdotV, roughness);
    float ggx1 = GeometrySchlickGGX(NdotL, roughness);

    return ggx1 * ggx2;
}

vec3 fresnelSchlick(vec3 F0, vec3 V, vec3 H)
{
    // TODO: To calculate Schlick F here
    return F0 + (1.0 - F0) * pow(1.0 - dot(V, H), 5.0);
}

//https://blog.selfshadow.com/publications/s2017-shading-course/imageworks/s2017_pbs_imageworks_slides_v2.pdf
vec3 AverageFresnel(vec3 r, vec3 g)
{
    return vec3(0.087237) + 0.0230685*g - 0.0864902*g*g + 0.0774594*g*g*g
           + 0.782654*r - 0.136432*r*r + 0.278708*r*r*r
           + 0.19744*g*r + 0.0360605*g*g*r - 0.2586*g*r*r;
}

vec3 MultiScatterBRDF(float NdotL, float NdotV)
{
    vec3 albedo = pow(texture2D(uAlbedoMap, vTextureCoord).rgb, vec3(2.2));

    vec3 E_o = texture2D(uBRDFLut, vec2(NdotL, uRoughness)).xyz;
    vec3 E_i = texture2D(uBRDFLut, vec2(NdotV, uRoughness)).xyz;

    vec3 E_avg = texture2D(uEavgLut, vec2(0, uRoughness)).xyz;
    // copper
    vec3 edgetint = vec3(0.827, 0.792, 0.678);
    vec3 F_avg = AverageFresnel(albedo, edgetint);

    // TODO: To calculate fms and missing energy here
    vec3 fms = (vec3(1.0) - E_o) * (vec3(1.0) - E_i) / (PI * (vec3(1.0) - E_avg));
    vec3 fadd = F_avg * E_avg / (vec3(1.0) - F_avg * (vec3(1.0) - E_avg));

    return fms * fadd;
}

void main(void) {
    vec3 albedo = pow(texture2D(uAlbedoMap, vTextureCoord).rgb, vec3(2.2));
    vec3 N = normalize(vNormal);
    vec3 V = normalize(uCameraPos - vFragPos);
    float NdotV = max(dot(N, V), 0.0);

    vec3 F0 = vec3(0.04);
    F0 = mix(F0, albedo, uMetallic);

    vec3 Lo = vec3(0.0);

    // calculate per-light radiance
    vec3 L = normalize(uLightDir);
    vec3 H = normalize(V + L);
    float distance = length(uLightPos - vFragPos);
    float attenuation = 1.0 / (distance * distance);
    vec3 radiance = uLightRadiance;

    float NDF = DistributionGGX(N, H, uRoughness);
    float G = GeometrySmith(N, V, L, uRoughness);
    vec3 F = fresnelSchlick(F0, V, H);

    vec3 numerator = NDF * G * F;
    float denominator = 4.0 * max(dot(N, V), 0.0) * max(dot(N, L), 0.0);
    vec3 Fmicro = numerator / max(denominator, 0.001);

    float NdotL = max(dot(N, L), 0.0);

    vec3 Fms = MultiScatterBRDF(NdotL, NdotV);
    vec3 BRDF = Fmicro + Fms;

    Lo += BRDF * radiance * NdotL;
    vec3 color = Lo;

    color = color / (color + vec3(1.0));
    color = pow(color, vec3(1.0/2.2));
    gl_FragColor = vec4(color, 1.0);
}
Results
The E(µ) LUT obtained without importance sampling.
The E(µ) LUT obtained with importance sampling.
The Eavg LUT.
The rendered result with the precomputed energy compensation applied.