Commit 527fd05c authored by Florian Oetke's avatar Florian Oetke
Browse files

tweaked gi and documented the most important shaders

parent a064b41d
......@@ -4,7 +4,8 @@
#include "normal_encoding.glsl"
#include "color_conversion.glsl"
#include "random.glsl"
#include "global_uniforms.glsl"
layout(location = 0) in Vertex_data {
vec2 tex_coords;
......@@ -128,5 +129,5 @@ vec3 resolve_fxaa() {
void main() {
//out_color = vec4(tone_mapping(texture(color_sampler, vertex_out.tex_coords).rgb), 1.0);
out_color = vec4(tone_mapping(resolve_fxaa().rgb), 1.0);
out_color = vec4(tone_mapping(resolve_fxaa().rgb) + random(vec4(vertex_out.tex_coords,global_uniforms.time.x,0))/255.0/2.0, 1.0);
}
......@@ -2,10 +2,6 @@
#extension GL_ARB_separate_shader_objects : enable
#extension GL_ARB_shading_language_420pack : enable
#include "normal_encoding.glsl"
#include "poisson.glsl"
#include "random.glsl"
layout(location = 0) in Vertex_data {
vec2 tex_coords;
......@@ -17,31 +13,31 @@ layout(location = 1) out vec4 out_mat_data;
layout(set=1, binding = 0) uniform sampler2D depth_sampler;
layout(set=1, binding = 1) uniform sampler2D mat_data_sampler;
layout(push_constant) uniform Push_constants {
vec4 arguments;
} pcs;
// gaussian weight for normal axis difference
// gaussian falloff used to weight differences along a normal axis
// (sigma = 0.1; returns 1 at x = 0 and decays towards 0 for larger |x|)
float g1(float x) {
	const float sigma = 0.1;
	return exp(-x * x / (2.0 * sigma * sigma));
}
// gaussian weight for depth difference
// gaussian falloff used to weight depth differences
// (sigma = 0.001, i.e. much narrower than g1; returns 1 at x = 0)
float g2(float x) {
	const float sigma = 0.001;
	return exp(-x * x / (2.0 * sigma * sigma));
}
// finds the pixel of the 4 high-res pixels that is most similar to their surrounding 16 pixels
void main() {
// calculate uv coordinates of 2x2 blocks to sample
vec2 tex_size = textureSize(depth_sampler, 0);
const vec2 uv_00 = vertex_out.tex_coords + vec2(-1,-1) / tex_size;
const vec2 uv_10 = vertex_out.tex_coords + vec2( 1,-1) / tex_size;
const vec2 uv_11 = vertex_out.tex_coords + vec2( 1, 1) / tex_size;
const vec2 uv_01 = vertex_out.tex_coords + vec2(-1, 1) / tex_size;
const ivec2[] center_offsets = ivec2[4](ivec2(0,0), ivec2(1,0), ivec2(1,1), ivec2(0,1));
// sample depth and calculate score based on their difference to the center depth value
vec4 depth_00 = textureGather(depth_sampler, uv_00, 0);
vec4 depth_10 = textureGather(depth_sampler, uv_10, 0);
vec4 depth_11 = textureGather(depth_sampler, uv_11, 0);
......@@ -52,14 +48,13 @@ void main() {
vec4 center_depths = vec4(depth_00.y, depth_10.x, depth_11.w, depth_01.z);
vec4 score = vec4(0.8, 0.8, 0.8, 1.0);
vec4 score = vec4(1.0);
score *= vec4(g2(avg_depth - center_depths.x),
g2(avg_depth - center_depths.y),
g2(avg_depth - center_depths.z),
g2(avg_depth - center_depths.w) );
// sample x axis of encoded normals and modulate score based on their difference to center value
vec4 normal_x_00 = textureGather(mat_data_sampler, uv_00, 0);
vec4 normal_x_10 = textureGather(mat_data_sampler, uv_10, 0);
vec4 normal_x_11 = textureGather(mat_data_sampler, uv_11, 0);
......@@ -75,7 +70,7 @@ void main() {
g1(avg_normal_x - center_normals_x.z),
g1(avg_normal_x - center_normals_x.w) );
// sample y axis of encoded normals and modulate score based on their difference to center value
vec4 normal_y_00 = textureGather(mat_data_sampler, uv_00, 1);
vec4 normal_y_10 = textureGather(mat_data_sampler, uv_10, 1);
vec4 normal_y_11 = textureGather(mat_data_sampler, uv_11, 1);
......@@ -91,6 +86,7 @@ void main() {
g1(avg_normal_y - center_normals_y.z),
g1(avg_normal_y - center_normals_y.w) );
// determine the index of the pixel with the highest score
int max_index = 3;
float s = score.w;
......@@ -107,7 +103,7 @@ void main() {
s = score.z;
}
// write the depth/mat_data that is most similar to its surroundings
out_depth = texelFetch(depth_sampler, ivec2(vertex_out.tex_coords * tex_size) + center_offsets[max_index], 0);
out_mat_data = texelFetch(mat_data_sampler, ivec2(vertex_out.tex_coords * tex_size) + center_offsets[max_index], 0);
}
......@@ -18,17 +18,26 @@ layout(set=1, binding = 5) uniform sampler2D prev_depth_sampler;
layout(set=1, binding = 6) uniform sampler2D prev_mat_data_sampler;
layout(set=1, binding = 7) uniform sampler2D ao_sampler;
layout (constant_id = 0) const int LAST_SAMPLE = 0;
layout (constant_id = 1) const float R = 40;
layout (constant_id = 2) const int SAMPLES = 128;
layout (constant_id = 3) const int UPSAMPLE_ONLY = 0;
layout (constant_id = 0) const int LAST_SAMPLE = 0; // 1 if this is the last MIP level to sample
layout (constant_id = 1) const float R = 40; // the radius to fetch samples from
layout (constant_id = 2) const int SAMPLES = 128; // the number of samples to fetch
layout (constant_id = 3) const int UPSAMPLE_ONLY = 0;// 1 if only the previous result should be
// upsampled but no new samples calculated
// nearer samples have a higher weight. Less physically correct but results in more notacable color bleeding
// nearer samples have a higher weight. Less physically correct but results in more noticeable color bleeding
layout (constant_id = 4) const float PRIORITISE_NEAR_SAMPLES = 0.6;
// arguments are packet into the matrices to keep the pipeline layouts compatible between GI passes
layout(push_constant) uniform Push_constants {
// [3][3] = intensity of ambient occlusion (0 = disabled)
mat4 projection;
// [0][0] = higher resolution base MIP level
// [2][0] = exponent for alternative MIP level scaling factor (for more color bleeding)
// [0][3] = current MIP level (relative to base MIP)
// [1][3] = highest relevant MIP level (relative to base MIP)
// [2][3] = precalculated part of the ds factor used in calc_illumination_from
// [3][3] = base MIP level
mat4 prev_projection;
} pcs;
......@@ -44,6 +53,7 @@ vec3 gi_sample(int lod, int base_mip);
vec3 calc_illumination_from(int lod, vec2 tex_size, ivec2 src_uv, vec2 shaded_uv, float shaded_depth,
vec3 shaded_point, vec3 shaded_normal, out float weight);
// calculate luminance of a color (used for normalization)
float luminance_norm(vec3 c) {
vec3 f = vec3(0.299,0.587,0.114);
return sqrt(c.r*c.r*f.r + c.g*c.g*f.g + c.b*c.b*f.b);
......@@ -54,6 +64,7 @@ void main() {
float max_mip = pcs.prev_projection[1][3];
float base_mip = pcs.prev_projection[3][3];
// upsample the previous result (if there is one)
if(current_mip < max_mip)
out_color = vec4(upsampled_result(depth_sampler, mat_data_sampler,
prev_depth_sampler, prev_mat_data_sampler,
......@@ -61,11 +72,13 @@ void main() {
else
out_color = vec4(0,0,0, 1);
// calculate contribution from this level (if we haven't reached the target level, yet)
if(UPSAMPLE_ONLY==0)
out_color.rgb += gi_sample(int(current_mip+0.5), int(base_mip+0.5));
// last mip level => blend with history
// reached the last MIP level => blend with history
if(abs(current_mip - base_mip) < 0.00001) {
// calculate interpolation factor based on the depth-error in its surroundings during reprojection
vec2 hws_step = 1.0 / textureSize(history_weight_sampler, 0);
vec4 history_weights = textureGather(history_weight_sampler, vertex_out.tex_coords+hws_step, 0);
......@@ -74,53 +87,52 @@ void main() {
history_weights = textureGather(history_weight_sampler, vertex_out.tex_coords-hws_step, 0);
history_weight = min(history_weight,min(history_weights.x, min(history_weights.y, min(history_weights.z, history_weights.w))));
// modulate diffuse GI by ambient occlusion
if(pcs.projection[3][3]>0.0) {
float ao = texture(ao_sampler, vertex_out.tex_coords).r;
ao = mix(1.0, ao, pcs.projection[3][3]);
out_color.rgb *= ao;
}
// normalize diffuse GI by its luminance to reduce fire-flies
out_color.rgb /= (1 + luminance_norm(out_color.rgb));
// calculate the min/max interpolation weights based on the delta time
float weight_measure = smoothstep(5.0/1000.0, 40.0/1000.0, global_uniforms.time.z);
float weight_min = mix(0.8, 0.1, weight_measure);
float weight_max = mix(0.95, 0.8, weight_measure);
float weight_measure = smoothstep(1.0/120.0, 1.0/30.0, global_uniforms.time.z);
float weight_min = mix(0.85, 0.1, weight_measure);
float weight_max = mix(0.98, 0.85, weight_measure);
// scale by calculated weight to alpha-blend with the reprojected result of the previous frame
out_color *= 1.0 - mix(weight_min, weight_max, history_weight);
}
// clamp the result to a reasonable range to reduce artefacts
out_color = clamp(out_color, vec4(0), vec4(20));
}
const float PI = 3.14159265359;
const float REC_PI = 0.3183098861837906715;
// adjusts the saturation of a color: interpolates between the color's
// perceived grayscale value (change = 0) and the unmodified color (change = 1);
// values > 1 push the color further away from grey, increasing saturation
vec3 saturation(vec3 c, float change) {
	const vec3 luma_weights = vec3(0.299, 0.587, 0.114);
	float grey = sqrt(dot(c*c, luma_weights));
	return vec3(grey) + (c - vec3(grey)) * vec3(change);
}
const float PI = 3.14159265359;
const float REC_PI = 0.3183098861837906715; // 1/PI
// calculates the diffuse GI contribution from this MIP level
vec3 gi_sample(int lod, int base_mip) {
// calculate uv coordinates in pixel
vec2 texture_size = textureSize(color_sampler, 0);
ivec2 uv = ivec2(vertex_out.tex_coords * texture_size);
// workaround for white bar at right/bottom border
// clamp area to reduce artefacts around borders
if(uv.y >= texture_size.y-1 || uv.x >= texture_size.x-1)
return vec3(0, 0, 0);
// fetch the depth/normal of the target pixel and reconstruct its view-space position
float depth = texelFetch(depth_sampler, uv, 0).r;
vec4 mat_data = texelFetch(mat_data_sampler, uv, 0);
vec3 N = decode_normal(mat_data.rg);
vec3 P = position_from_ldepth(vertex_out.tex_coords, depth);
// fetch SAMPLES samples in a spiral pattern and combine their GI contribution
vec3 c = vec3(0,0,0);
float samples_used = 0.0;
float angle = random(vec4(vertex_out.tex_coords, 0.0, 0));
float angle = random(vec4(vertex_out.tex_coords, 0, global_uniforms.time.x*10.0));
float angle_step = 1.0 / float(SAMPLES) * PI * 2.0 * 19.0;
for(int i=0; i<SAMPLES; i++) {
......@@ -138,59 +150,62 @@ vec3 gi_sample(int lod, int base_mip) {
samples_used += weight;
}
// could be used to blend between screen-space and static GI
// float visibility = 1.0 - (samples_used / float(SAMPLES));
// scale the collected diffuse GI by a factor to compensate for low intensity of smaller MIP levels
float actual_lod = lod - pcs.prev_projection[0][0];
float scale_exponent = mix(actual_lod,
pcs.prev_projection[1][0] - pcs.prev_projection[2][0] - 0.8,
pcs.prev_projection[2][0],
PRIORITISE_NEAR_SAMPLES);
c *= pow(2.0, scale_exponent*2);
// normalize based on number of samples for consistent results, independent of SAMPLES
c *= 128.0 / SAMPLES;
return c;
}
// calculate the light transfer between two pixels of the current level
vec3 calc_illumination_from(int lod, vec2 tex_size, ivec2 src_uv, vec2 shaded_uv, float shaded_depth,
vec3 shaded_point, vec3 shaded_normal, out float weight) {
// fetch depth/normal at src pixel
vec4 mat_data = texelFetch(mat_data_sampler, src_uv, 0);
vec3 N = decode_normal(mat_data.rg);
float depth = texelFetch(depth_sampler, src_uv, 0).r;
if(depth>=0.9999) {
// we hit the skybox => reduce depth so it still contributes some light
depth = 0.1;
}
vec3 P = position_from_ldepth(src_uv / tex_size, depth); // x_i
// reconstruct the position (x_i) of the src point and calculate the direction and distance^2 to x
vec3 P = position_from_ldepth(src_uv / tex_size, depth);
vec3 Pn = normalize(P);
vec3 diff = shaded_point - P;
vec3 dir = normalize(diff);
float r2 = max(dot(diff, diff), 0.01*0.01);
float visibility = 1.0; // TODO
float r2 = dot(diff, diff);
vec4 mat_data = texelFetch(mat_data_sampler, src_uv, 0);
vec3 N = decode_normal(mat_data.rg);
float visibility = 1.0; // v(x, x_i); currently not implemented
vec3 radiance = texelFetch(color_sampler, src_uv, 0).rgb;
// interpolate r^2 to (r^2)^1.25 for distant pixels to counter missing visibility term
r2 = mix(r2, pow(r2,1.25), clamp((r2-1)*0.5, 0, 1));
float NdotL_src = clamp(dot(N, dir), 0.0, 1.0); // cos(θ')
float NdotL_src = clamp(dot(N, dir), 0.0, 1.0); // cos(θ')
float NdotL_dst = clamp(dot(shaded_normal, -dir), 0.0, 1.0); // cos(θ)
// if the material is an emitter (mat_data.b=0), flip the normal if that would result in a higher
// intensity (approximates light emitted from the backside by assuming rotation invariance)
NdotL_src = mix(max(clamp(dot(-N, dir), 0.0, 1.0), NdotL_src), NdotL_src, step(0.0001, mat_data.b));
// calculate the size of the differential area
float cos_alpha = Pn.z;
float cos_beta = dot(Pn, N);
float z = depth * global_uniforms.proj_planes.y;
float ds = pcs.prev_projection[2][3] * z*z * clamp(cos_alpha / cos_beta, 0.001, 1000.0);
float R2 = REC_PI * NdotL_src * ds;
float area = R2 / (r2 + R2); // point-to-differential area form-factor
weight = visibility * NdotL_dst * area * step(0.1, r2);
// multiply all factors, that modulate the light transfer
weight = visibility * NdotL_dst * NdotL_src * ds / (0.1+r2);
// fetch the light emitted by the src pixel, modulate it by the calculated factor and return it
vec3 radiance = texelFetch(color_sampler, src_uv, 0).rgb;
return max(vec3(0.0), radiance * weight);
}
......@@ -136,13 +136,16 @@ void main() {
out_color.rgb /= (1 + luminance_norm(out_color.rgb));
} else {
out_color.rgb = textureLod(result_sampler, vertex_out.tex_coords, pcs.prev_projection[0][3]).rgb / (PI*PI*2);
out_color.rgb = textureLod(result_sampler, vertex_out.tex_coords, pcs.prev_projection[0][3]).rgb / (PI*PI*4);
}
float history_weight = texelFetch(history_weight_sampler,
ivec2(vertex_out.tex_coords * textureSize(history_weight_sampler, 0)),
0).r;
float weight_measure = smoothstep(1.0/120.0, 1.0/30.0, global_uniforms.time.z);
float weight_min = mix(0.85, 0.1, weight_measure);
float weight_max = mix(0.95, 0.85, weight_measure);
out_color *= 1.0 - (history_weight*0.92);
out_color = max(out_color, vec4(0));
......
......@@ -2,57 +2,52 @@
#define UPSAMPLE_INCLUDED
#include "global_uniforms.glsl"
#include "poisson.glsl"
#include "random.glsl"
vec4 weight_depth(vec4 x, float depth_dev) {
float c = depth_dev;
return exp(- x*x / (2*c*c));
// calculate a weighting factor based on the normal differences x and the deviation
vec4 weight_depth(vec4 x, float dev) {
return exp(-x*x / (2*dev*dev));
}
// calculate a weighting factor based on the difference of the encoded normals
// weighting factor based on the squared difference (dx, dy) of two encoded
// normals; falls off smoothly between 0.05 and 0.2 and is clamped to a small
// minimum so no sample's contribution is discarded entirely
vec4 weight_mat_data(vec4 dx, vec4 dy) {
	vec4 diff2 = dx*dx + dy*dy;
	return max(vec4(0.005), 1 - smoothstep(0.05, 0.2, diff2));
}
// calculate the uv coordinates of the 2x2 blocks to sample and the per-pixel weights based on normal/depth
// returns the sum of all per-pixel weights
float calc_upsampled_weights(sampler2D highres_depth_sampler, sampler2D highres_mat_data_sampler,
sampler2D depth_sampler, sampler2D mat_data_sampler, vec2 tex_coords,
out vec2 uv_00, out vec2 uv_10, out vec2 uv_11, out vec2 uv_01,
sampler2D depth_sampler, sampler2D mat_data_sampler,
vec2 tex_coords,
out vec2 uv_00, out vec2 uv_10, out vec2 uv_11, out vec2 uv_01,
out vec4 weight_00, out vec4 weight_10, out vec4 weight_11, out vec4 weight_01) {
vec2 tex_size = textureSize(depth_sampler, 0);
float depth = texelFetch(highres_depth_sampler, ivec2(textureSize(highres_depth_sampler, 0)*tex_coords), 0).r;
float depth_dev = mix(0.3, 1.5, depth) / global_uniforms.proj_planes.y;
// sample high-res depth + normal
float depth = texelFetch(highres_depth_sampler, ivec2(textureSize(highres_depth_sampler, 0)*tex_coords), 0).r;
vec2 normal = texelFetch(highres_mat_data_sampler, ivec2(textureSize(highres_mat_data_sampler, 0)*tex_coords), 0).xy;
// calculate uv coordinates
vec2 tex_size = textureSize(depth_sampler, 0);
uv_00 = tex_coords + vec2(-1,-1) / tex_size;
uv_10 = tex_coords + vec2( 1,-1) / tex_size;
uv_11 = tex_coords + vec2( 1, 1) / tex_size;
uv_01 = tex_coords + vec2(-1, 1) / tex_size;
weight_00 = vec4(0.125794409230998,
0.132980760133811,
0.125794409230998,
0.118996412547595);
weight_10 = vec4(0.125794409230998,
0.106482668507451,
0.100728288549083,
0.118996412547595);
weight_11 = vec4(0.100728288549083,
0.085264655436308,
0.100728288549083,
0.118996412547595);
weight_01 = vec4(0.100728288549083,
0.106482668507451,
0.125794409230998,
0.118996412547595);
// initialize the per-pixel weights with gaussian weights
weight_00 = vec4(0.125794409230998, 0.132980760133811, 0.125794409230998, 0.118996412547595);
weight_10 = vec4(0.125794409230998, 0.106482668507451, 0.100728288549083, 0.118996412547595);
weight_11 = vec4(0.100728288549083, 0.085264655436308, 0.100728288549083, 0.118996412547595);
weight_01 = vec4(0.100728288549083, 0.106482668507451, 0.125794409230998, 0.118996412547595);
// calculate the maximum depth deviation based on the distance, to reduce blurring
// near the camera where it's most noticeable
float depth_dev = mix(0.3, 1.5, depth) / global_uniforms.proj_planes.y;
// sample low-res depth and modulate the weights based on their difference to the high-res depth
weight_00 *= weight_depth(textureGather(depth_sampler, uv_00, 0) - depth, depth_dev);
weight_10 *= weight_depth(textureGather(depth_sampler, uv_10, 0) - depth, depth_dev);
weight_11 *= weight_depth(textureGather(depth_sampler, uv_11, 0) - depth, depth_dev);
weight_01 *= weight_depth(textureGather(depth_sampler, uv_01, 0) - depth, depth_dev);
// sample the encoded low-res normals
vec4 normal_x_00 = textureGather(mat_data_sampler, uv_00, 0) - normal.x;
vec4 normal_x_10 = textureGather(mat_data_sampler, uv_10, 0) - normal.x;
vec4 normal_x_11 = textureGather(mat_data_sampler, uv_11, 0) - normal.x;
......@@ -63,40 +58,37 @@ float calc_upsampled_weights(sampler2D highres_depth_sampler, sampler2D highres_
vec4 normal_y_11 = textureGather(mat_data_sampler, uv_11, 1) - normal.y;
vec4 normal_y_01 = textureGather(mat_data_sampler, uv_01, 1) - normal.y;
// modulate the weights based on normal difference
weight_00 *= weight_mat_data(normal_x_00, normal_y_00);
weight_10 *= weight_mat_data(normal_x_10, normal_y_10);
weight_11 *= weight_mat_data(normal_x_11, normal_y_11);
weight_01 *= weight_mat_data(normal_x_01, normal_y_01);
// sum all per-pixel weights
return dot(weight_00, vec4(1))
+ dot(weight_10, vec4(1))
+ dot(weight_11, vec4(1))
+ dot(weight_01, vec4(1));
}
// calculate the high-res approximation of the given low-res solution (color_sampler) at a single point
// using Joint Bilateral Upsampling based on the high- and low-res normal and depth values
vec3 upsampled_result(sampler2D highres_depth_sampler, sampler2D highres_mat_data_sampler,
sampler2D depth_sampler, sampler2D mat_data_sampler,
sampler2D color_sampler, vec2 tex_coords) {
vec2 uv_00;
vec2 uv_10;
vec2 uv_11;
vec2 uv_01;
vec4 weight_00;
vec4 weight_10;
vec4 weight_11;
vec4 weight_01;
sampler2D depth_sampler, sampler2D mat_data_sampler,
sampler2D color_sampler, vec2 tex_coords) {
// calculate the uv coordinates and per-pixel weights
vec2 uv_00, uv_10, uv_11, uv_01;
vec4 weight_00, weight_10, weight_11, weight_01;
float weight_sum = calc_upsampled_weights(highres_depth_sampler, highres_mat_data_sampler,
depth_sampler, mat_data_sampler, tex_coords,
uv_00, uv_10, uv_11, uv_01,
weight_00, weight_10, weight_11, weight_01);
depth_sampler, mat_data_sampler, tex_coords,
uv_00, uv_10, uv_11, uv_01,
weight_00, weight_10, weight_11, weight_01);
// fallback to linear interpolation if no good match could be found in the low-res solution
if(weight_sum<0.001)
return textureLod(color_sampler, tex_coords, 0).rgb;
// gather the RGB values of the 16 surrounding pixels and weight them by the calculated weights
float color_r = dot(vec4(1),
textureGather(color_sampler, uv_00, 0) * weight_00
+ textureGather(color_sampler, uv_10, 0) * weight_10
......@@ -115,36 +107,30 @@ vec3 upsampled_result(sampler2D highres_depth_sampler, sampler2D highres_mat_dat
+ textureGather(color_sampler, uv_11, 2) * weight_11
+ textureGather(color_sampler, uv_01, 2) * weight_01);
vec3 color = vec3(color_r, color_g, color_b) / weight_sum;
return color;
return vec3(color_r, color_g, color_b) / weight_sum;
}
// same as upsampled_result but upsamples two solutions at the same time
void upsampled_two(sampler2D highres_depth_sampler, sampler2D highres_mat_data_sampler,
sampler2D depth_sampler, sampler2D mat_data_sampler,
sampler2D color_sampler_a, sampler2D color_sampler_b, vec2 tex_coords,
out vec3 out_color_a, out vec3 out_color_b) {
vec2 uv_00;
vec2 uv_10;
vec2 uv_11;
vec2 uv_01;
vec4 weight_00;
vec4 weight_10;
vec4 weight_11;
vec4 weight_01;
sampler2D depth_sampler, sampler2D mat_data_sampler,
sampler2D color_sampler_a, sampler2D color_sampler_b, vec2 tex_coords,
out vec3 out_color_a, out vec3 out_color_b) {
// calculate the uv coordinates and per-pixel weights
vec2 uv_00, uv_10, uv_11, uv_01;
vec4 weight_00, weight_10, weight_11, weight_01;
float weight_sum = calc_upsampled_weights(highres_depth_sampler, highres_mat_data_sampler,
depth_sampler, mat_data_sampler, tex_coords,
uv_00, uv_10, uv_11, uv_01,
weight_00, weight_10, weight_11, weight_01);
depth_sampler, mat_data_sampler, tex_coords,
uv_00, uv_10, uv_11, uv_01,
weight_00, weight_10, weight_11, weight_01);
// fallback to linear interpolation if no good match could be found in the low-res solution
if(weight_sum<0.001) {
out_color_a = textureLod(color_sampler_a, tex_coords, 0).rgb;
out_color_b = textureLod(color_sampler_b, tex_coords, 0).rgb;
return;
}
// gather the RGB values of the 16 surrounding pixels and weight them by the calculated weights
float color_r = dot(vec4(1),
textureGather(color_sampler_a, uv_00, 0) * weight_00
+ textureGather(color_sampler_a, uv_10, 0) * weight_10
......@@ -165,7 +151,7 @@ void upsampled_two(sampler2D highres_depth_sampler, sampler2D highres_mat_data_s
out_color_a = vec3(color_r, color_g, color_b) / weight_sum;
// gather the RGB values of the 16 surrounding pixels and weight them by the calculated weights
color_r = dot(vec4(1),
textureGather(color_sampler_b, uv_00, 0) * weight_00
+ textureGather(color_sampler_b, uv_10, 0) * weight_10
......
......@@ -41,10 +41,34 @@ namespace mirrage {
namespace {
constexpr auto presets = std::array<Preset, 6>{
{Preset{{-0.00465f, 2.693f, 0.03519f}, 0.f, 0.f, 0.92f, 1.22f, 5600.f, false},
Preset{{-6.2272f, 17.4041f, 0.70684f}, 1.5745f, 1.37925f, 0.64f, 1.41f, 5600.f, false},
Preset{{-6.92102f, 4.65626f, 8.85025f}, -4.71325f, 0.0302201f, 0.74f, 1.22f, 5600.f, true},
Preset{{5.93751f, 5.96643f, -4.34917f}, -0.0337765f, 0.0992601f, 0.62f, 1.22f, 5600.f, false},
Preset{{9.88425f, 5.69793f, 4.93024f}, 0.450757f, -0.0187274f, 0.62f, 1.85f, 5600.f, false},
Preset{{-6.2272f, 17.4041f, 0.70684f},
1.5745f,
1.37925f,
0.64f,
1.41f,
5600.f,
false},
Preset{{-6.92102f, 4.65626f, 8.85025f},
-4.71325f,
0.0302201f,
0.74f,
1.22f,
5600.f,
true},
Preset{{5.93751f, 5.96643f, -4.34917f},
-0.0337765f,
0.0992601f,
0.62f,
1.22f,
5600.f,
false},
Preset{{9.88425f, 5.69793f, 4.93024f},
0.450757f,
-0.0187274f,
0.62f,
1.85f,
5600.f,
false},