mirror of https://github.com/sphynx-owner/JFA_driven_motion_blur_addon.git
synced 2025-09-19 04:06:08 +08:00

first commit

21  addons/MyJumpFloodIteration/LICENSE-MotionBlurEffect  Normal file
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2024 sphynx-owner

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
257  addons/MyJumpFloodIteration/jfp_backtracking_experimental.glsl  Normal file
@@ -0,0 +1,257 @@
#[compute]
#version 450

#define FLT_MAX 3.402823466e+38
#define FLT_MIN 1.175494351e-38
#define DBL_MAX 1.7976931348623158e+308
#define DBL_MIN 2.2250738585072014e-308

layout(set = 0, binding = 0) uniform sampler2D depth_sampler;
layout(set = 0, binding = 1) uniform sampler2D velocity_sampler;
layout(rgba16f, set = 0, binding = 2) uniform image2D buffer_a;
layout(rgba16f, set = 0, binding = 3) uniform image2D buffer_b;

layout(push_constant, std430) uniform Params
{
    int iteration_index;
    int last_iteration_index;
    int nan1;
    int nan2;
    float perpen_error_thresh;
    float sample_step_multiplier;
    float motion_blur_intensity;
    float velocity_match_threshold;
    float parallel_sensitivity;
    float perpendicular_sensitivity;
    float nan3;
    float nan4;
} params;
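// Note: these four ints and eight floats are read from a single 48-byte push constant;
// jump_flood_blur.gd packs a 4-element PackedInt32Array followed by an 8-element
// PackedFloat32Array, so nan1/nan2 and nan3/nan4 appear to be unused padding that keeps
// the two layouts in sync.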

layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;

const int kernel_size = 9;//8;//

const vec2 check_step_kernel[kernel_size] = {
    vec2(0, 0),
    vec2(1, 1),
    vec2(0, 1),
    vec2(-1, 1),
    vec2(1, 0),
    vec2(1, -1),
    vec2(-1, 0),
    vec2(-1, -1),
    vec2(0, -1),
};
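// The kernel above covers the centre texel and its 8 neighbours; each JFA pass samples
// this 3x3 pattern at a progressively halved step size.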

// near plane distance
float npd = 0.05;

vec4 get_value(bool a, ivec2 uvi, ivec2 render_size)
{
    if ((uvi.x >= render_size.x) || (uvi.x < 0) || (uvi.y >= render_size.y) || (uvi.y < 0))
    {
        return vec4(-1, -1, 0, 1);
    }

    if(a)
    {
        return imageLoad(buffer_a, uvi);
    }

    return imageLoad(buffer_b, uvi);
}

void set_value(bool a, ivec2 uvi, vec4 value, ivec2 render_size)
{
    if ((uvi.x >= render_size.x) || (uvi.x < 0) || (uvi.y >= render_size.y) || (uvi.y < 0))
    {
        return;
    }
    if(a)
    {
        imageStore(buffer_a, uvi, value);
        return;
    }

    imageStore(buffer_b, uvi, value);
}

// Motion similarity
// ----------------------------------------------------------
float get_motion_difference(vec2 V, vec2 V2, float parallel_sensitivity, float perpendicular_sensitivity)
{
    vec2 VO = V - V2;
    double parallel = abs(dot(VO, V) / max(DBL_MIN, dot(V, V)));
    vec2 perpen_V = vec2(V.y, -V.x);
    double perpendicular = abs(dot(VO, perpen_V) / max(DBL_MIN, dot(V, V)));
    float difference = float(parallel) * parallel_sensitivity + float(perpendicular) * perpendicular_sensitivity;
    return clamp(difference, 0, 1);
}
// ----------------------------------------------------------

vec4 sample_fitness(vec2 uv_offset, vec4 uv_sample)
{
    vec2 sample_velocity = -uv_sample.xy;

    if (dot(sample_velocity, sample_velocity) <= FLT_MIN)
    {
        return vec4(FLT_MAX, FLT_MAX, FLT_MAX, 0);
    }

    // if(dot(uv_offset, uv_offset) <= FLT_MIN)
    // {
    //     uv_offset = normalize(sample_velocity) * FLT_MIN;
    // }

    double velocity_space_distance = dot(sample_velocity, uv_offset) / max(FLT_MIN, dot(sample_velocity, sample_velocity));

    double mid_point = params.motion_blur_intensity / 2;

    double absolute_velocity_space_distance = abs(velocity_space_distance - mid_point);

    double within_velocity_range = step(absolute_velocity_space_distance, mid_point);

    vec2 perpen_offset = vec2(uv_offset.y, -uv_offset.x);

    double side_offset = abs(dot(perpen_offset, sample_velocity)) / max(FLT_MIN, dot(sample_velocity, sample_velocity));

    double within_perpen_error_range = step(side_offset, params.perpen_error_thresh * params.motion_blur_intensity);

    return vec4(absolute_velocity_space_distance, velocity_space_distance, uv_sample.z, within_velocity_range * within_perpen_error_range);
}
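// sample_fitness() packs its result as:
//   x = |distance along the sample's velocity - mid_point| (lower is better),
//   y = signed distance along the sample's velocity,
//   z = linearized depth (npd / raw depth) of the sample,
//   w = 1 if the offset lies within the velocity and perpendicular-error ranges, else 0.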

bool is_sample_better(vec4 a, vec4 b)
{
    if((a.w == b.w) && (a.w == 1))
    {
        return a.z < b.z;
    }

    float nearer = a.z > b.z ? 1 : 0;

    return a.x * b.w * nearer < b.x * a.w;
}

vec4 get_backtracked_sample(vec2 uvn, vec2 chosen_uv, vec2 chosen_velocity, vec4 best_sample_fitness, vec2 render_size)
{
    //return vec4(chosen_uv, best_sample_fitness.x, 0);// uncomment this to disable backtracking

    int step_count = 16;

    float smallest_step = 1 / max(render_size.x, render_size.y);

    float max_dilation_radius = pow(2, params.last_iteration_index) * params.sample_step_multiplier * smallest_step / (length(chosen_velocity) * params.motion_blur_intensity);

    float general_velocity_multiplier = min(best_sample_fitness.y, max_dilation_radius);

    for(int i = -step_count; i < step_count + 1; i++)
    {
        float velocity_multiplier = general_velocity_multiplier * (1 + float(i) / float(step_count));

        if(velocity_multiplier > params.motion_blur_intensity + 0.2 || velocity_multiplier < FLT_MIN)
        {
            continue;
        }

        vec2 new_sample = uvn - chosen_velocity * velocity_multiplier;

        if((new_sample.x < 0.) || (new_sample.x > 1.) || (new_sample.y < 0.) || (new_sample.y > 1.))
        {
            continue;
        }

        vec2 velocity_test = textureLod(velocity_sampler, new_sample, 0.0).xy;

        if(get_motion_difference(chosen_velocity, velocity_test, params.parallel_sensitivity, params.perpendicular_sensitivity) <= params.velocity_match_threshold)
        {
            chosen_uv = new_sample;
            best_sample_fitness.x = velocity_multiplier;
            return vec4(chosen_uv, best_sample_fitness.x, 0);
        }
    }

    return vec4(uvn, best_sample_fitness.x, 1);
}

void main()
{
    ivec2 render_size = ivec2(textureSize(velocity_sampler, 0));
    ivec2 uvi = ivec2(gl_GlobalInvocationID.xy);
    if ((uvi.x >= render_size.x) || (uvi.y >= render_size.y))
    {
        return;
    }
    vec2 uvn = (vec2(uvi) + vec2(0.5)) / render_size;

    int iteration_index = params.iteration_index;

    float step_size = round(pow(2, params.last_iteration_index - iteration_index));
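    // e.g. with last_iteration_index = 2 (3 JFA passes), the per-pass step sizes are
    // 4, 2 and 1 texels, each scaled by sample_step_multiplier below.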

    vec2 uv_step = vec2(step_size) * params.sample_step_multiplier / render_size;

    vec4 best_sample_fitness = vec4(FLT_MAX, FLT_MAX, FLT_MAX, 0);

    vec2 chosen_uv = uvn;

    vec2 chosen_velocity = vec2(0);

    bool set_a = !bool(step(0.5, float(iteration_index % 2)));

    for(int i = 0; i < kernel_size; i++)
    {
        if((true || params.iteration_index == 0) && i == 0)
        {
            continue;
        }

        vec2 step_offset = check_step_kernel[i] * uv_step;
        vec2 check_uv = uvn + step_offset;

        if((check_uv.x < 0.) || (check_uv.x > 1.) || (check_uv.y < 0.) || (check_uv.y > 1.))
        {
            continue;
        }

        if(iteration_index > 0)
        {
            ivec2 check_uv2 = ivec2(check_uv * render_size);

            vec4 buffer_load = get_value(!set_a, check_uv2, render_size);

            check_uv = buffer_load.xy;

            step_offset = check_uv - uvn;
        }

        vec4 uv_sample = vec4(textureLod(velocity_sampler, check_uv, 0.0).xy, npd / textureLod(depth_sampler, check_uv, 0.0).x, 0);

        vec4 current_sample_fitness = sample_fitness(step_offset, uv_sample);

        if (is_sample_better(current_sample_fitness, best_sample_fitness))
        {
            best_sample_fitness = current_sample_fitness;
            chosen_uv = check_uv;
            chosen_velocity = uv_sample.xy;
        }
    }

    if(iteration_index < params.last_iteration_index)
    {
        set_value(set_a, uvi, vec4(chosen_uv, best_sample_fitness.x, best_sample_fitness.w), render_size);
        return;
    }

    float depth = npd / textureLod(depth_sampler, uvn, 0.0).x;

    if(best_sample_fitness.w == 0 || depth < best_sample_fitness.z)
    {
        set_value(set_a, uvi, vec4(uvn, best_sample_fitness.x, 0), render_size);
        return;
    }

    vec4 backtracked_sample = get_backtracked_sample(uvn, chosen_uv, chosen_velocity, best_sample_fitness, render_size);

    set_value(set_a, uvi, backtracked_sample, render_size);

    return;
}
290  addons/MyJumpFloodIteration/jump_flood_blur.gd  Normal file
@@ -0,0 +1,290 @@
extends CompositorEffect
class_name MotionBlurSphynxJumpFlood

@export_group("Motion Blur", "motion_blur_")
# diminishing returns over 16
@export_range(4, 64) var motion_blur_samples: int = 8
# you really don't want this over 0.5, but you can if you want to try
@export_range(0, 0.5, 0.001, "or_greater") var motion_blur_intensity: float = 1
@export_range(0, 1) var motion_blur_center_fade: float = 0.0


@export var blur_shader_file : RDShaderFile = preload("res://addons/MyJumpFloodIteration/jump_flood_blur.glsl"):
    set(value):
        blur_shader_file = value
        _init()

@export var overlay_shader_file : RDShaderFile = preload("res://addons/MyJumpFloodIteration/jump_flood_overlay.glsl"):
    set(value):
        overlay_shader_file = value
        _init()

@export var construction_pass : RDShaderFile = preload("res://addons/MyJumpFloodIteration/jfp_backtracking_experimental.glsl"):
    set(value):
        construction_pass = value
        _init()

## The portion of a sample's speed that is allowed to bleed sideways during the
## JFA dilation passes and before backtracking. Raising this value makes moving
## meshes blur more reliably, but also lets them bleed further perpendicular to
## their velocity, washing out elements behind them.
@export var perpen_error_threshold : float = 0.3

## An initial step size that increases the dilation radius proportionally, at the
## cost of some quality in the final resolution of the dilation.
@export var sample_step_multiplier : float = 8

## How large a velocity mismatch the backtracking still accepts as a match.
@export var backtracking_velocity_match_threshold : float = 0.49

## How sensitively the backtracking treats velocities that differ in length
## along the chosen velocity.
@export var backtracking_velocity_match_parallel_sensitivity : float = 0.5

## How sensitively the backtracking treats velocities that are offset
## perpendicular to the chosen velocity.
@export var backtracking_velocity_match_perpendicular_sensitivity : float = 0.1

## The number of passes performed by the jump flood algorithm based dilation;
## each added pass doubles the maximum dilation radius available.
@export var JFA_pass_count : int = 3
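# e.g. with the default JFA_pass_count = 3 and sample_step_multiplier = 8, the largest
# single JFA step is 2^(3 - 1) * 8 = 32 texels, which the construction shader uses as
# its maximum dilation radius.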

## Whether this motion blur keeps the same intensity below
## target_constant_framerate.
@export var framerate_independent : bool = true

## If framerate_independent is enabled, the blur simulates shutter speeds
## at that framerate and above.
@export var target_constant_framerate : float = 30

var rd: RenderingDevice

var linear_sampler: RID

var construct_shader : RID
var construct_pipeline : RID

var motion_blur_shader: RID
var motion_blur_pipeline: RID

var overlay_shader: RID
var overlay_pipeline: RID

var context: StringName = "MotionBlur"
var texture: StringName = "texture"

var buffer_a : StringName = "buffer_a"
var buffer_b : StringName = "buffer_b"

var past_color : StringName = "past_color"

var velocity_3D : StringName = "velocity_3D"
var velocity_curl : StringName = "velocity_curl"

var draw_debug : float = 0

var freeze : bool = false

func _init():
    effect_callback_type = CompositorEffect.EFFECT_CALLBACK_TYPE_POST_TRANSPARENT
    needs_motion_vectors = true
    RenderingServer.call_on_render_thread(_initialize_compute)

func _notification(what):
    if what == NOTIFICATION_PREDELETE:
        if linear_sampler.is_valid():
            rd.free_rid(linear_sampler)
        if construct_shader.is_valid():
            rd.free_rid(construct_shader)
        if motion_blur_shader.is_valid():
            rd.free_rid(motion_blur_shader)
        if overlay_shader.is_valid():
            rd.free_rid(overlay_shader)

func _initialize_compute():
    rd = RenderingServer.get_rendering_device()
    if !rd:
        return

    var sampler_state := RDSamplerState.new()
    sampler_state.min_filter = RenderingDevice.SAMPLER_FILTER_LINEAR
    sampler_state.mag_filter = RenderingDevice.SAMPLER_FILTER_LINEAR
    sampler_state.repeat_u = RenderingDevice.SAMPLER_REPEAT_MODE_CLAMP_TO_EDGE
    sampler_state.repeat_v = RenderingDevice.SAMPLER_REPEAT_MODE_CLAMP_TO_EDGE
    linear_sampler = rd.sampler_create(sampler_state)

    var construct_shader_spirv : RDShaderSPIRV = construction_pass.get_spirv()
    construct_shader = rd.shader_create_from_spirv(construct_shader_spirv)
    construct_pipeline = rd.compute_pipeline_create(construct_shader)

    var shader_spirv: RDShaderSPIRV = blur_shader_file.get_spirv()
    motion_blur_shader = rd.shader_create_from_spirv(shader_spirv)
    motion_blur_pipeline = rd.compute_pipeline_create(motion_blur_shader)

    var overlay_shader_spirv: RDShaderSPIRV = overlay_shader_file.get_spirv()
    overlay_shader = rd.shader_create_from_spirv(overlay_shader_spirv)
    overlay_pipeline = rd.compute_pipeline_create(overlay_shader)

func get_image_uniform(image: RID, binding: int) -> RDUniform:
    var uniform: RDUniform = RDUniform.new()
    uniform.uniform_type = RenderingDevice.UNIFORM_TYPE_IMAGE
    uniform.binding = binding
    uniform.add_id(image)
    return uniform

func get_sampler_uniform(image: RID, binding: int) -> RDUniform:
    var uniform: RDUniform = RDUniform.new()
    uniform.uniform_type = RenderingDevice.UNIFORM_TYPE_SAMPLER_WITH_TEXTURE
    uniform.binding = binding
    uniform.add_id(linear_sampler)
    uniform.add_id(image)
    return uniform



var temp_motion_blur_intensity : float

var previous_time : float = 0

func _render_callback(p_effect_callback_type, p_render_data):
    var time : float = float(Time.get_ticks_msec()) / 1000

    var delta_time : float = time - previous_time

    previous_time = time

    temp_motion_blur_intensity = motion_blur_intensity

    if framerate_independent:
        var capped_frame_time : float = min(1 / target_constant_framerate, delta_time)
        temp_motion_blur_intensity = motion_blur_intensity * capped_frame_time / delta_time
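        # e.g. with target_constant_framerate = 30, a frame delivered at 15 fps has
        # delta_time = 1/15 but capped_frame_time = 1/30, so the intensity is halved;
        # at or above 30 fps the intensity is left unchanged.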

    if rd and p_effect_callback_type == CompositorEffect.EFFECT_CALLBACK_TYPE_POST_TRANSPARENT:
        var render_scene_buffers: RenderSceneBuffersRD = p_render_data.get_render_scene_buffers()
        var render_scene_data: RenderSceneDataRD = p_render_data.get_render_scene_data()
        if render_scene_buffers and render_scene_data:
            var render_size: Vector2 = render_scene_buffers.get_internal_size()
            if render_size.x == 0.0 or render_size.y == 0.0:
                return

            ensure_texture(texture, render_scene_buffers)
            ensure_texture(buffer_a, render_scene_buffers, true)#, Vector2(0.25, 0.25))
            ensure_texture(buffer_b, render_scene_buffers, true)#, Vector2(0.25, 0.25))
            ensure_texture(past_color, render_scene_buffers)

            rd.draw_command_begin_label("Motion Blur", Color(1.0, 1.0, 1.0, 1.0))

            var last_iteration_index : int = JFA_pass_count - 1

            var push_constant: PackedFloat32Array = [
                motion_blur_samples, temp_motion_blur_intensity,
                motion_blur_center_fade, draw_debug,
                freeze,
                Engine.get_frames_drawn() % 8,
                last_iteration_index,
                sample_step_multiplier
            ]
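            # all eight values are packed as 32-bit floats to match the all-float
            # Params block in jump_flood_blur.glsl (bools and ints arrive there as floats).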

            var view_count = render_scene_buffers.get_view_count()
            for view in range(view_count):
                var color_image := render_scene_buffers.get_color_layer(view)
                var depth_image := render_scene_buffers.get_depth_layer(view)
                var velocity_image := render_scene_buffers.get_velocity_layer(view)
                var texture_image := render_scene_buffers.get_texture_slice(context, texture, view, 0, 1, 1)
                var buffer_a_image := render_scene_buffers.get_texture_slice(context, buffer_a, view, 0, 1, 1)
                var buffer_b_image := render_scene_buffers.get_texture_slice(context, buffer_b, view, 0, 1, 1)
                var past_color_image := render_scene_buffers.get_texture_slice(context, past_color, view, 0, 1, 1)
                rd.draw_command_begin_label("Construct blur " + str(view), Color(1.0, 1.0, 1.0, 1.0))

                var tex_uniform_set
                var compute_list

                var x_groups := floori((render_size.x - 1) / 8 + 1)
                var y_groups := floori((render_size.y - 1) / 8 + 1)

                tex_uniform_set = UniformSetCacheRD.get_cache(construct_shader, 0, [
                    get_sampler_uniform(depth_image, 0),
                    get_sampler_uniform(velocity_image, 1),
                    get_image_uniform(buffer_a_image, 2),
                    get_image_uniform(buffer_b_image, 3),
                ])

                compute_list = rd.compute_list_begin()
                rd.compute_list_bind_compute_pipeline(compute_list, construct_pipeline)
                rd.compute_list_bind_uniform_set(compute_list, tex_uniform_set, 0)

                for i in JFA_pass_count:
                    var jf_push_constants : PackedInt32Array = [
                        i,
                        last_iteration_index,
                        16,
                        16
                    ]

                    var jf_float_push_constants_test : PackedFloat32Array = [
                        perpen_error_threshold,
                        sample_step_multiplier,
                        temp_motion_blur_intensity,
                        backtracking_velocity_match_threshold,
                        backtracking_velocity_match_parallel_sensitivity,
                        backtracking_velocity_match_perpendicular_sensitivity,
                        0,
                        0
                    ]

                    var jf_byte_array = jf_push_constants.to_byte_array()
                    jf_byte_array.append_array(jf_float_push_constants_test.to_byte_array())

                    rd.compute_list_set_push_constant(compute_list, jf_byte_array, jf_byte_array.size())
                    rd.compute_list_dispatch(compute_list, x_groups, y_groups, 1)

                rd.compute_list_end()

                rd.draw_command_end_label()

                rd.draw_command_begin_label("Compute blur " + str(view), Color(1.0, 1.0, 1.0, 1.0))

                tex_uniform_set = UniformSetCacheRD.get_cache(motion_blur_shader, 0, [
                    get_sampler_uniform(color_image, 0),
                    get_sampler_uniform(depth_image, 1),
                    get_sampler_uniform(velocity_image, 2),
                    get_image_uniform(buffer_b_image if last_iteration_index % 2 else buffer_a_image, 3),
                    get_image_uniform(texture_image, 4),
                    get_image_uniform(past_color_image, 5),
                ])

                compute_list = rd.compute_list_begin()
                rd.compute_list_bind_compute_pipeline(compute_list, motion_blur_pipeline)
                rd.compute_list_bind_uniform_set(compute_list, tex_uniform_set, 0)
                rd.compute_list_set_push_constant(compute_list, push_constant.to_byte_array(), push_constant.size() * 4)
                rd.compute_list_dispatch(compute_list, x_groups, y_groups, 1)
                rd.compute_list_end()
                rd.draw_command_end_label()

                rd.draw_command_begin_label("Overlay result " + str(view), Color(1.0, 1.0, 1.0, 1.0))

                tex_uniform_set = UniformSetCacheRD.get_cache(overlay_shader, 0, [
                    get_sampler_uniform(texture_image, 0),
                    get_image_uniform(color_image, 1),
                ])

                compute_list = rd.compute_list_begin()
                rd.compute_list_bind_compute_pipeline(compute_list, overlay_pipeline)
                rd.compute_list_bind_uniform_set(compute_list, tex_uniform_set, 0)
                rd.compute_list_dispatch(compute_list, x_groups, y_groups, 1)
                rd.compute_list_end()
                rd.draw_command_end_label()

            rd.draw_command_end_label()


func ensure_texture(texture_name : StringName, render_scene_buffers : RenderSceneBuffersRD, high_accuracy : bool = false, render_size_multiplier : Vector2 = Vector2(1, 1)):
    var render_size : Vector2 = Vector2(render_scene_buffers.get_internal_size()) * render_size_multiplier

    if render_scene_buffers.has_texture(context, texture_name):
        var tf: RDTextureFormat = render_scene_buffers.get_texture_format(context, texture_name)
        if tf.width != render_size.x or tf.height != render_size.y:
            render_scene_buffers.clear_context(context)

    if !render_scene_buffers.has_texture(context, texture_name):
        var usage_bits: int = RenderingDevice.TEXTURE_USAGE_SAMPLING_BIT | RenderingDevice.TEXTURE_USAGE_STORAGE_BIT
        var texture_format = RenderingDevice.DATA_FORMAT_R32G32B32A32_SFLOAT if high_accuracy else RenderingDevice.DATA_FORMAT_R16G16B16A16_SFLOAT
        render_scene_buffers.create_texture(context, texture_name, texture_format, usage_bits, RenderingDevice.TEXTURE_SAMPLES_1, render_size, 1, 1, true)
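
# Usage (a minimal sketch, not part of the addon itself): the effect is meant to be
# added to the compositor_effects array of a Compositor used by the active camera,
# e.g. on a WorldEnvironment from a setup script. The node path below is an assumption.
#
#     var effect := MotionBlurSphynxJumpFlood.new()
#     var compositor := Compositor.new()
#     compositor.compositor_effects.push_back(effect)
#     $WorldEnvironment.compositor = compositor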
261  addons/MyJumpFloodIteration/jump_flood_blur.glsl  Normal file
@@ -0,0 +1,261 @@
#[compute]
#version 450

#define FLT_MAX 3.402823466e+38
#define FLT_MIN 1.175494351e-38
#define DBL_MAX 1.7976931348623158e+308
#define DBL_MIN 2.2250738585072014e-308

layout(set = 0, binding = 0) uniform sampler2D color_sampler;
layout(set = 0, binding = 1) uniform sampler2D depth_sampler;
layout(set = 0, binding = 2) uniform sampler2D vector_sampler;
layout(rgba16f, set = 0, binding = 3) uniform readonly image2D velocity_map;
layout(rgba16f, set = 0, binding = 4) uniform image2D output_image;
layout(rgba16f, set = 0, binding = 5) uniform image2D past_color_image;

layout(push_constant, std430) uniform Params
{
    float motion_blur_samples;
    float motion_blur_intensity;
    float motion_blur_center_fade;
    float debug;
    float freeze;
    float frame;
    float last_iteration_index;
    float sample_step_multiplier;
} params;
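// These eight floats mirror the PackedFloat32Array built in jump_flood_blur.gd;
// freeze, frame and last_iteration_index therefore arrive as floats rather than ints or bools.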

layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
// velocity similarity divisors
float vsim_parallel = 20;
float vsim_perpendicular = 20;

// for velocity similarity check
float depth_bias = 0.1;

// sample weight threshold
float sw_threshold = 0.1;

// near plane distance
float npd = 0.05;

// SOFT_Z_EXTENT
float sze = 0.1;

// Helper functions
// --------------------------------------------
vec2 get_depth_difference_at_derivative(vec2 uv, vec2 step_size)
{
    float base = textureLod(depth_sampler, uv, 0.0).x;
    float x = textureLod(depth_sampler, uv + vec2(0, step_size.x), 0.0).x;
    float y = textureLod(depth_sampler, uv + vec2(step_size.y, 0), 0.0).x;
    return vec2(x - base, y - base);
}

// from https://www.shadertoy.com/view/ftKfzc
float interleaved_gradient_noise(vec2 uv, int FrameId){
    uv += float(FrameId) * (vec2(47, 17) * 0.695);

    vec3 magic = vec3( 0.06711056, 0.00583715, 52.9829189 );

    return fract(magic.z * fract(dot(uv, magic.xy)));
}

float get_velocity_convergence(vec2 uv, vec2 step_size)
{
    vec2 base = textureLod(vector_sampler, uv, 0.0).xy;
    vec2 x = textureLod(vector_sampler, uv + vec2(0, step_size.x), 0.0).xy;
    vec2 y = textureLod(vector_sampler, uv + vec2(step_size.y, 0), 0.0).xy;

    return (dot(vec2(0, 1), vec2(x - base)) + dot(vec2(1, 0), vec2(y - base)));
}

vec3 get_ndc_velocity(vec2 uv, vec2 render_size, float depth)
{
    float ndc_velocity_z = get_velocity_convergence(uv, vec2(1) / render_size) / depth;

    vec2 ndc_velocity_xy = textureLod(vector_sampler, uv, 0.0).xy;

    return vec3(ndc_velocity_xy, ndc_velocity_z);
}

vec3 get_world_velocity(vec2 uv, vec2 render_size, float depth)
{
    return get_ndc_velocity(uv, render_size, depth) / depth;
}


vec3 get_velocity_curl_vector(vec2 uv, vec2 render_size)
{
    float depth = textureLod(depth_sampler, uv, 0.0).x;

    vec2 step_size = vec2(1) / render_size;
    vec3 base = get_world_velocity(uv, render_size, depth);
    vec3 x = get_world_velocity(uv + vec2(step_size.x, 0), render_size, depth);
    vec3 y = get_world_velocity(uv + vec2(0, step_size.y), render_size, depth);

    vec2 depth_derivative = get_depth_difference_at_derivative(uv, step_size) / depth;

    vec3 x_vector = normalize(vec3(step_size.x, 0, 0));
    vec3 y_vector = normalize(vec3(0, step_size.y, 0));

    vec3 cross_x = cross((x - base) / vec3(step_size, 0), x_vector);
    vec3 cross_y = cross((y - base) / vec3(step_size, 0), y_vector);

    return cross_x + cross_y;
}

float get_velocity_curl(vec2 uv, vec2 render_size)
{
    vec2 step_size = vec2(1) / render_size;
    vec2 base = textureLod(vector_sampler, uv, 0.0).xy;
    vec2 x = textureLod(vector_sampler, uv + vec2(0, step_size.x), 0.0).xy;
    vec2 y = textureLod(vector_sampler, uv + vec2(step_size.y, 0), 0.0).xy;

    return (cross(vec3(0, 1, 0), vec3(x - base, 0) / vec3(step_size, 0)) + cross(vec3(1, 0, 0), vec3(y - base, 0) / vec3(step_size, 0))).z;
}
// -------------------------------------------------------

// McGuire's functions https://docs.google.com/document/d/1IIlAKTj-O01hcXEdGxTErQbCHO9iBmRx6oFUy_Jm0fI/edit
// ----------------------------------------------------------
float soft_depth_compare(float depth_X, float depth_Y)
{
    return clamp(1 - (depth_X - depth_Y) / sze, 0, 1);
}

float cone(vec2 X, vec2 Y, vec2 v)
{
    return clamp(1 - length(X - Y) / length(v), 0, 1);
}

float cylinder(vec2 X, vec2 Y, vec2 v)
{
    return 1.0 + smoothstep(0.95 * length(v), 1.05 * length(v), length(X - Y));
}
// ----------------------------------------------------------

// Motion similarity
// ----------------------------------------------------------
float get_motion_difference(vec2 V, vec2 V2, float power)
{
    vec2 VO = V - V2;
    float difference = dot(VO, V) / max(FLT_MIN, dot(V, V));
    return pow(clamp(difference, 0, 1), power);
}
// ----------------------------------------------------------

void main()
{
    ivec2 render_size = ivec2(textureSize(color_sampler, 0));
    ivec2 uvi = ivec2(gl_GlobalInvocationID.xy);
    if ((uvi.x >= render_size.x) || (uvi.y >= render_size.y))
    {
        return;
    }

    if(params.freeze > 0)
    {
        imageStore(output_image, uvi, imageLoad(past_color_image, uvi));
        return;
    }

    vec2 uvn = (vec2(uvi) + vec2(0.5)) / render_size;

    int iteration_count = int(params.motion_blur_samples);

    vec4 base = textureLod(color_sampler, uvn, 0.0);

    vec4 result_constructed_color = vec4(0);

    vec4 velocity_map_sample = imageLoad(velocity_map, uvi);

    vec3 velocity = -textureLod(vector_sampler, velocity_map_sample.xy, 0.0).xyz;

    vec3 naive_velocity = -textureLod(vector_sampler, uvn, 0.0).xyz;

    float max_dilation_radius = pow(2, params.last_iteration_index) * params.sample_step_multiplier / max(render_size.x, render_size.y);

    if ((dot(velocity, velocity) == 0 || params.motion_blur_intensity == 0) && params.debug == 0) //(uvn.y > 0.5)//
    {
        imageStore(output_image, uvi, base);
        imageStore(past_color_image, uvi, base);
        return;
    }

    float velocity_step_coef = min(params.motion_blur_intensity, max_dilation_radius / (length(velocity) * params.motion_blur_intensity)) / max(1.0, params.motion_blur_samples - 1.0) * (1 + (interleaved_gradient_noise(uvi, int(params.frame)) - 0.5));

    vec3 sample_step = velocity * velocity_step_coef;

    vec4 velocity_map_sample_step = vec4(0);

    //float d = 1.0 - min(1.0, 2.0 * distance(uvn, vec2(0.5)));
    //sample_step *= 1.0 - d * params.fade_padding.x;

    float total_weight = 1;// max(0.0001, length(naive_velocity));

    vec2 offset = vec2(0.0);

    vec4 col = base * total_weight;

    float depth = max(FLT_MIN, textureLod(depth_sampler, velocity_map_sample.xy, 0.0).x);

    float naive_depth = max(FLT_MIN, textureLod(depth_sampler, uvn, 0.0).x);

    for (int i = 1; i < iteration_count; i++)
    {
        offset += sample_step.xy;// * interleaved_gradient_noise(uvi, int(params.frame) + i);

        vec2 uvo = uvn + offset;

        if (any(notEqual(uvo, clamp(uvo, vec2(0.0), vec2(1.0)))))
        {
            break;
        }

        velocity_map_sample_step = imageLoad(velocity_map, ivec2(uvo * render_size));

        vec3 current_velocity = -textureLod(vector_sampler, velocity_map_sample_step.xy, 0.0).xyz;

        float current_depth = max(FLT_MIN, textureLod(depth_sampler, velocity_map_sample_step.xy, 0.0).x);

        float sample_weight = 1;

        float motion_difference = get_motion_difference(velocity.xy, current_velocity.xy, 0.1);

        float foreground = soft_depth_compare(npd / current_depth, npd / depth);

        sample_weight *= 1 - (foreground * motion_difference);

        total_weight += sample_weight;

        col += textureLod(color_sampler, uvo, 0.0) * sample_weight;
    }

    col /= total_weight;

    if (params.debug == 0)
    {
        imageStore(output_image, uvi, col);
        imageStore(past_color_image, uvi, col);
        return;
    }
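    // Debug view: the frame is split into four quadrants -
    // top-left: raw screen-space velocity (x10), top-right: dilated velocity (x10),
    // bottom-left: offset of the dilated UV from this texel (RG, x10) and the
    // backtracking flag (B), bottom-right: the blurred result.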

    vec4 tl_col = vec4(abs(textureLod(vector_sampler, uvn, 0.0).xy) * 10, 0, 1);

    vec4 tr_col = vec4(abs(velocity.xy) * 10, 0, 1);

    vec4 bl_col = vec4(abs(velocity_map_sample.xyw - vec3(uvn, 0)) * vec3(10, 10, 1), 1);

    vec4 br_col = col;

    //imageStore(past_color_image, uvi, imageLoad(output_image, uvi));

    imageStore(output_image, uvi / 2, tl_col);
    imageStore(output_image, uvi / 2 + ivec2(vec2(0.5, 0.5) * render_size), br_col);
    imageStore(output_image, uvi / 2 + ivec2(vec2(0.0, 0.5) * render_size), bl_col);
    imageStore(output_image, uvi / 2 + ivec2(vec2(0.5, 0.0) * render_size), tr_col);
    imageStore(past_color_image, uvi / 2, tl_col);
    imageStore(past_color_image, uvi / 2 + ivec2(vec2(0.5, 0.5) * render_size), br_col);
    imageStore(past_color_image, uvi / 2 + ivec2(vec2(0.0, 0.5) * render_size), bl_col);
    imageStore(past_color_image, uvi / 2 + ivec2(vec2(0.5, 0.0) * render_size), tr_col);
}
17  addons/MyJumpFloodIteration/jump_flood_overlay.glsl  Normal file
@@ -0,0 +1,17 @@
#[compute]
#version 450

layout(set = 0, binding = 0) uniform sampler2D blur_sampler;
layout(rgba16f, set = 0, binding = 1) uniform image2D color_image;

layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
void main()
{
    ivec2 render_size = ivec2(textureSize(blur_sampler, 0));
    ivec2 uv = ivec2(gl_GlobalInvocationID.xy);
    if ((uv.x >= render_size.x) || (uv.y >= render_size.y))
    {
        return;
    }
    imageStore(color_image, uv, textureLod(blur_sampler, (vec2(uv) + 0.5) / render_size, 0.0));
}