Realtime Compositor: Add basic distort nodes

This patch implements the following nodes for the realtime compositor:

- Crop node.
- Flip node.
- Lens distort node.
- Rotate node.
- Transform node.
- Translate node.

Differential Revision: https://developer.blender.org/D15231

Reviewed By: Clement Foucault
This commit is contained in:
Omar Emara 2022-08-10 10:30:27 +02:00
parent c014021802
commit 40c45985a9
18 changed files with 916 additions and 15 deletions

View File

@ -323,10 +323,15 @@ set(GLSL_SRC
shaders/common/gpu_shader_common_math_utils.glsl
shaders/common/gpu_shader_common_mix_rgb.glsl
shaders/compositor/compositor_alpha_crop.glsl
shaders/compositor/compositor_box_mask.glsl
shaders/compositor/compositor_convert.glsl
shaders/compositor/compositor_ellipse_mask.glsl
shaders/compositor/compositor_flip.glsl
shaders/compositor/compositor_image_crop.glsl
shaders/compositor/compositor_projector_lens_distortion.glsl
shaders/compositor/compositor_realize_on_domain.glsl
shaders/compositor/compositor_screen_lens_distortion.glsl
shaders/compositor/compositor_set_alpha.glsl
shaders/compositor/compositor_split_viewer.glsl
@ -562,10 +567,15 @@ set(SRC_SHADER_CREATE_INFOS
shaders/infos/gpu_shader_text_info.hh
shaders/infos/gpu_srgb_to_framebuffer_space_info.hh
shaders/compositor/infos/compositor_alpha_crop_info.hh
shaders/compositor/infos/compositor_box_mask_info.hh
shaders/compositor/infos/compositor_convert_info.hh
shaders/compositor/infos/compositor_ellipse_mask_info.hh
shaders/compositor/infos/compositor_flip_info.hh
shaders/compositor/infos/compositor_image_crop_info.hh
shaders/compositor/infos/compositor_projector_lens_distortion_info.hh
shaders/compositor/infos/compositor_realize_on_domain_info.hh
shaders/compositor/infos/compositor_screen_lens_distortion_info.hh
shaders/compositor/infos/compositor_set_alpha_info.hh
shaders/compositor/infos/compositor_split_viewer_info.hh
)

View File

@ -0,0 +1,11 @@
#pragma BLENDER_REQUIRE(gpu_shader_compositor_texture_utilities.glsl)
/* Alpha crop: keep the input pixel inside the [lower_bound, upper_bound) region and write
 * transparent black outside of it. The output has the same size as the input. */
void main()
{
  ivec2 texel = ivec2(gl_GlobalInvocationID.xy);

  /* The region test is inclusive on the lower bound and exclusive on the upper bound. */
  bool above_lower = all(greaterThanEqual(texel, lower_bound));
  bool below_upper = all(lessThan(texel, upper_bound));

  vec4 color = (above_lower && below_upper) ? texture_load(input_tx, texel) : vec4(0.0);
  imageStore(output_img, texel, color);
}

View File

@ -0,0 +1,15 @@
#pragma BLENDER_REQUIRE(gpu_shader_compositor_texture_utilities.glsl)
/* Mirror the image across the x and/or y axis depending on the flip_x/flip_y push constants by
 * reading each output texel from its mirrored position in the input. */
void main()
{
  ivec2 texel = ivec2(gl_GlobalInvocationID.xy);
  ivec2 size = texture_size(input_tx);

  /* A flipped axis reads from the opposite end: index i maps to (size - 1 - i). */
  int source_x = flip_x ? size.x - texel.x - 1 : texel.x;
  int source_y = flip_y ? size.y - texel.y - 1 : texel.y;

  imageStore(output_img, texel, texture_load(input_tx, ivec2(source_x, source_y)));
}

View File

@ -0,0 +1,7 @@
#pragma BLENDER_REQUIRE(gpu_shader_compositor_texture_utilities.glsl)
/* Image crop: the output is the sub-region of the input starting at lower_bound, so every output
 * texel reads the input texel offset by the lower bound. */
void main()
{
  ivec2 output_texel = ivec2(gl_GlobalInvocationID.xy);
  ivec2 input_texel = output_texel + lower_bound;
  imageStore(output_img, output_texel, texture_load(input_tx, input_texel));
}

View File

@ -0,0 +1,16 @@
#pragma BLENDER_REQUIRE(gpu_shader_compositor_texture_utilities.glsl)
/* Projector lens distortion: shift the red channel right and the blue channel left by the
 * dispersion amount while keeping the green channel in place, producing a simple horizontal
 * chromatic aberration. */
void main()
{
  ivec2 texel = ivec2(gl_GlobalInvocationID.xy);

  /* Get the normalized coordinates of the pixel centers. */
  vec2 normalized_texel = (vec2(texel) + vec2(0.5)) / vec2(texture_size(input_tx));

  /* Sample the red and blue channels shifted by the dispersion amount. The shifted channels go
   * through the sampler since they land between pixel centers (the operation enables bilinear
   * filtering on the input), while the unshifted green channel is loaded directly. */
  const float red = texture(input_tx, normalized_texel + vec2(dispersion, 0.0)).r;
  const float green = texture_load(input_tx, texel).g;
  const float blue = texture(input_tx, normalized_texel - vec2(dispersion, 0.0)).b;

  /* The output is fully opaque regardless of the input alpha. */
  imageStore(output_img, texel, vec4(red, green, blue, 1.0));
}

View File

@ -0,0 +1,151 @@
#pragma BLENDER_REQUIRE(gpu_shader_common_hash.glsl)
#pragma BLENDER_REQUIRE(gpu_shader_compositor_texture_utilities.glsl)
/* A model that approximates lens distortion parameterized by a distortion parameter and dependent
 * on the squared distance to the center of the image. The distorted pixel is then computed as the
 * scalar multiplication of the pixel coordinates with the value returned by this model. See the
 * compute_distorted_uv function for more details. */
float compute_distortion_scale(float distortion, float distance_squared)
{
  /* Clamp the radicand to zero so strong distortions don't produce NaNs. */
  float radicand = max(0.0, 1.0 - distortion * distance_squared);
  return 1.0 / (1.0 + sqrt(radicand));
}
/* A vectorized version of compute_distortion_scale that is applied on the chromatic distortion
 * parameters passed to the shader, producing one scale per color channel. */
vec3 compute_chromatic_distortion_scale(float distance_squared)
{
  /* Clamp the radicand to zero so strong distortions don't produce NaNs. */
  vec3 radicand = max(vec3(0.0), 1.0 - chromatic_distortion * distance_squared);
  return 1.0 / (1.0 + sqrt(radicand));
}
/* Compute the image coordinates after distortion by the given distortion scale computed by the
 * compute_distortion_scale function. Note that the function expects centered normalized UV
 * coordinates but outputs non-centered image (pixel) coordinates. */
vec2 compute_distorted_uv(vec2 uv, float scale)
{
  /* Apply the distortion, uncenter from [-0.5, 0.5] to [0, 1], then convert to pixel space. */
  vec2 distorted = uv * scale;
  return (distorted + 0.5) * texture_size(input_tx) - 0.5;
}
/* Compute the number of integration steps that should be used to approximate the distorted pixel
 * using a heuristic. The number of steps is proportional to the number of pixels spanned by the
 * distortion amount. For jitter distortion, the square root of the distortion amount plus 1 is
 * used, with a minimum of 2 steps. For non-jitter distortion, the distortion amount plus 1 is
 * used as the number of steps. */
int compute_number_of_integration_steps_heuristic(float distortion)
{
#if defined(JITTER)
  /* sqrt(4 + 1) is just above 2, so any distortion below 4 clamps to the 2-step minimum. */
  if (distortion < 4.0) {
    return 2;
  }
  return int(sqrt(distortion + 1.0));
#else
  return int(distortion + 1.0);
#endif
}
/* Compute the number of integration steps that should be used to compute each channel of the
 * distorted pixel. Each of the channels are distorted by their respective chromatic distortion
 * amount, then the amount of distortion between each two consecutive channels is computed, this
 * amount is then used to heuristically infer the number of needed integration steps, see the
 * integrate_distortion function for more information. Returns (red, green, blue) step counts. */
ivec3 compute_number_of_integration_steps(vec2 uv, float distance_squared)
{
  /* Distort each channel by its respective chromatic distortion amount. */
  vec3 distortion_scale = compute_chromatic_distortion_scale(distance_squared);
  vec2 distorted_uv_red = compute_distorted_uv(uv, distortion_scale.r);
  vec2 distorted_uv_green = compute_distorted_uv(uv, distortion_scale.g);
  vec2 distorted_uv_blue = compute_distorted_uv(uv, distortion_scale.b);

  /* Infer the number of needed integration steps to compute the distorted red channel starting
   * from the green channel. The distortion is quantified as the pixel distance between the two
   * distorted positions. */
  float distortion_red = distance(distorted_uv_red, distorted_uv_green);
  int steps_red = compute_number_of_integration_steps_heuristic(distortion_red);

  /* Infer the number of needed integration steps to compute the distorted blue channel starting
   * from the green channel. */
  float distortion_blue = distance(distorted_uv_green, distorted_uv_blue);
  int steps_blue = compute_number_of_integration_steps_heuristic(distortion_blue);

  /* The number of integration steps used to compute the green channel is the sum of both the red
   * and the blue channel steps because it is computed once with each of them. */
  return ivec3(steps_red, steps_red + steps_blue, steps_blue);
}
/* Returns a random jitter amount, which is essentially a random value in the [0, 1] range. If
 * jitter is not enabled, return a constant 0.5 value instead. */
float get_jitter(int seed)
{
#if defined(JITTER)
  /* Hash the pixel coordinates together with the seed so the jitter is decorrelated across both
   * pixels and integration steps. */
  return hash_uint3_to_float(gl_GlobalInvocationID.x, gl_GlobalInvocationID.y, seed);
#else
  /* No jitter: the midpoint centers each integration sample in its step interval. */
  return 0.5;
#endif
}
/* Each color channel may have a different distortion with the guarantee that the red will have the
 * lowest distortion while the blue will have the highest one. If each channel is distorted
 * independently, the image will look disintegrated, with each channel seemingly merely shifted.
 * Consequently, the distorted pixels needs to be computed by integrating along the path of change
 * of distortion starting from one channel to another. For instance, to compute the distorted red
 * from the distorted green, we accumulate the color of the distorted pixel starting from the
 * distortion of the red, taking small steps until we reach the distortion of the green. The pixel
 * color is weighted such that it is maximum at the start distortion and zero at the end distortion
 * in an arithmetic progression. The integration steps can be augmented with random values to
 * simulate lens jitter. Finally, it should be noted that this function integrates both the start
 * and end channels in reverse directions for more efficient computation.
 *
 * The start and end parameters are channel indices (0 = red, 1 = green, 2 = blue) into the
 * chromatic_distortion vector and into the accumulated color. */
vec3 integrate_distortion(int start, int end, float distance_squared, vec2 uv, int steps)
{
  vec3 accumulated_color = vec3(0.0);
  float distortion_amount = chromatic_distortion[end] - chromatic_distortion[start];
  for (int i = 0; i < steps; i++) {
    /* The increment will be in the [0, 1) range across iterations. */
    float increment = (i + get_jitter(i)) / steps;
    float distortion = chromatic_distortion[start] + increment * distortion_amount;
    float distortion_scale = compute_distortion_scale(distortion, distance_squared);

    /* Sample the color at the distorted coordinates and accumulate it weighted by the increment
     * value for both the start and end channels. Dividing by the texture size converts the pixel
     * coordinates back to the normalized coordinates the sampler expects. */
    vec2 distorted_uv = compute_distorted_uv(uv, distortion_scale);
    vec4 color = texture(input_tx, distorted_uv / texture_size(input_tx));
    accumulated_color[start] += (1.0 - increment) * color[start];
    accumulated_color[end] += increment * color[end];
  }
  return accumulated_color;
}
void main()
{
  ivec2 texel = ivec2(gl_GlobalInvocationID.xy);

  /* Compute the UV image coordinates in the range [-1, 1] as well as the squared distance to the
   * center of the image, which is at (0, 0) in the UV coordinates. */
  vec2 center = texture_size(input_tx) / 2.0;
  vec2 uv = scale * (texel + 0.5 - center) / center;
  float distance_squared = dot(uv, uv);

  /* If any of the color channels will get distorted outside of the screen beyond what is possible,
   * write a zero transparent color and return. */
  if (any(greaterThan(chromatic_distortion * distance_squared, vec3(1.0)))) {
    imageStore(output_img, texel, vec4(0.0));
    return;
  }

  /* Compute the number of integration steps that should be used to compute each channel of the
   * distorted pixel. */
  ivec3 number_of_steps = compute_number_of_integration_steps(uv, distance_squared);

  /* Integrate the distortion of the red and green, then the green and blue channels. That means
   * the green will be integrated twice, but this is accounted for in the number of steps which the
   * color will later be divided by. See the compute_number_of_integration_steps function for more
   * details. */
  vec3 color = vec3(0.0);
  color += integrate_distortion(0, 1, distance_squared, uv, number_of_steps.r);
  color += integrate_distortion(1, 2, distance_squared, uv, number_of_steps.b);

  /* The integration above performed weighted accumulation, and thus the color needs to be divided
   * by the sum of the weights. Assuming no jitter, the weights are generated as an arithmetic
   * progression starting from (0.5 / n) to ((n - 0.5) / n) for n terms. The sum of an arithmetic
   * progression can be computed as (n * (start + end) / 2), which when substituting the start and
   * end reduces to (n / 2). So the color should be multiplied by 2 / n. The jitter sequence
   * approximately sums to the same value because it is a uniform random value whose mean value is
   * 0.5, so the expression doesn't change regardless of jitter. */
  color *= 2.0 / vec3(number_of_steps);

  imageStore(output_img, texel, vec4(color, 1.0));
}

View File

@ -0,0 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include "gpu_shader_create_info.hh"
/* Crop by zeroing pixels outside of the [lower_bound, upper_bound) region while keeping the input
 * size. See compositor_alpha_crop.glsl. */
GPU_SHADER_CREATE_INFO(compositor_alpha_crop)
    .local_group_size(16, 16)
    .push_constant(Type::IVEC2, "lower_bound")
    .push_constant(Type::IVEC2, "upper_bound")
    .sampler(0, ImageType::FLOAT_2D, "input_tx")
    .image(0, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_img")
    .compute_source("compositor_alpha_crop.glsl")
    .do_static_compilation(true);

View File

@ -0,0 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include "gpu_shader_create_info.hh"
/* Mirror the input across the x and/or y axis as controlled by the boolean push constants. See
 * compositor_flip.glsl. */
GPU_SHADER_CREATE_INFO(compositor_flip)
    .local_group_size(16, 16)
    .push_constant(Type::BOOL, "flip_x")
    .push_constant(Type::BOOL, "flip_y")
    .sampler(0, ImageType::FLOAT_2D, "input_tx")
    .image(0, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_img")
    .compute_source("compositor_flip.glsl")
    .do_static_compilation(true);

View File

@ -0,0 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include "gpu_shader_create_info.hh"
/* Copy the sub-region of the input starting at lower_bound into a smaller output. The output size
 * implies the upper bound, so only the lower bound is needed. See compositor_image_crop.glsl. */
GPU_SHADER_CREATE_INFO(compositor_image_crop)
    .local_group_size(16, 16)
    .push_constant(Type::IVEC2, "lower_bound")
    .sampler(0, ImageType::FLOAT_2D, "input_tx")
    .image(0, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_img")
    .compute_source("compositor_image_crop.glsl")
    .do_static_compilation(true);

View File

@ -0,0 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include "gpu_shader_create_info.hh"
/* Horizontal chromatic shift of the red and blue channels by the dispersion amount, which is given
 * in normalized texture coordinates. See compositor_projector_lens_distortion.glsl. */
GPU_SHADER_CREATE_INFO(compositor_projector_lens_distortion)
    .local_group_size(16, 16)
    .push_constant(Type::FLOAT, "dispersion")
    .sampler(0, ImageType::FLOAT_2D, "input_tx")
    .image(0, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_img")
    .compute_source("compositor_projector_lens_distortion.glsl")
    .do_static_compilation(true);

View File

@ -0,0 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include "gpu_shader_create_info.hh"
/* Base info shared by the jitter and non-jitter variants of the screen lens distortion shader.
 * chromatic_distortion holds the per-channel (RGB) distortion amounts and scale compensates for
 * the image growing or shrinking under distortion. See compositor_screen_lens_distortion.glsl. */
GPU_SHADER_CREATE_INFO(compositor_screen_lens_distortion_shared)
    .local_group_size(16, 16)
    .push_constant(Type::VEC3, "chromatic_distortion")
    .push_constant(Type::FLOAT, "scale")
    .sampler(0, ImageType::FLOAT_2D, "input_tx")
    .image(0, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_img")
    .compute_source("compositor_screen_lens_distortion.glsl");

/* Regular variant without jitter. */
GPU_SHADER_CREATE_INFO(compositor_screen_lens_distortion)
    .additional_info("compositor_screen_lens_distortion_shared")
    .do_static_compilation(true);

/* Jitter variant: the JITTER define randomizes the integration samples in the shader. */
GPU_SHADER_CREATE_INFO(compositor_screen_lens_distortion_jitter)
    .additional_info("compositor_screen_lens_distortion_shared")
    .define("JITTER")
    .do_static_compilation(true);

View File

@ -1874,6 +1874,13 @@ typedef enum CMPNodeChannelMatteLimitAlgorithm {
CMP_NODE_CHANNEL_MATTE_LIMIT_ALGORITHM_MAX = 1,
} CMPNodeChannelMatteLimitAlgorithm;
/* Flip Node. Stored in custom1. */
typedef enum CMPNodeFlipMode {
  /* Flip the x coordinate only. */
  CMP_NODE_FLIP_X = 0,
  /* Flip the y coordinate only. */
  CMP_NODE_FLIP_Y = 1,
  /* Flip both coordinates. */
  CMP_NODE_FLIP_X_Y = 2,
} CMPNodeFlipMode;
/* Plane track deform node. */
enum {

View File

@ -5,11 +5,22 @@
* \ingroup cmpnodes
*/
#include "BLI_math_base.h"
#include "BLI_math_vec_types.hh"
#include "DNA_node_types.h"
#include "RNA_access.h"
#include "UI_interface.h"
#include "UI_resources.h"
#include "GPU_shader.h"
#include "GPU_texture.h"
#include "COM_node_operation.hh"
#include "COM_utilities.hh"
#include "node_composite_util.hh"
/* **************** Crop ******************** */
@ -18,7 +29,9 @@ namespace blender::nodes::node_composite_crop_cc {
/* Declare the node's sockets. The color image input is the domain source of the operation
 * (highest domain priority). */
static void cmp_node_crop_declare(NodeDeclarationBuilder &b)
{
  /* The rendered diff left the pre-change declaration in place next to its replacement, which
   * declared the "Image" input twice; only the annotated declaration is kept. */
  b.add_input<decl::Color>(N_("Image"))
      .default_value({1.0f, 1.0f, 1.0f, 1.0f})
      .compositor_domain_priority(0);
  b.add_output<decl::Color>(N_("Image"));
}
@ -54,6 +67,161 @@ static void node_composit_buts_crop(uiLayout *layout, bContext *UNUSED(C), Point
}
}
using namespace blender::realtime_compositor;
/* Realtime compositor implementation of the Crop node. Two modes are supported, selected by
 * custom1: image cropping, which outputs a smaller image matching the crop bounds, and alpha
 * cropping, which keeps the input size but zeroes the pixels outside of the bounds. */
class CropOperation : public NodeOperation {
 public:
  using NodeOperation::NodeOperation;

  void execute() override
  {
    /* The operation does nothing, so just pass the input through. */
    if (is_identity()) {
      get_input("Image").pass_through(get_result("Image"));
      return;
    }

    if (get_is_image_crop()) {
      execute_image_crop();
    }
    else {
      execute_alpha_crop();
    }
  }

  /* Crop by replacing areas outside of the cropping bounds with zero alpha. The output has the
   * same domain as the input image. */
  void execute_alpha_crop()
  {
    GPUShader *shader = shader_manager().get("compositor_alpha_crop");
    GPU_shader_bind(shader);

    int2 lower_bound, upper_bound;
    compute_cropping_bounds(lower_bound, upper_bound);
    GPU_shader_uniform_2iv(shader, "lower_bound", lower_bound);
    GPU_shader_uniform_2iv(shader, "upper_bound", upper_bound);

    const Result &input_image = get_input("Image");
    input_image.bind_as_texture(shader, "input_tx");

    const Domain domain = compute_domain();
    Result &output_image = get_result("Image");
    output_image.allocate_texture(domain);
    output_image.bind_as_image(shader, "output_img");

    compute_dispatch_threads_at_least(shader, domain.size);

    input_image.unbind_as_texture();
    output_image.unbind_as_image();
    GPU_shader_unbind();
  }

  /* Crop the image into a new size that matches the cropping bounds. */
  void execute_image_crop()
  {
    int2 lower_bound, upper_bound;
    compute_cropping_bounds(lower_bound, upper_bound);

    /* The image is cropped into nothing, so just return a single zero value. */
    if (lower_bound.x == upper_bound.x || lower_bound.y == upper_bound.y) {
      Result &result = get_result("Image");
      result.allocate_invalid();
      return;
    }

    GPUShader *shader = shader_manager().get("compositor_image_crop");
    GPU_shader_bind(shader);

    GPU_shader_uniform_2iv(shader, "lower_bound", lower_bound);

    const Result &input_image = get_input("Image");
    input_image.bind_as_texture(shader, "input_tx");

    const int2 size = upper_bound - lower_bound;

    Result &output_image = get_result("Image");
    /* The output keeps the input domain's transformation but uses the cropped size. */
    output_image.allocate_texture(Domain(size, compute_domain().transformation));
    output_image.bind_as_image(shader, "output_img");

    compute_dispatch_threads_at_least(shader, size);

    input_image.unbind_as_texture();
    output_image.unbind_as_image();
    GPU_shader_unbind();
  }

  /* If true, the image should actually be cropped into a new size. Otherwise, if false, the region
   * outside of the cropping bounds will be set to a zero alpha value. */
  bool get_is_image_crop()
  {
    return bnode().custom1;
  }

  /* If true, the cropping bounds are factors of the input size; otherwise, absolute pixels. */
  bool get_is_relative()
  {
    return bnode().custom2;
  }

  /* The cropping bounds are stored in the node's NodeTwoXYs storage. */
  NodeTwoXYs &get_node_two_xys()
  {
    return *static_cast<NodeTwoXYs *>(bnode().storage);
  }

  /* Returns true if the operation does nothing and the input can be passed through. */
  bool is_identity()
  {
    const Result &input = get_input("Image");
    /* Single value inputs can't be cropped and are returned as is. */
    if (input.is_single_value()) {
      return true;
    }

    int2 lower_bound, upper_bound;
    compute_cropping_bounds(lower_bound, upper_bound);
    const int2 input_size = input.domain().size;
    /* The cropping bounds cover the whole image, so no cropping happens. */
    if (lower_bound == int2(0) && upper_bound == input_size) {
      return true;
    }

    return false;
  }

  /* Compute the pixel-space cropping bounds, clamped to the input size and ordered so that
   * lower_bound <= upper_bound on both axes. */
  void compute_cropping_bounds(int2 &lower_bound, int2 &upper_bound)
  {
    const NodeTwoXYs &node_two_xys = get_node_two_xys();
    const int2 input_size = get_input("Image").domain().size;

    if (get_is_relative()) {
      /* The cropping bounds are relative to the image size. The factors are in the [0, 1] range,
       * so it is guaranteed that they won't go over the input image size. */
      /* NOTE(review): y2 maps to the lower bound and y1 to the upper bound, mirroring the
       * absolute branch below — presumably the NodeTwoXYs convention stores y1 as the upper
       * coordinate; confirm against the CPU compositor implementation. */
      lower_bound.x = input_size.x * node_two_xys.fac_x1;
      lower_bound.y = input_size.y * node_two_xys.fac_y2;
      upper_bound.x = input_size.x * node_two_xys.fac_x2;
      upper_bound.y = input_size.y * node_two_xys.fac_y1;
    }
    else {
      /* Make sure the bounds don't go over the input image size. */
      lower_bound.x = min_ii(node_two_xys.x1, input_size.x);
      lower_bound.y = min_ii(node_two_xys.y2, input_size.y);
      upper_bound.x = min_ii(node_two_xys.x2, input_size.x);
      upper_bound.y = min_ii(node_two_xys.y1, input_size.y);
    }

    /* Make sure upper bound is actually higher than the lower bound. Note that when the bounds
     * are reversed, both collapse to the original upper bound, producing an empty region that is
     * handled by the callers. */
    lower_bound.x = min_ii(lower_bound.x, upper_bound.x);
    lower_bound.y = min_ii(lower_bound.y, upper_bound.y);
    upper_bound.x = max_ii(lower_bound.x, upper_bound.x);
    upper_bound.y = max_ii(lower_bound.y, upper_bound.y);
  }
};
/* Instantiate the realtime compositor operation for the Crop node. Ownership of the returned
 * operation is transferred to the caller. */
static NodeOperation *get_compositor_operation(Context &context, DNode node)
{
  return new CropOperation(context, node);
}
} // namespace blender::nodes::node_composite_crop_cc
void register_node_type_cmp_crop()
@ -67,6 +235,7 @@ void register_node_type_cmp_crop()
ntype.draw_buttons = file_ns::node_composit_buts_crop;
node_type_init(&ntype, file_ns::node_composit_init_crop);
node_type_storage(&ntype, "NodeTwoXYs", node_free_standard_storage, node_copy_standard_storage);
ntype.get_compositor_operation = file_ns::get_compositor_operation;
nodeRegisterType(&ntype);
}

View File

@ -5,9 +5,18 @@
* \ingroup cmpnodes
*/
#include "BLI_assert.h"
#include "BLI_utildefines.h"
#include "UI_interface.h"
#include "UI_resources.h"
#include "GPU_shader.h"
#include "GPU_texture.h"
#include "COM_node_operation.hh"
#include "COM_utilities.hh"
#include "node_composite_util.hh"
/* **************** Flip ******************** */
@ -16,7 +25,9 @@ namespace blender::nodes::node_composite_flip_cc {
/* Declare the node's sockets. The color image input is the domain source of the operation
 * (highest domain priority). */
static void cmp_node_flip_declare(NodeDeclarationBuilder &b)
{
  /* The rendered diff left the pre-change declaration in place next to its replacement, which
   * declared the "Image" input twice; only the annotated declaration is kept. */
  b.add_input<decl::Color>(N_("Image"))
      .default_value({1.0f, 1.0f, 1.0f, 1.0f})
      .compositor_domain_priority(0);
  b.add_output<decl::Color>(N_("Image"));
}
@ -25,6 +36,56 @@ static void node_composit_buts_flip(uiLayout *layout, bContext *UNUSED(C), Point
uiItemR(layout, ptr, "axis", UI_ITEM_R_SPLIT_EMPTY_NAME, "", ICON_NONE);
}
using namespace blender::realtime_compositor;
/* Realtime compositor implementation of the Flip node: mirrors the image across the x and/or y
 * axis depending on the mode stored in custom1. */
class FlipOperation : public NodeOperation {
 public:
  using NodeOperation::NodeOperation;

  void execute() override
  {
    Result &input = get_input("Image");
    Result &result = get_result("Image");

    /* Can't flip a single value, pass it through to the output. */
    if (input.is_single_value()) {
      input.pass_through(result);
      return;
    }

    GPUShader *shader = shader_manager().get("compositor_flip");
    GPU_shader_bind(shader);

    /* X flipping is enabled for the X and X&Y modes, and likewise for Y. */
    GPU_shader_uniform_1b(
        shader, "flip_x", ELEM(get_flip_mode(), CMP_NODE_FLIP_X, CMP_NODE_FLIP_X_Y));
    GPU_shader_uniform_1b(
        shader, "flip_y", ELEM(get_flip_mode(), CMP_NODE_FLIP_Y, CMP_NODE_FLIP_X_Y));

    input.bind_as_texture(shader, "input_tx");

    const Domain domain = compute_domain();
    result.allocate_texture(domain);
    result.bind_as_image(shader, "output_img");

    compute_dispatch_threads_at_least(shader, domain.size);

    input.unbind_as_texture();
    result.unbind_as_image();
    GPU_shader_unbind();
  }

  /* The flip mode is stored in the node's custom1 field. */
  CMPNodeFlipMode get_flip_mode()
  {
    return (CMPNodeFlipMode)bnode().custom1;
  }
};
/* Instantiate the realtime compositor operation for the Flip node. Ownership of the returned
 * operation is transferred to the caller. */
static NodeOperation *get_compositor_operation(Context &context, DNode node)
{
  return new FlipOperation(context, node);
}
} // namespace blender::nodes::node_composite_flip_cc
void register_node_type_cmp_flip()
@ -36,6 +97,7 @@ void register_node_type_cmp_flip()
cmp_node_type_base(&ntype, CMP_NODE_FLIP, "Flip", NODE_CLASS_DISTORT);
ntype.declare = file_ns::cmp_node_flip_declare;
ntype.draw_buttons = file_ns::node_composit_buts_flip;
ntype.get_compositor_operation = file_ns::get_compositor_operation;
nodeRegisterType(&ntype);
}

View File

@ -5,20 +5,48 @@
* \ingroup cmpnodes
*/
#include "BLI_math_base.h"
#include "BLI_math_vec_types.hh"
#include "RNA_access.h"
#include "UI_interface.h"
#include "UI_resources.h"
#include "GPU_shader.h"
#include "GPU_texture.h"
#include "COM_node_operation.hh"
#include "COM_utilities.hh"
#include "node_composite_util.hh"
/* Distortion can't be exactly -1.0 as it will cause infinite pincushion distortion. */
#define MINIMUM_DISTORTION -0.999f
/* Arbitrary scaling factor for the dispersion input in projector distortion mode. */
#define PROJECTOR_DISPERSION_SCALE 5.0f
/* Arbitrary scaling factor for the dispersion input in screen distortion mode. */
#define SCREEN_DISPERSION_SCALE 4.0f
/* Arbitrary scaling factor for the distortion input. */
#define DISTORTION_SCALE 4.0f
namespace blender::nodes::node_composite_lensdist_cc {
/* Declare the node's sockets. The image input is the domain source, while the Distort and
 * Dispersion inputs are used as uniform single values by the GPU operation. */
static void cmp_node_lensdist_declare(NodeDeclarationBuilder &b)
{
  /* The rendered diff left the three pre-change declarations in place next to their
   * replacements, declaring every input twice; only the annotated declarations are kept. */
  b.add_input<decl::Color>(N_("Image"))
      .default_value({1.0f, 1.0f, 1.0f, 1.0f})
      .compositor_domain_priority(0);
  b.add_input<decl::Float>(N_("Distort"))
      .default_value(0.0f)
      .min(MINIMUM_DISTORTION)
      .max(1.0f)
      .compositor_expects_single_value();
  b.add_input<decl::Float>(N_("Dispersion"))
      .default_value(0.0f)
      .min(0.0f)
      .max(1.0f)
      .compositor_expects_single_value();
  b.add_output<decl::Color>(N_("Image"));
}
@ -42,6 +70,178 @@ static void node_composit_buts_lensdist(uiLayout *layout, bContext *UNUSED(C), P
uiItemR(col, ptr, "use_fit", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE);
}
using namespace blender::realtime_compositor;
/* Realtime compositor implementation of the Lens Distortion node. Two modes are supported,
 * selected by the node's proj flag: projector distortion, a horizontal chromatic shift, and
 * screen distortion, a radial distortion with optional chromatic dispersion and jitter. */
class LensDistortionOperation : public NodeOperation {
 public:
  using NodeOperation::NodeOperation;

  void execute() override
  {
    /* The operation does nothing, so just pass the input through. */
    if (is_identity()) {
      get_input("Image").pass_through(get_result("Image"));
      return;
    }

    if (get_is_projector()) {
      execute_projector_distortion();
    }
    else {
      execute_screen_distortion();
    }
  }

  void execute_projector_distortion()
  {
    GPUShader *shader = shader_manager().get("compositor_projector_lens_distortion");
    GPU_shader_bind(shader);

    const Result &input_image = get_input("Image");
    input_image.bind_as_texture(shader, "input_tx");

    /* Enable bilinear filtering since the shader samples between pixel centers. NOTE(review):
     * the false arguments presumably disable repeat wrapping — confirm GPU_texture_wrap_mode
     * semantics. */
    GPU_texture_filter_mode(input_image.texture(), true);
    GPU_texture_wrap_mode(input_image.texture(), false, false);

    const Domain domain = compute_domain();
    /* Convert the dispersion into a horizontal shift in normalized texture coordinates by
     * dividing by the image width. */
    const float dispersion = (get_dispersion() * PROJECTOR_DISPERSION_SCALE) / domain.size.x;
    GPU_shader_uniform_1f(shader, "dispersion", dispersion);

    Result &output_image = get_result("Image");
    output_image.allocate_texture(domain);
    output_image.bind_as_image(shader, "output_img");

    compute_dispatch_threads_at_least(shader, domain.size);

    input_image.unbind_as_texture();
    output_image.unbind_as_image();
    GPU_shader_unbind();
  }

  void execute_screen_distortion()
  {
    GPUShader *shader = shader_manager().get(get_screen_distortion_shader());
    GPU_shader_bind(shader);

    const Result &input_image = get_input("Image");
    input_image.bind_as_texture(shader, "input_tx");

    /* The shader samples at arbitrary distorted positions, so enable bilinear filtering. */
    GPU_texture_filter_mode(input_image.texture(), true);
    GPU_texture_wrap_mode(input_image.texture(), false, false);

    const Domain domain = compute_domain();

    const float3 chromatic_distortion = compute_chromatic_distortion();
    GPU_shader_uniform_3fv(shader, "chromatic_distortion", chromatic_distortion);
    GPU_shader_uniform_1f(shader, "scale", compute_scale());

    Result &output_image = get_result("Image");
    output_image.allocate_texture(domain);
    output_image.bind_as_image(shader, "output_img");

    compute_dispatch_threads_at_least(shader, domain.size);

    input_image.unbind_as_texture();
    output_image.unbind_as_image();
    GPU_shader_unbind();
  }

  /* Use the jitter variant of the screen distortion shader if jitter is enabled. */
  const char *get_screen_distortion_shader()
  {
    if (get_is_jitter()) {
      return "compositor_screen_lens_distortion_jitter";
    }
    return "compositor_screen_lens_distortion";
  }

  /* The Distort input, clamped to the valid [MINIMUM_DISTORTION, 1] range. */
  float get_distortion()
  {
    const Result &input = get_input("Distort");
    return clamp_f(input.get_float_value_default(0.0f), MINIMUM_DISTORTION, 1.0f);
  }

  /* The Dispersion input, clamped to the valid [0, 1] range. */
  float get_dispersion()
  {
    const Result &input = get_input("Dispersion");
    return clamp_f(input.get_float_value_default(0.0f), 0.0f, 1.0f);
  }

  /* Get the distortion amount for each channel. The green channel has a distortion amount that
   * matches that specified in the node inputs, while the red and blue channels have higher and
   * lower distortion amounts respectively based on the dispersion value. */
  float3 compute_chromatic_distortion()
  {
    const float green_distortion = get_distortion();
    const float dispersion = get_dispersion() / SCREEN_DISPERSION_SCALE;
    const float red_distortion = clamp_f(green_distortion + dispersion, MINIMUM_DISTORTION, 1.0f);
    const float blue_distortion = clamp_f(green_distortion - dispersion, MINIMUM_DISTORTION, 1.0f);
    return float3(red_distortion, green_distortion, blue_distortion) * DISTORTION_SCALE;
  }

  /* The distortion model will distort the image in such a way that the result will no longer
   * fit the domain of the original image, so we scale the image to account for that. If get_is_fit
   * is false, then the scaling factor will be such that the furthest pixels horizontally and
   * vertically are at the boundary of the image. Otherwise, if get_is_fit is true, the scaling
   * factor will be such that the furthest pixels diagonally are at the corner of the image. */
  float compute_scale()
  {
    /* Undo the arbitrary scaling applied in compute_chromatic_distortion to get back the raw
     * distortion amounts. */
    const float3 distortion = compute_chromatic_distortion() / DISTORTION_SCALE;
    const float maximum_distortion = max_fff(distortion[0], distortion[1], distortion[2]);

    if (get_is_fit() && (maximum_distortion > 0.0f)) {
      return 1.0f / (1.0f + 2.0f * maximum_distortion);
    }
    return 1.0f / (1.0f + maximum_distortion);
  }

  /* Projector mode is enabled through the proj flag of the node storage. */
  bool get_is_projector()
  {
    return get_node_lens_distortion().proj;
  }

  bool get_is_jitter()
  {
    return get_node_lens_distortion().jit;
  }

  bool get_is_fit()
  {
    return get_node_lens_distortion().fit;
  }

  NodeLensDist &get_node_lens_distortion()
  {
    return *static_cast<NodeLensDist *>(bnode().storage);
  }

  /* Returns true if the operation does nothing and the input can be passed through. */
  bool is_identity()
  {
    /* The input is a single value and the operation does nothing. */
    if (get_input("Image").is_single_value()) {
      return true;
    }

    /* The projector mode only depends on dispersion, so zero dispersion does nothing. */
    if (get_is_projector() && get_dispersion() == 0.0f) {
      return true;
    }

    /* Both distortion and dispersion are zero and the operation does nothing. */
    if (get_distortion() == 0.0f && get_dispersion() == 0.0f) {
      return true;
    }

    return false;
  }
};
/* Instantiate the realtime compositor operation for the Lens Distortion node. Ownership of the
 * returned operation is transferred to the caller. */
static NodeOperation *get_compositor_operation(Context &context, DNode node)
{
  return new LensDistortionOperation(context, node);
}
} // namespace blender::nodes::node_composite_lensdist_cc
void register_node_type_cmp_lensdist()
@ -56,6 +256,7 @@ void register_node_type_cmp_lensdist()
node_type_init(&ntype, file_ns::node_composit_init_lensdist);
node_type_storage(
&ntype, "NodeLensDist", node_free_standard_storage, node_copy_standard_storage);
ntype.get_compositor_operation = file_ns::get_compositor_operation;
nodeRegisterType(&ntype);
}

View File

@ -5,9 +5,14 @@
* \ingroup cmpnodes
*/
#include "BLI_assert.h"
#include "BLI_float3x3.hh"
#include "UI_interface.h"
#include "UI_resources.h"
#include "COM_node_operation.hh"
#include "node_composite_util.hh"
/* **************** Rotate ******************** */
@ -16,12 +21,15 @@ namespace blender::nodes::node_composite_rotate_cc {
/* Declare the node's sockets. The image input is the domain source, and the angle input is used
 * as a uniform single value by the operation. */
static void cmp_node_rotate_declare(NodeDeclarationBuilder &b)
{
  /* The rendered diff left the pre-change "Image" declaration and the old ".subtype(PROP_ANGLE);"
   * chain terminator in place next to their replacements, duplicating the declaration and
   * breaking the builder chain; only the annotated declarations are kept. */
  b.add_input<decl::Color>(N_("Image"))
      .default_value({1.0f, 1.0f, 1.0f, 1.0f})
      .compositor_domain_priority(0);
  b.add_input<decl::Float>(N_("Degr"))
      .default_value(0.0f)
      .min(-10000.0f)
      .max(10000.0f)
      .subtype(PROP_ANGLE)
      .compositor_expects_single_value();
  b.add_output<decl::Color>(N_("Image"));
}
@ -35,6 +43,47 @@ static void node_composit_buts_rotate(uiLayout *layout, bContext *UNUSED(C), Poi
uiItemR(layout, ptr, "filter_type", UI_ITEM_R_SPLIT_EMPTY_NAME, "", ICON_NONE);
}
using namespace blender::realtime_compositor;
/* Realtime compositor implementation of the Rotate node. The rotation is not resolved into
 * pixels here; it is stored as a transformation on the result's domain, to be realized later by
 * the compositor's domain realization with the chosen interpolation. */
class RotateOperation : public NodeOperation {
 public:
  using NodeOperation::NodeOperation;

  void execute() override
  {
    Result &input = get_input("Image");
    Result &result = get_result("Image");
    input.pass_through(result);

    /* The socket is declared with PROP_ANGLE, so the value is an angle; zero if unconnected. */
    const float rotation = get_input("Degr").get_float_value_default(0.0f);
    const float3x3 transformation = float3x3::from_rotation(rotation);

    result.transform(transformation);
    result.get_realization_options().interpolation = get_interpolation();
  }

  /* Map the node's filter type (stored in custom1) to the compositor interpolation enum. */
  Interpolation get_interpolation()
  {
    switch (bnode().custom1) {
      case 0:
        return Interpolation::Nearest;
      case 1:
        return Interpolation::Bilinear;
      case 2:
        return Interpolation::Bicubic;
    }

    /* Unknown filter type; fall back to nearest interpolation. */
    BLI_assert_unreachable();
    return Interpolation::Nearest;
  }
};
/* Instantiate the realtime compositor operation for the Rotate node. Ownership of the returned
 * operation is transferred to the caller. */
static NodeOperation *get_compositor_operation(Context &context, DNode node)
{
  return new RotateOperation(context, node);
}
} // namespace blender::nodes::node_composite_rotate_cc
void register_node_type_cmp_rotate()
@ -47,6 +96,7 @@ void register_node_type_cmp_rotate()
ntype.declare = file_ns::cmp_node_rotate_declare;
ntype.draw_buttons = file_ns::node_composit_buts_rotate;
node_type_init(&ntype, file_ns::node_composit_init_rotate);
ntype.get_compositor_operation = file_ns::get_compositor_operation;
nodeRegisterType(&ntype);
}

View File

@ -5,9 +5,15 @@
* \ingroup cmpnodes
*/
#include "BLI_assert.h"
#include "BLI_float3x3.hh"
#include "BLI_math_vector.h"
#include "UI_interface.h"
#include "UI_resources.h"
#include "COM_node_operation.hh"
#include "node_composite_util.hh"
/* **************** Transform ******************** */
@ -16,15 +22,30 @@ namespace blender::nodes::node_composite_transform_cc {
static void cmp_node_transform_declare(NodeDeclarationBuilder &b)
{
b.add_input<decl::Color>(N_("Image")).default_value({0.8f, 0.8f, 0.8f, 1.0f});
b.add_input<decl::Float>(N_("X")).default_value(0.0f).min(-10000.0f).max(10000.0f);
b.add_input<decl::Float>(N_("Y")).default_value(0.0f).min(-10000.0f).max(10000.0f);
b.add_input<decl::Color>(N_("Image"))
.default_value({0.8f, 0.8f, 0.8f, 1.0f})
.compositor_domain_priority(0);
b.add_input<decl::Float>(N_("X"))
.default_value(0.0f)
.min(-10000.0f)
.max(10000.0f)
.compositor_expects_single_value();
b.add_input<decl::Float>(N_("Y"))
.default_value(0.0f)
.min(-10000.0f)
.max(10000.0f)
.compositor_expects_single_value();
b.add_input<decl::Float>(N_("Angle"))
.default_value(0.0f)
.min(-10000.0f)
.max(10000.0f)
.subtype(PROP_ANGLE);
b.add_input<decl::Float>(N_("Scale")).default_value(1.0f).min(0.0001f).max(CMP_SCALE_MAX);
.subtype(PROP_ANGLE)
.compositor_expects_single_value();
b.add_input<decl::Float>(N_("Scale"))
.default_value(1.0f)
.min(0.0001f)
.max(CMP_SCALE_MAX)
.compositor_expects_single_value();
b.add_output<decl::Color>(N_("Image"));
}
@ -33,6 +54,51 @@ static void node_composit_buts_transform(uiLayout *layout, bContext *UNUSED(C),
uiItemR(layout, ptr, "filter_type", UI_ITEM_R_SPLIT_EMPTY_NAME, "", ICON_NONE);
}
using namespace blender::realtime_compositor;
/* Realtime-compositor operation for the Transform node: applies translation, rotation, and
 * uniform scale from the X/Y/Angle/Scale sockets. The combined matrix is recorded as a domain
 * transformation on the result; realization later uses the chosen interpolation. */
class TransformOperation : public NodeOperation {
public:
using NodeOperation::NodeOperation;
/* Share the input's data with the result, then append the composed transformation. */
void execute() override
{
Result &input = get_input("Image");
Result &result = get_result("Image");
/* The result reuses the input's data directly; only the domain transformation changes. */
input.pass_through(result);
const float2 translation = float2(get_input("X").get_float_value_default(0.0f),
get_input("Y").get_float_value_default(0.0f));
/* Angle in radians (the socket is declared with PROP_ANGLE). */
const float rotation = get_input("Angle").get_float_value_default(0.0f);
/* Uniform scale: the single Scale socket is used for both axes. */
const float2 scale = float2(get_input("Scale").get_float_value_default(1.0f));
const float3x3 transformation = float3x3::from_translation_rotation_scale(
translation, rotation, scale);
result.transform(transformation);
result.get_realization_options().interpolation = get_interpolation();
}
/* Map the node's filter type (stored in bNode::custom1) to an interpolation mode. */
Interpolation get_interpolation()
{
switch (bnode().custom1) {
case 0:
return Interpolation::Nearest;
case 1:
return Interpolation::Bilinear;
case 2:
return Interpolation::Bicubic;
}
/* custom1 holds an unexpected value — treat as a programmer error and fall back. */
BLI_assert_unreachable();
return Interpolation::Nearest;
}
};
/* Instantiate the realtime-compositor operation for the Transform node. */
static NodeOperation *get_compositor_operation(Context &context, DNode node)
{
return new TransformOperation(context, node);
}
} // namespace blender::nodes::node_composite_transform_cc
void register_node_type_cmp_transform()
@ -44,6 +110,7 @@ void register_node_type_cmp_transform()
cmp_node_type_base(&ntype, CMP_NODE_TRANSFORM, "Transform", NODE_CLASS_DISTORT);
ntype.declare = file_ns::cmp_node_transform_declare;
ntype.draw_buttons = file_ns::node_composit_buts_transform;
ntype.get_compositor_operation = file_ns::get_compositor_operation;
nodeRegisterType(&ntype);
}

View File

@ -5,9 +5,14 @@
* \ingroup cmpnodes
*/
#include "BLI_float3x3.hh"
#include "BLI_math_vec_types.hh"
#include "UI_interface.h"
#include "UI_resources.h"
#include "COM_node_operation.hh"
#include "node_composite_util.hh"
/* **************** Translate ******************** */
@ -16,9 +21,19 @@ namespace blender::nodes::node_composite_translate_cc {
static void cmp_node_translate_declare(NodeDeclarationBuilder &b)
{
b.add_input<decl::Color>(N_("Image")).default_value({1.0f, 1.0f, 1.0f, 1.0f});
b.add_input<decl::Float>(N_("X")).default_value(0.0f).min(-10000.0f).max(10000.0f);
b.add_input<decl::Float>(N_("Y")).default_value(0.0f).min(-10000.0f).max(10000.0f);
b.add_input<decl::Color>(N_("Image"))
.default_value({1.0f, 1.0f, 1.0f, 1.0f})
.compositor_domain_priority(0);
b.add_input<decl::Float>(N_("X"))
.default_value(0.0f)
.min(-10000.0f)
.max(10000.0f)
.compositor_expects_single_value();
b.add_input<decl::Float>(N_("Y"))
.default_value(0.0f)
.min(-10000.0f)
.max(10000.0f)
.compositor_expects_single_value();
b.add_output<decl::Color>(N_("Image"));
}
@ -34,6 +49,59 @@ static void node_composit_buts_translate(uiLayout *layout, bContext *UNUSED(C),
uiItemR(layout, ptr, "wrap_axis", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE);
}
using namespace blender::realtime_compositor;
/* Realtime-compositor operation for the Translate node: offsets the input image by the X/Y
 * sockets, optionally interpreting them as fractions of the input size, and optionally enabling
 * wrap-around repetition per axis. The offset is recorded as a domain transformation. */
class TranslateOperation : public NodeOperation {
public:
using NodeOperation::NodeOperation;
/* Share the input's data with the result, then append the translation and repeat flags. */
void execute() override
{
Result &input = get_input("Image");
Result &result = get_result("Image");
/* The result reuses the input's data directly; only the domain transformation changes. */
input.pass_through(result);
float x = get_input("X").get_float_value_default(0.0f);
float y = get_input("Y").get_float_value_default(0.0f);
/* In relative mode, X and Y are fractions of the input domain size, so scale them to pixels. */
if (get_use_relative()) {
x *= input.domain().size.x;
y *= input.domain().size.y;
}
const float2 translation = float2(x, y);
const float3x3 transformation = float3x3::from_translation(translation);
result.transform(transformation);
/* Wrap axis maps to per-axis repetition when the result is later realized. */
result.get_realization_options().repeat_x = get_repeat_x();
result.get_realization_options().repeat_y = get_repeat_y();
}
/* The node's storage, holding the relative and wrap-axis options. */
NodeTranslateData &get_node_translate()
{
return *static_cast<NodeTranslateData *>(bnode().storage);
}
/* True when X/Y are relative fractions rather than absolute pixel offsets. */
bool get_use_relative()
{
return get_node_translate().relative;
}
/* True when wrapping is enabled on the X axis (X-only or both axes). */
bool get_repeat_x()
{
return ELEM(get_node_translate().wrap_axis, CMP_NODE_WRAP_X, CMP_NODE_WRAP_XY);
}
/* True when wrapping is enabled on the Y axis (Y-only or both axes). */
bool get_repeat_y()
{
return ELEM(get_node_translate().wrap_axis, CMP_NODE_WRAP_Y, CMP_NODE_WRAP_XY);
}
};
/* Instantiate the realtime-compositor operation for the Translate node. */
static NodeOperation *get_compositor_operation(Context &context, DNode node)
{
return new TranslateOperation(context, node);
}
} // namespace blender::nodes::node_composite_translate_cc
void register_node_type_cmp_translate()
@ -48,6 +116,7 @@ void register_node_type_cmp_translate()
node_type_init(&ntype, file_ns::node_composit_init_translate);
node_type_storage(
&ntype, "NodeTranslateData", node_free_standard_storage, node_copy_standard_storage);
ntype.get_compositor_operation = file_ns::get_compositor_operation;
nodeRegisterType(&ntype);
}