Sequencer: Improve Image Transform Quality When Exporting.

Image Transform uses linear or nearest sampling during editing and exporting.
This sampling is fine for images that aren't scaled. When sequencing,
however, you mostly want to use some sort of scaling, which leads to poorer
quality.

This change will use sub-sampling to improve the quality. This is only
enabled when rendering. During editing the subsampling is disabled to
keep the user interface reacting as expected.

Another improvement is that the image transform of a scan line is stopped
once at least 4 output pixels have been skipped. In that case we
expect that there are no further samples that would change the result.

In a future patch this could be replaced by a ray/bounding box intersection
as that would remove some unneeded loops in both the single sampled and
sub sampled approach.
This commit is contained in:
Jeroen Bakker 2023-01-26 13:38:59 +01:00
parent 6d79bc0073
commit f210842a72
Notes: blender-bot 2024-01-29 18:42:43 +01:00
Referenced by pull request #117584, VSE: replace Subsampled3x3 filter by a general Box filter
Referenced by commit 5bd1e0bb22, VSE: replace Subsampled3x3 filter by a general Box filter
4 changed files with 191 additions and 12 deletions

View File

@ -516,6 +516,7 @@ template<typename TextureMethod> class ScreenSpaceDrawingMode : public AbstractD
&texture_buffer,
transform_mode,
IMB_FILTER_NEAREST,
1,
uv_to_texel.ptr(),
crop_rect_ptr);
}

View File

@ -846,6 +846,8 @@ typedef enum eIMBTransformMode {
* - Only one data type buffer will be used (rect_float has priority over rect)
* \param mode: Cropping/Wrap repeat effect to apply during transformation.
* \param filter: Interpolation to use during sampling.
* \param num_subsamples: Number of subsamples to use. Increasing this would improve the quality,
* but reduces the performance.
* \param transform_matrix: Transformation matrix to use.
* The given matrix should transform between dst pixel space to src pixel space.
* One unit is one pixel.
@ -860,6 +862,7 @@ void IMB_transform(const struct ImBuf *src,
struct ImBuf *dst,
eIMBTransformMode mode,
eIMBInterpolationFilterMode filter,
const int num_subsamples,
const float transform_matrix[4][4],
const struct rctf *src_crop);

View File

@ -9,6 +9,7 @@
#include <type_traits>
#include "BLI_math.h"
#include "BLI_math_color_blend.h"
#include "BLI_math_vector.hh"
#include "BLI_rect.h"
@ -37,6 +38,14 @@ struct TransformUserData {
*/
double2 add_y;
struct {
int num;
double2 offset_x;
double2 offset_y;
double2 add_x;
double2 add_y;
} subsampling;
/**
* \brief Cropping region in source image pixel space.
*/
@ -45,11 +54,12 @@ struct TransformUserData {
/**
* \brief Initialize the start_uv, add_x and add_y fields based on the given transform matrix.
*/
void init(const float transform_matrix[4][4])
void init(const float transform_matrix[4][4], const int num_subsamples)
{
init_start_uv(transform_matrix);
init_add_x(transform_matrix);
init_add_y(transform_matrix);
init_subsampling(num_subsamples);
}
private:
@ -83,6 +93,15 @@ struct TransformUserData {
mul_v3_m4v3_db(add_y_v3, transform_matrix_double, double3(0.0, height, 0.0));
add_y = double2((add_y_v3 - double3(start_uv)) * (1.0 / height));
}
/**
 * \brief Initialize the subsampling grid derived from #add_x and #add_y.
 *
 * Must be called after init_add_x/init_add_y, as the steps computed there are
 * subdivided here. The offsets position the first subsample so that the
 * `num` x `num` subsample grid is centered inside the footprint of a single
 * destination pixel.
 */
void init_subsampling(const int num_subsamples)
{
  /* Clamp to at least one sample so the divisions below are always valid. */
  subsampling.num = max_ii(num_subsamples, 1);
  /* Step between adjacent subsamples: the per-pixel UV step split into `num` parts. */
  subsampling.add_x = add_x / (subsampling.num);
  subsampling.add_y = add_y / (subsampling.num);
  /* Shift back half a pixel step, then forward half a subsample step, so the
   * subsample grid is symmetric around the pixel's UV coordinate. */
  subsampling.offset_x = -add_x * 0.5 + subsampling.add_x * 0.5;
  subsampling.offset_y = -add_y * 0.5 + subsampling.add_y * 0.5;
}
};
/**
@ -257,6 +276,39 @@ class WrapRepeatUV : public BaseUVWrapping {
}
};
/* TODO: should we use math_vectors for this. */

/**
 * \brief Accumulation buffer for a single output pixel.
 *
 * Inherits from std::array so its storage can be handed directly to the
 * BLI color-blend functions via #data(). Only uchar and float storage types
 * are supported.
 */
template<typename StorageType, int NumChannels>
class Pixel : public std::array<StorageType, NumChannels> {
 public:
  /** Reset all channels to zero before accumulating subsamples. */
  void clear()
  {
    this->fill(StorageType(0));
  }

  /**
   * \brief Fold a new subsample into the running average.
   *
   * Uses an incremental average: the new sample is weighted `1 / (n + 1)`
   * against the value accumulated so far, so after all samples have been
   * added the pixel contains their mean.
   *
   * \param other: The subsample to accumulate.
   * \param sample_number: Number of samples already accumulated (0-based).
   */
  void add_subsample(const Pixel<StorageType, NumChannels> &other, int sample_number)
  {
    BLI_STATIC_ASSERT((std::is_same_v<StorageType, uchar>) || (std::is_same_v<StorageType, float>),
                      "Only uchar and float channels supported.");

    /* Use a float literal to avoid an implicit double->float narrowing. */
    const float factor = 1.0f / (sample_number + 1);
    if constexpr (std::is_same_v<StorageType, uchar>) {
      BLI_STATIC_ASSERT(NumChannels == 4, "Pixels using uchar requires to have 4 channels.");
      blend_color_interpolate_byte(this->data(), this->data(), other.data(), factor);
    }
    else if constexpr (std::is_same_v<StorageType, float> && NumChannels == 4) {
      blend_color_interpolate_float(this->data(), this->data(), other.data(), factor);
    }
    else if constexpr (std::is_same_v<StorageType, float>) {
      /* Generic float case (1..3 channels): manual linear interpolation. */
      for (int channel_index : IndexRange(NumChannels)) {
        (*this)[channel_index] = (*this)[channel_index] * (1.0f - factor) +
                                 other[channel_index] * factor;
      }
    }
  }
};
/**
* \brief Read a sample from an image buffer.
*
@ -286,7 +338,7 @@ class Sampler {
public:
using ChannelType = StorageType;
static const int ChannelLen = NumChannels;
using SampleType = std::array<StorageType, NumChannels>;
using SampleType = Pixel<StorageType, NumChannels>;
void sample(const ImBuf *source, const double2 uv, SampleType &r_sample)
{
@ -378,12 +430,12 @@ class Sampler {
template<typename StorageType, int SourceNumChannels, int DestinationNumChannels>
class ChannelConverter {
public:
using SampleType = std::array<StorageType, SourceNumChannels>;
using SampleType = Pixel<StorageType, SourceNumChannels>;
using PixelType = PixelPointer<StorageType, DestinationNumChannels>;
/**
* \brief Convert the number of channels of the given sample to match the pixel pointer and store
* it at the location the pixel_pointer points at.
* \brief Convert the number of channels of the given sample to match the pixel pointer and
* store it at the location the pixel_pointer points at.
*/
void convert_and_store(const SampleType &sample, PixelType &pixel_pointer)
{
@ -413,6 +465,19 @@ class ChannelConverter {
BLI_assert_unreachable();
}
}
void mix_and_store(const SampleType &sample, PixelType &pixel_pointer, const float mix_factor)
{
if constexpr (std::is_same_v<StorageType, uchar>) {
BLI_STATIC_ASSERT(SourceNumChannels == 4, "Unsigned chars always have 4 channels.");
BLI_STATIC_ASSERT(DestinationNumChannels == 4, "Unsigned chars always have 4 channels.");
blend_color_interpolate_byte(
pixel_pointer.get_pointer(), pixel_pointer.get_pointer(), sample.data(), mix_factor);
}
else {
BLI_assert_unreachable();
}
}
};
/**
@ -442,8 +507,8 @@ class ScanlineProcessor {
Sampler sampler;
/**
* \brief Channels sizzling logic to convert between the input image buffer and the output image
* buffer.
* \brief Channels sizzling logic to convert between the input image buffer and the output
* image buffer.
*/
ChannelConverter<typename Sampler::ChannelType,
Sampler::ChannelLen,
@ -455,22 +520,118 @@ class ScanlineProcessor {
* \brief Inner loop of the transformations, processing a full scanline.
*/
void process(const TransformUserData *user_data, int scanline)
{
// if (user_data->subsampling.num > 1) {
process_with_subsampling(user_data, scanline);
// }
// else {
// process_one_sample_per_pixel(user_data, scanline);
// }
}
private:
void process_one_sample_per_pixel(const TransformUserData *user_data, int scanline)
{
const int width = user_data->dst->x;
double2 uv = user_data->start_uv + user_data->add_y * scanline;
output.init_pixel_pointer(user_data->dst, int2(0, scanline));
for (int xi = 0; xi < width; xi++) {
int xi = 0;
while (xi < width) {
const bool discard_pixel = discarder.should_discard(*user_data, uv);
if (!discard_pixel) {
break;
}
uv += user_data->add_x;
output.increase_pixel_pointer();
xi += 1;
}
/*
* Draw until we didn't draw for at least 4 pixels.
*/
int num_output_pixels_skipped = 0;
const int num_missing_output_pixels_allowed = 4;
for (; xi < width && num_output_pixels_skipped < num_missing_output_pixels_allowed; xi++) {
if (!discarder.should_discard(*user_data, uv)) {
typename Sampler::SampleType sample;
sampler.sample(user_data->src, uv, sample);
channel_converter.convert_and_store(sample, output);
}
else {
num_output_pixels_skipped += 1;
}
uv += user_data->add_x;
output.increase_pixel_pointer();
}
}
/**
 * \brief Process a scanline by averaging a `num` x `num` grid of subsamples
 * per destination pixel.
 *
 * Improves quality when the image is scaled, at the cost of performance.
 */
void process_with_subsampling(const TransformUserData *user_data, int scanline)
{
  const int width = user_data->dst->x;
  double2 uv = user_data->start_uv + user_data->add_y * scanline;
  output.init_pixel_pointer(user_data->dst, int2(0, scanline));

  int xi = 0;
  /*
   * Skip leading pixels that would be fully discarded.
   *
   * NOTE: This could be improved by intersection between a ray and the image bounds.
   */
  while (xi < width) {
    /* A pixel can only be skipped entirely when all four corners of its
     * footprint are discarded; otherwise some subsample may still hit. */
    const bool discard_pixel = discarder.should_discard(*user_data, uv) &&
                               discarder.should_discard(*user_data, uv + user_data->add_x) &&
                               discarder.should_discard(*user_data, uv + user_data->add_y) &&
                               discarder.should_discard(
                                   *user_data, uv + user_data->add_x + user_data->add_y);
    if (!discard_pixel) {
      break;
    }
    uv += user_data->add_x;
    output.increase_pixel_pointer();
    xi += 1;
  }

  /*
   * Draw until we didn't draw for at least 4 pixels; after that we expect no
   * further samples along this scanline would change the result.
   */
  int num_output_pixels_skipped = 0;
  const int num_missing_output_pixels_allowed = 4;
  for (; xi < width && num_output_pixels_skipped < num_missing_output_pixels_allowed; xi++) {
    typename Sampler::SampleType sample;
    sample.clear();
    int num_subsamples_added = 0;

    /* Walk the subsample grid row by row, starting at the grid's negative
     * offset from the pixel's UV, accumulating an incremental average of all
     * subsamples that are not discarded. */
    double2 subsample_uv_y = uv + user_data->subsampling.offset_y;
    for (int subsample_yi : IndexRange(user_data->subsampling.num)) {
      UNUSED_VARS(subsample_yi);
      double2 subsample_uv = subsample_uv_y + user_data->subsampling.offset_x;
      for (int subsample_xi : IndexRange(user_data->subsampling.num)) {
        UNUSED_VARS(subsample_xi);
        if (!discarder.should_discard(*user_data, subsample_uv)) {
          typename Sampler::SampleType sub_sample;
          sampler.sample(user_data->src, subsample_uv, sub_sample);
          sample.add_subsample(sub_sample, num_subsamples_added);
          num_subsamples_added += 1;
        }
        subsample_uv += user_data->subsampling.add_x;
      }
      subsample_uv_y += user_data->subsampling.add_y;
    }

    if (num_subsamples_added != 0) {
      /* Weight the averaged sample by the fraction of subsamples that hit the
       * source, so partially covered edge pixels blend toward the background. */
      float mix_weight = float(num_subsamples_added) /
                         (user_data->subsampling.num * user_data->subsampling.num);
      channel_converter.mix_and_store(sample, output, mix_weight);
    }
    else {
      /* No subsample hit the source: count this as a skipped output pixel. */
      num_output_pixels_skipped += 1;
    }
    uv += user_data->add_x;
    output.increase_pixel_pointer();
  }
}
};
/**
@ -562,6 +723,7 @@ void IMB_transform(const struct ImBuf *src,
struct ImBuf *dst,
const eIMBTransformMode mode,
const eIMBInterpolationFilterMode filter,
const int num_subsamples,
const float transform_matrix[4][4],
const struct rctf *src_crop)
{
@ -575,7 +737,7 @@ void IMB_transform(const struct ImBuf *src,
if (mode == IMB_TRANSFORM_MODE_CROP_SRC) {
user_data.src_crop = *src_crop;
}
user_data.init(transform_matrix);
user_data.init(transform_matrix, num_subsamples);
if (filter == IMB_FILTER_NEAREST) {
transform_threaded<IMB_FILTER_NEAREST>(&user_data, mode);

View File

@ -445,8 +445,14 @@ static void sequencer_thumbnail_transform(ImBuf *in, ImBuf *out)
(const float[]){scale_x, scale_y, 1.0f});
transform_pivot_set_m4(transform_matrix, pivot);
invert_m4(transform_matrix);
IMB_transform(in, out, IMB_TRANSFORM_MODE_REGULAR, IMB_FILTER_NEAREST, transform_matrix, NULL);
const int num_subsamples = 1;
IMB_transform(in,
out,
IMB_TRANSFORM_MODE_REGULAR,
IMB_FILTER_NEAREST,
num_subsamples,
transform_matrix,
NULL);
}
/* Check whether transform introduces transparent ares in the result (happens when the transformed
@ -518,7 +524,14 @@ static void sequencer_preprocess_transform_crop(
filter = IMB_FILTER_BILINEAR;
}
IMB_transform(in, out, IMB_TRANSFORM_MODE_CROP_SRC, filter, transform_matrix, &source_crop);
const int num_subsamples = G.is_rendering ? 3 : 1;
IMB_transform(in,
out,
IMB_TRANSFORM_MODE_CROP_SRC,
filter,
num_subsamples,
transform_matrix,
&source_crop);
if (!seq_image_transform_transparency_gained(context, seq)) {
out->planes = in->planes;