BLI: add missing materialize methods for virtual arrays

This does two things:
* Introduce new `materialize_compressed` methods. These are used when the
  destination array should not contain any gaps; a short usage sketch follows
  below.
* Add materialize methods to various classes where they were missing and
  therefore caused overhead, because slower fallbacks had to be used.
Jacques Lucke 2022-04-07 10:02:34 +02:00
parent 2aff04917f
commit 384a02a214
4 changed files with 320 additions and 1 deletion
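
For illustration only (not part of this commit), here is a minimal sketch of the
difference between the two flavors, written against the typed VArray API that the
tests below also exercise; the function, values, and mask are made up for the example:

#include <array>
#include "BLI_virtual_array.hh"

/* Hypothetical helper, only to contrast the two materialize flavors. */
static void materialize_flavors_sketch()
{
  using namespace blender;
  VArray<int> varray = VArray<int>::ForFunc(10, [](const int64_t i) { return int(i * 10); });

  /* #materialize keeps the virtual array indices: the destination must cover at
   * least mask.min_array_size() elements and unselected slots stay untouched (gaps). */
  std::array<int, 10> with_gaps = {};
  varray.materialize({2, 5, 7}, with_gaps);
  /* with_gaps[2] == 20, with_gaps[5] == 50, with_gaps[7] == 70 */

  /* #materialize_compressed packs the selected values: mask.size() elements are
   * enough and element i corresponds to mask[i]. */
  std::array<int, 3> packed;
  varray.materialize_compressed({2, 5, 7}, packed);
  /* packed == {20, 50, 70} */
}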


@@ -51,6 +51,9 @@ class GVArrayImpl {
virtual void materialize(const IndexMask mask, void *dst) const;
virtual void materialize_to_uninitialized(const IndexMask mask, void *dst) const;
virtual void materialize_compressed(IndexMask mask, void *dst) const;
virtual void materialize_compressed_to_uninitialized(IndexMask mask, void *dst) const;
virtual bool try_assign_VArray(void *varray) const;
virtual bool may_have_ownership() const;
};
@@ -133,6 +136,9 @@ class GVArrayCommon {
void materialize_to_uninitialized(void *dst) const;
void materialize_to_uninitialized(const IndexMask mask, void *dst) const;
void materialize_compressed(IndexMask mask, void *dst) const;
void materialize_compressed_to_uninitialized(IndexMask mask, void *dst) const;
/**
* Returns true when the virtual array is stored as a span internally.
*/
@@ -336,6 +342,16 @@ template<typename T> class GVArrayImpl_For_VArray : public GVArrayImpl {
varray_.materialize_to_uninitialized(mask, MutableSpan((T *)dst, mask.min_array_size()));
}
void materialize_compressed(const IndexMask mask, void *dst) const override
{
varray_.materialize_compressed(mask, MutableSpan((T *)dst, mask.size()));
}
void materialize_compressed_to_uninitialized(const IndexMask mask, void *dst) const override
{
varray_.materialize_compressed_to_uninitialized(mask, MutableSpan((T *)dst, mask.size()));
}
bool try_assign_VArray(void *varray) const override
{
*(VArray<T> *)varray = varray_;
@@ -400,6 +416,27 @@ template<typename T> class VArrayImpl_For_GVArray : public VArrayImpl<T> {
{
return varray_.may_have_ownership();
}
void materialize(IndexMask mask, MutableSpan<T> r_span) const override
{
varray_.materialize(mask, r_span.data());
}
void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const override
{
varray_.materialize_to_uninitialized(mask, r_span.data());
}
void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const override
{
varray_.materialize_compressed(mask, r_span.data());
}
void materialize_compressed_to_uninitialized(IndexMask mask,
MutableSpan<T> r_span) const override
{
varray_.materialize_compressed_to_uninitialized(mask, r_span.data());
}
};
/* Used to convert any typed virtual mutable array into a generic one. */
@@ -479,6 +516,16 @@ template<typename T> class GVMutableArrayImpl_For_VMutableArray : public GVMutab
varray_.materialize_to_uninitialized(mask, MutableSpan((T *)dst, mask.min_array_size()));
}
void materialize_compressed(const IndexMask mask, void *dst) const override
{
varray_.materialize_compressed(mask, MutableSpan((T *)dst, mask.size()));
}
void materialize_compressed_to_uninitialized(const IndexMask mask, void *dst) const override
{
varray_.materialize_compressed_to_uninitialized(mask, MutableSpan((T *)dst, mask.size()));
}
bool try_assign_VArray(void *varray) const override
{
*(VArray<T> *)varray = varray_;
@@ -561,6 +608,27 @@ template<typename T> class VMutableArrayImpl_For_GVMutableArray : public VMutabl
{
return varray_.may_have_ownership();
}
void materialize(IndexMask mask, MutableSpan<T> r_span) const override
{
varray_.materialize(mask, r_span.data());
}
void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const override
{
varray_.materialize_to_uninitialized(mask, r_span.data());
}
void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const override
{
varray_.materialize_compressed(mask, r_span.data());
}
void materialize_compressed_to_uninitialized(IndexMask mask,
MutableSpan<T> r_span) const override
{
varray_.materialize_compressed_to_uninitialized(mask, r_span.data());
}
};
/** \} */
@@ -590,6 +658,13 @@ class GVArrayImpl_For_GSpan : public GVMutableArrayImpl {
bool is_span() const override;
GSpan get_internal_span() const override;
virtual void materialize(const IndexMask mask, void *dst) const override;
virtual void materialize_to_uninitialized(const IndexMask mask, void *dst) const override;
virtual void materialize_compressed(const IndexMask mask, void *dst) const override;
virtual void materialize_compressed_to_uninitialized(const IndexMask mask,
void *dst) const override;
};
/** \} */


@@ -107,7 +107,7 @@ template<typename T> class VArrayImpl {
/**
* Copy values from the virtual array into the provided span. The index of the value in the
* virtual is the same as the index in the span.
* virtual array is the same as the index in the span.
*/
virtual void materialize(IndexMask mask, MutableSpan<T> r_span) const
{
@@ -146,6 +146,35 @@ template<typename T> class VArrayImpl {
}
}
/**
* Copy values from the virtual array into the provided span. Contrary to #materialize, the index
* in the virtual array is not the same as the index in the output span. Instead, the span is filled
* without gaps.
*/
virtual void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const
{
BLI_assert(mask.size() == r_span.size());
mask.to_best_mask_type([&](auto best_mask) {
for (const int64_t i : IndexRange(best_mask.size())) {
r_span[i] = this->get(best_mask[i]);
}
});
}
/**
* Same as #materialize_compressed but #r_span is expected to be uninitialized.
*/
virtual void materialize_compressed_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const
{
BLI_assert(mask.size() == r_span.size());
T *dst = r_span.data();
mask.to_best_mask_type([&](auto best_mask) {
for (const int64_t i : IndexRange(best_mask.size())) {
new (dst + i) T(this->get(best_mask[i]));
}
});
}
/**
* If this virtual array wraps another #GVArray, this method should assign the wrapped array to the
* provided reference. This allows losslessly converting between generic and typed virtual
@@ -265,6 +294,25 @@ template<typename T> class VArrayImpl_For_Span : public VMutableArrayImpl<T> {
const Span<T> other_span = other.get_internal_span();
return data_ == other_span.data();
}
void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const override
{
mask.to_best_mask_type([&](auto best_mask) {
for (const int64_t i : IndexRange(best_mask.size())) {
r_span[i] = data_[best_mask[i]];
}
});
}
void materialize_compressed_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const override
{
T *dst = r_span.data();
mask.to_best_mask_type([&](auto best_mask) {
for (const int64_t i : IndexRange(best_mask.size())) {
new (dst + i) T(data_[best_mask[i]]);
}
});
}
};
/**
@@ -341,6 +389,20 @@ template<typename T> class VArrayImpl_For_Single final : public VArrayImpl<T> {
{
return value_;
}
void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const override
{
BLI_assert(mask.size() == r_span.size());
UNUSED_VARS_NDEBUG(mask);
r_span.fill(value_);
}
void materialize_compressed_to_uninitialized(IndexMask mask,
MutableSpan<T> r_span) const override
{
BLI_assert(mask.size() == r_span.size());
uninitialized_fill_n(r_span.data(), mask.size(), value_);
}
};
/**
@@ -374,6 +436,29 @@ template<typename T, typename GetFunc> class VArrayImpl_For_Func final : public
T *dst = r_span.data();
mask.foreach_index([&](const int64_t i) { new (dst + i) T(get_func_(i)); });
}
void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const override
{
BLI_assert(mask.size() == r_span.size());
T *dst = r_span.data();
mask.to_best_mask_type([&](auto best_mask) {
for (const int64_t i : IndexRange(best_mask.size())) {
dst[i] = get_func_(best_mask[i]);
}
});
}
void materialize_compressed_to_uninitialized(IndexMask mask,
MutableSpan<T> r_span) const override
{
BLI_assert(mask.size() == r_span.size());
T *dst = r_span.data();
mask.to_best_mask_type([&](auto best_mask) {
for (const int64_t i : IndexRange(best_mask.size())) {
new (dst + i) T(get_func_(best_mask[i]));
}
});
}
};
/**
@@ -422,6 +507,29 @@ class VArrayImpl_For_DerivedSpan final : public VMutableArrayImpl<ElemT> {
mask.foreach_index([&](const int64_t i) { new (dst + i) ElemT(GetFunc(data_[i])); });
}
void materialize_compressed(IndexMask mask, MutableSpan<ElemT> r_span) const override
{
BLI_assert(mask.size() == r_span.size());
ElemT *dst = r_span.data();
mask.to_best_mask_type([&](auto best_mask) {
for (const int64_t i : IndexRange(best_mask.size())) {
dst[i] = GetFunc(data_[best_mask[i]]);
}
});
}
void materialize_compressed_to_uninitialized(IndexMask mask,
MutableSpan<ElemT> r_span) const override
{
BLI_assert(mask.size() == r_span.size());
ElemT *dst = r_span.data();
mask.to_best_mask_type([&](auto best_mask) {
for (const int64_t i : IndexRange(best_mask.size())) {
new (dst + i) ElemT(GetFunc(data_[best_mask[i]]));
}
});
}
bool may_have_ownership() const override
{
return false;
@@ -740,6 +848,17 @@ template<typename T> class VArrayCommon {
impl_->materialize_to_uninitialized(mask, r_span);
}
/** Copy some elements of the virtual array into a span. */
void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const
{
impl_->materialize_compressed(mask, r_span);
}
void materialize_compressed_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const
{
impl_->materialize_compressed_to_uninitialized(mask, r_span);
}
/** See #GVArrayImpl::try_assign_GVArray. */
bool try_assign_GVArray(GVArray &varray) const
{


@@ -24,6 +24,22 @@ void GVArrayImpl::materialize_to_uninitialized(const IndexMask mask, void *dst)
}
}
void GVArrayImpl::materialize_compressed(IndexMask mask, void *dst) const
{
for (const int64_t i : mask.index_range()) {
void *elem_dst = POINTER_OFFSET(dst, type_->size() * i);
this->get(mask[i], elem_dst);
}
}
void GVArrayImpl::materialize_compressed_to_uninitialized(IndexMask mask, void *dst) const
{
for (const int64_t i : mask.index_range()) {
void *elem_dst = POINTER_OFFSET(dst, type_->size() * i);
this->get_to_uninitialized(mask[i], elem_dst);
}
}
void GVArrayImpl::get(const int64_t index, void *r_value) const
{
type_->destruct(r_value);
@@ -172,6 +188,27 @@ GSpan GVArrayImpl_For_GSpan::get_internal_span() const
return GSpan(*type_, data_, size_);
}
void GVArrayImpl_For_GSpan::materialize(const IndexMask mask, void *dst) const
{
type_->copy_assign_indices(data_, dst, mask);
}
void GVArrayImpl_For_GSpan::materialize_to_uninitialized(const IndexMask mask, void *dst) const
{
type_->copy_construct_indices(data_, dst, mask);
}
void GVArrayImpl_For_GSpan::materialize_compressed(const IndexMask mask, void *dst) const
{
type_->copy_assign_compressed(data_, dst, mask);
}
void GVArrayImpl_For_GSpan::materialize_compressed_to_uninitialized(const IndexMask mask,
void *dst) const
{
type_->copy_construct_compressed(data_, dst, mask);
}
class GVArrayImpl_For_GSpan_final final : public GVArrayImpl_For_GSpan {
public:
using GVArrayImpl_For_GSpan::GVArrayImpl_For_GSpan;
@@ -231,6 +268,26 @@ class GVArrayImpl_For_SingleValueRef : public GVArrayImpl {
{
type_->copy_assign(value_, r_value);
}
void materialize(const IndexMask mask, void *dst) const override
{
type_->fill_assign_indices(value_, dst, mask);
}
void materialize_to_uninitialized(const IndexMask mask, void *dst) const override
{
type_->fill_construct_indices(value_, dst, mask);
}
void materialize_compressed(const IndexMask mask, void *dst) const override
{
type_->fill_assign_n(value_, dst, mask.size());
}
void materialize_compressed_to_uninitialized(const IndexMask mask, void *dst) const override
{
type_->fill_construct_n(value_, dst, mask.size());
}
};
class GVArrayImpl_For_SingleValueRef_final final : public GVArrayImpl_For_SingleValueRef {
@@ -448,6 +505,22 @@ class GVArrayImpl_For_SlicedGVArray : public GVArrayImpl {
{
varray_.get_internal_single(r_value);
}
void materialize_compressed_to_uninitialized(const IndexMask mask, void *dst) const override
{
/* Shift the mask indices by the slice offset before delegating to the wrapped virtual array. */
if (mask.is_range()) {
const IndexRange mask_range = mask.as_range();
const IndexRange offset_mask_range{mask_range.start() + offset_, mask_range.size()};
varray_.materialize_compressed_to_uninitialized(offset_mask_range, dst);
}
else {
Vector<int64_t, 32> offset_mask_indices(mask.size());
for (const int64_t i : mask.index_range()) {
offset_mask_indices[i] = mask[i] + offset_;
}
varray_.materialize_compressed_to_uninitialized(offset_mask_indices.as_span(), dst);
}
}
};
/** \} */
@@ -505,6 +578,16 @@ void GVArrayCommon::materialize_to_uninitialized(const IndexMask mask, void *dst
impl_->materialize_to_uninitialized(mask, dst);
}
void GVArrayCommon::materialize_compressed(IndexMask mask, void *dst) const
{
impl_->materialize_compressed(mask, dst);
}
void GVArrayCommon::materialize_compressed_to_uninitialized(IndexMask mask, void *dst) const
{
impl_->materialize_compressed_to_uninitialized(mask, dst);
}
bool GVArrayCommon::may_have_ownership() const
{
return impl_->may_have_ownership();


@@ -180,4 +180,46 @@ TEST(virtual_array, MutableToImmutable)
}
}
TEST(virtual_array, MaterializeCompressed)
{
{
std::array<int, 10> array = {0, 10, 20, 30, 40, 50, 60, 70, 80, 90};
VArray<int> varray = VArray<int>::ForSpan(array);
std::array<int, 3> compressed_array;
varray.materialize_compressed({3, 6, 7}, compressed_array);
EXPECT_EQ(compressed_array[0], 30);
EXPECT_EQ(compressed_array[1], 60);
EXPECT_EQ(compressed_array[2], 70);
varray.materialize_compressed_to_uninitialized({2, 8, 9}, compressed_array);
EXPECT_EQ(compressed_array[0], 20);
EXPECT_EQ(compressed_array[1], 80);
EXPECT_EQ(compressed_array[2], 90);
}
{
VArray<int> varray = VArray<int>::ForSingle(4, 10);
std::array<int, 3> compressed_array;
varray.materialize_compressed({2, 6, 7}, compressed_array);
EXPECT_EQ(compressed_array[0], 4);
EXPECT_EQ(compressed_array[1], 4);
EXPECT_EQ(compressed_array[2], 4);
compressed_array.fill(0);
varray.materialize_compressed_to_uninitialized({0, 1, 2}, compressed_array);
EXPECT_EQ(compressed_array[0], 4);
EXPECT_EQ(compressed_array[1], 4);
EXPECT_EQ(compressed_array[2], 4);
}
{
VArray<int> varray = VArray<int>::ForFunc(10, [](const int64_t i) { return (int)(i * i); });
std::array<int, 3> compressed_array;
varray.materialize_compressed({5, 7, 8}, compressed_array);
EXPECT_EQ(compressed_array[0], 25);
EXPECT_EQ(compressed_array[1], 49);
EXPECT_EQ(compressed_array[2], 64);
varray.materialize_compressed_to_uninitialized({1, 2, 3}, compressed_array);
EXPECT_EQ(compressed_array[0], 1);
EXPECT_EQ(compressed_array[1], 4);
EXPECT_EQ(compressed_array[2], 9);
}
}
} // namespace blender::tests
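
As an addendum (not part of the commit or its tests), the new generic overloads on
GVArrayCommon can be exercised through the type-erased interface as well. A hedged
sketch, assuming the existing GVArray::ForSpan factory and CPPType::get<int>():

#include "BLI_generic_virtual_array.hh"

/* Hypothetical sketch only: gather into a gap-free buffer via the void * interface. */
static void gvarray_compressed_sketch()
{
  using namespace blender;
  const int values[5] = {0, 10, 20, 30, 40};
  GVArray gvarray = GVArray::ForSpan(GSpan(CPPType::get<int>(), values, 5));

  /* The destination only needs mask.size() elements; values are packed without gaps. */
  int packed[3];
  gvarray.materialize_compressed_to_uninitialized({1, 3, 4}, packed);
  /* packed == {10, 30, 40} */
}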