Cleanup: use C++ style casts in various places

Jacques Lucke 2020-08-07 18:24:59 +02:00
parent 28b1022434
commit c50e5fcc34
50 changed files with 296 additions and 261 deletions
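
The hunks below apply one mechanical convention: static_cast replaces a C-style cast for value conversions (integer width changes, void pointer to typed pointer, placement-new arguments) and for the asserted downcasts on the node and socket classes, while reinterpret_cast is used where a bit pattern is reinterpreted (pointer to uintptr_t, casts between unrelated struct layouts such as ModifierData to the concrete modifier data structs). The short standalone sketch below only illustrates that convention; the Item type and everything around it are made up for this example and are not part of the commit.

// Illustrative sketch of the cast conventions applied in this commit.
// Example-only code, not Blender sources.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <new>

struct Item {
  int value;
};

int main()
{
  /* Numeric narrowing: static_cast rather than "(int)". */
  const std::size_t raw_size = 1000;
  const int size = static_cast<int>(raw_size);

  /* Placement new into raw storage: the buffer is passed through
   * static_cast<void *>, mirroring "new (static_cast<void *>(ptr)) T". */
  alignas(Item) unsigned char storage[sizeof(Item)];
  Item *item = new (static_cast<void *>(storage)) Item{size};

  /* Type-erased void pointer back to a typed pointer: static_cast,
   * as in the container and CPPType callbacks in the diff. */
  void *erased = item;
  Item *recovered = static_cast<Item *>(erased);

  /* Pointer to integer: reinterpret_cast, because the bit pattern is
   * reinterpreted rather than converted. */
  const std::uintptr_t address = reinterpret_cast<std::uintptr_t>(recovered);
  std::cout << "item at 0x" << std::hex << address << std::dec << " holds "
            << recovered->value << "\n";

  item->~Item();
  return 0;
}

One practical effect visible in the diff: conversions that a C-style cast performed silently no longer compile, which is why the Span constructor hunk drops the documented Span<Derived *> to Span<Base *> conversion and why several call sites gain explicit .cast<...>() calls on spans.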

View File

@ -68,7 +68,7 @@ class PersistentDataHandle {
uint64_t hash() const
{
return (uint64_t)handle_;
return static_cast<uint64_t>(handle_);
}
};

View File

@ -295,21 +295,20 @@ using StateTypeMap = blender::Map<std::string, std::unique_ptr<SimulationStateTy
template<typename T>
static void add_state_type(StateTypeMap &map,
const char *name,
void (*init)(T *state),
void (*reset)(T *state),
void (*remove)(T *state),
void (*copy)(const T *src, T *dst))
{
SimulationStateType state_type{
name,
(int)sizeof(T),
BKE_simulation_get_state_type_name<T>(),
static_cast<int>(sizeof(T)),
(StateInitFunction)init,
(StateResetFunction)reset,
(StateRemoveFunction)remove,
(StateCopyFunction)copy,
};
map.add_new(name, std::make_unique<SimulationStateType>(state_type));
map.add_new(state_type.name, std::make_unique<SimulationStateType>(state_type));
}
static StateTypeMap init_state_types()
@ -317,7 +316,6 @@ static StateTypeMap init_state_types()
StateTypeMap map;
add_state_type<ParticleSimulationState>(
map,
SIM_TYPE_NAME_PARTICLE_SIMULATION,
[](ParticleSimulationState *state) { CustomData_reset(&state->attributes); },
[](ParticleSimulationState *state) {
CustomData_free(&state->attributes, state->tot_particles);
@ -337,7 +335,6 @@ static StateTypeMap init_state_types()
add_state_type<ParticleMeshEmitterSimulationState>(
map,
SIM_TYPE_NAME_PARTICLE_MESH_EMITTER,
[](ParticleMeshEmitterSimulationState *UNUSED(state)) {},
[](ParticleMeshEmitterSimulationState *state) { state->last_birth_time = 0.0f; },
[](ParticleMeshEmitterSimulationState *UNUSED(state)) {},

View File

@ -79,19 +79,20 @@ class RawAllocator {
public:
void *allocate(size_t size, size_t alignment, const char *UNUSED(name))
{
BLI_assert(is_power_of_2_i((int)alignment));
BLI_assert(is_power_of_2_i(static_cast<int>(alignment)));
void *ptr = malloc(size + alignment + sizeof(MemHead));
void *used_ptr = (void *)((uintptr_t)POINTER_OFFSET(ptr, alignment + sizeof(MemHead)) &
~((uintptr_t)alignment - 1));
int offset = (int)((intptr_t)used_ptr - (intptr_t)ptr);
BLI_assert(offset >= (int)sizeof(MemHead));
((MemHead *)used_ptr - 1)->offset = (int)offset;
void *used_ptr = reinterpret_cast<void *>(
reinterpret_cast<uintptr_t>(POINTER_OFFSET(ptr, alignment + sizeof(MemHead))) &
~(static_cast<uintptr_t>(alignment) - 1));
int offset = static_cast<int>((intptr_t)used_ptr - (intptr_t)ptr);
BLI_assert(offset >= static_cast<int>(sizeof(MemHead)));
(static_cast<MemHead *>(used_ptr) - 1)->offset = offset;
return used_ptr;
}
void deallocate(void *ptr)
{
MemHead *head = (MemHead *)ptr - 1;
MemHead *head = static_cast<MemHead *>(ptr) - 1;
int offset = -head->offset;
void *actual_pointer = POINTER_OFFSET(ptr, offset);
free(actual_pointer);

View File

@ -176,7 +176,7 @@ class Array {
{
destruct_n(data_, size_);
if (!this->uses_inline_buffer()) {
allocator_.deallocate((void *)data_);
allocator_.deallocate(static_cast<void *>(data_));
}
}
@ -351,7 +351,8 @@ class Array {
T *allocate(int64_t size)
{
return (T *)allocator_.allocate((size_t)size * sizeof(T), alignof(T), AT);
return static_cast<T *>(
allocator_.allocate(static_cast<size_t>(size) * sizeof(T), alignof(T), AT));
}
bool uses_inline_buffer() const

View File

@ -63,10 +63,10 @@ struct Color4f {
uint64_t hash() const
{
uint64_t x1 = *(uint32_t *)&r;
uint64_t x2 = *(uint32_t *)&g;
uint64_t x3 = *(uint32_t *)&b;
uint64_t x4 = *(uint32_t *)&a;
uint64_t x1 = *reinterpret_cast<const uint32_t *>(&r);
uint64_t x2 = *reinterpret_cast<const uint32_t *>(&g);
uint64_t x3 = *reinterpret_cast<const uint32_t *>(&b);
uint64_t x4 = *reinterpret_cast<const uint32_t *>(&a);
return (x1 * 1283591) ^ (x2 * 850177) ^ (x3 * 735391) ^ (x4 * 442319);
}
};
@ -120,8 +120,8 @@ struct Color4b {
uint64_t hash() const
{
return ((uint64_t)r * 1283591) ^ ((uint64_t)g * 850177) ^ ((uint64_t)b * 735391) ^
((uint64_t)a * 442319);
return static_cast<uint64_t>(r * 1283591) ^ static_cast<uint64_t>(g * 850177) ^
static_cast<uint64_t>(b * 735391) ^ static_cast<uint64_t>(a * 442319);
}
};

View File

@ -31,7 +31,7 @@ struct float3 {
{
}
float3(const float (*ptr)[3]) : float3((const float *)ptr)
float3(const float (*ptr)[3]) : float3(static_cast<const float *>(ptr[0]))
{
}
@ -204,9 +204,9 @@ struct float3 {
uint64_t hash() const
{
uint64_t x1 = *(uint32_t *)&x;
uint64_t x2 = *(uint32_t *)&y;
uint64_t x3 = *(uint32_t *)&z;
uint64_t x1 = *reinterpret_cast<const uint32_t *>(&x);
uint64_t x2 = *reinterpret_cast<const uint32_t *>(&y);
uint64_t x3 = *reinterpret_cast<const uint32_t *>(&z);
return (x1 * 435109) ^ (x2 * 380867) ^ (x3 * 1059217);
}

View File

@ -31,18 +31,18 @@ struct float4x4 {
memcpy(values, matrix, sizeof(float) * 16);
}
float4x4(const float matrix[4][4]) : float4x4((float *)matrix)
float4x4(const float matrix[4][4]) : float4x4(static_cast<const float *>(matrix[0]))
{
}
operator float *()
{
return (float *)this;
return &values[0][0];
}
operator const float *() const
{
return (const float *)this;
return &values[0][0];
}
friend float4x4 operator*(const float4x4 &a, const float4x4 &b)
@ -124,8 +124,8 @@ struct float4x4 {
{
uint64_t h = 435109;
for (int i = 0; i < 16; i++) {
float value = ((const float *)this)[i];
h = h * 33 + (*(uint32_t *)&value);
float value = (static_cast<const float *>(values[0]))[i];
h = h * 33 + *reinterpret_cast<const uint32_t *>(&value);
}
return h;
}

View File

@ -110,7 +110,7 @@ template<typename T> struct DefaultHash<const T> {
template<> struct DefaultHash<TYPE> { \
uint64_t operator()(TYPE value) const \
{ \
return (uint64_t)value; \
return static_cast<uint64_t>(value); \
} \
}
@ -135,14 +135,14 @@ TRIVIAL_DEFAULT_INT_HASH(uint64_t);
template<> struct DefaultHash<float> {
uint64_t operator()(float value) const
{
return *(uint32_t *)&value;
return *reinterpret_cast<uint32_t *>(&value);
}
};
template<> struct DefaultHash<bool> {
uint64_t operator()(bool value) const
{
return (uint64_t)(value != false) * 1298191;
return static_cast<uint64_t>((value != false) * 1298191);
}
};
@ -186,8 +186,8 @@ template<> struct DefaultHash<StringRefNull> {
template<typename T> struct DefaultHash<T *> {
uint64_t operator()(const T *value) const
{
uintptr_t ptr = (uintptr_t)value;
uint64_t hash = (uint64_t)(ptr >> 4);
uintptr_t ptr = reinterpret_cast<uintptr_t>(value);
uint64_t hash = static_cast<uint64_t>(ptr >> 4);
return hash;
}
};

View File

@ -56,7 +56,8 @@ inline constexpr int64_t log2_floor_constexpr(const int64_t x)
inline constexpr int64_t log2_ceil_constexpr(const int64_t x)
{
BLI_assert(x >= 0);
return (is_power_of_2_constexpr((int)x)) ? log2_floor_constexpr(x) : log2_floor_constexpr(x) + 1;
return (is_power_of_2_constexpr(static_cast<int>(x))) ? log2_floor_constexpr(x) :
log2_floor_constexpr(x) + 1;
}
inline constexpr int64_t power_of_2_max_constexpr(const int64_t x)
@ -83,14 +84,17 @@ inline constexpr int64_t ceil_division_by_fraction(const int64_t x,
const int64_t numerator,
const int64_t denominator)
{
return (int64_t)ceil_division((uint64_t)x * (uint64_t)denominator, (uint64_t)numerator);
return static_cast<int64_t>(
ceil_division(static_cast<uint64_t>(x) * static_cast<uint64_t>(denominator),
static_cast<uint64_t>(numerator)));
}
inline constexpr int64_t floor_multiplication_with_fraction(const int64_t x,
const int64_t numerator,
const int64_t denominator)
{
return (int64_t)((uint64_t)x * (uint64_t)numerator / (uint64_t)denominator);
return static_cast<int64_t>((static_cast<uint64_t>(x) * static_cast<uint64_t>(numerator) /
static_cast<uint64_t>(denominator)));
}
inline constexpr int64_t total_slot_amount_for_usable_slots(
@ -130,7 +134,7 @@ class LoadFactor {
int64_t *r_total_slots,
int64_t *r_usable_slots) const
{
BLI_assert(is_power_of_2_i((int)min_total_slots));
BLI_assert(is_power_of_2_i(static_cast<int>(min_total_slots)));
int64_t total_slots = this->compute_total_slots(min_usable_slots, numerator_, denominator_);
total_slots = std::max(total_slots, min_total_slots);
@ -297,7 +301,7 @@ class HashTableStats {
removed_amount_ = hash_table.removed_amount();
size_per_element_ = hash_table.size_per_element();
size_in_bytes_ = hash_table.size_in_bytes();
address_ = (const void *)&hash_table;
address_ = static_cast<const void *>(&hash_table);
for (const auto &key : keys) {
int64_t collisions = hash_table.count_collisions(key);

View File

@ -82,7 +82,7 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
if (potential_allocation_end <= current_end_) {
current_begin_ = potential_allocation_end;
return (void *)potential_allocation_begin;
return reinterpret_cast<void *>(potential_allocation_begin);
}
else {
this->allocate_new_buffer(size + alignment);
@ -97,7 +97,7 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
*/
template<typename T> T *allocate()
{
return (T *)this->allocate(sizeof(T), alignof(T));
return static_cast<T *>(this->allocate(sizeof(T), alignof(T)));
}
/**
@ -107,7 +107,8 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
*/
template<typename T> MutableSpan<T> allocate_array(int64_t size)
{
return MutableSpan<T>((T *)this->allocate(sizeof(T) * size, alignof(T)), size);
T *array = static_cast<T *>(this->allocate(sizeof(T) * size, alignof(T)));
return MutableSpan<T>(array, size);
}
/**
@ -142,9 +143,9 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
StringRefNull copy_string(StringRef str)
{
const int64_t alloc_size = str.size() + 1;
char *buffer = (char *)this->allocate(alloc_size, 1);
char *buffer = static_cast<char *>(this->allocate(alloc_size, 1));
str.copy(buffer, alloc_size);
return StringRefNull((const char *)buffer);
return StringRefNull(static_cast<const char *>(buffer));
}
MutableSpan<void *> allocate_elements_and_pointer_array(int64_t element_amount,
@ -172,7 +173,7 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
MutableSpan<T *> pointers = void_pointers.cast<T *>();
for (int64_t i : IndexRange(n)) {
new ((void *)pointers[i]) T(std::forward<Args>(args)...);
new (static_cast<void *>(pointers[i])) T(std::forward<Args>(args)...);
}
return pointers;
@ -184,7 +185,7 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
*/
void provide_buffer(void *buffer, uint size)
{
unused_borrowed_buffers_.append(Span<char>((char *)buffer, size));
unused_borrowed_buffers_.append(Span<char>(static_cast<char *>(buffer), size));
}
template<size_t Size, size_t Alignment>

View File

@ -80,7 +80,7 @@ template<typename T> class ListBaseWrapper {
Iterator begin() const
{
return Iterator(listbase_, (T *)listbase_->first);
return Iterator(listbase_, static_cast<T *>(listbase_->first));
}
Iterator end() const
@ -92,7 +92,7 @@ template<typename T> class ListBaseWrapper {
{
void *ptr = BLI_findlink(listbase_, index);
BLI_assert(ptr);
return (T *)ptr;
return static_cast<T *>(ptr);
}
int64_t index_of(const T *value) const

View File

@ -824,7 +824,7 @@ class Map {
*/
int64_t size_in_bytes() const
{
return (int64_t)(sizeof(Slot) * slots_.size());
return static_cast<int64_t>(sizeof(Slot) * slots_.size());
}
/**
@ -863,7 +863,7 @@ class Map {
max_load_factor_.compute_total_and_usable_slots(
SlotArray::inline_buffer_capacity(), min_usable_slots, &total_slots, &usable_slots);
BLI_assert(total_slots >= 1);
const uint64_t new_slot_mask = (uint64_t)total_slots - 1;
const uint64_t new_slot_mask = static_cast<uint64_t>(total_slots) - 1;
/**
* Optimize the case when the map was empty beforehand. We can avoid some copies here.
@ -1107,7 +1107,7 @@ class Map {
bool add_overwrite__impl(ForwardKey &&key, ForwardValue &&value, uint64_t hash)
{
auto create_func = [&](Value *ptr) {
new ((void *)ptr) Value(std::forward<ForwardValue>(value));
new (static_cast<void *>(ptr)) Value(std::forward<ForwardValue>(value));
return true;
};
auto modify_func = [&](Value *ptr) {
@ -1185,7 +1185,7 @@ template<typename Key, typename Value> class StdUnorderedMapWrapper {
public:
int64_t size() const
{
return (int64_t)map_.size();
return static_cast<int64_t>(map_.size());
}
bool is_empty() const

View File

@ -84,7 +84,7 @@ template<typename T> void default_construct_n(T *ptr, int64_t n)
int64_t current = 0;
try {
for (; current < n; current++) {
new ((void *)(ptr + current)) T;
new (static_cast<void *>(ptr + current)) T;
}
}
catch (...) {
@ -133,7 +133,7 @@ template<typename T> void uninitialized_copy_n(const T *src, int64_t n, T *dst)
int64_t current = 0;
try {
for (; current < n; current++) {
new ((void *)(dst + current)) T(src[current]);
new (static_cast<void *>(dst + current)) T(src[current]);
}
}
catch (...) {
@ -162,7 +162,7 @@ void uninitialized_convert_n(const From *src, int64_t n, To *dst)
int64_t current = 0;
try {
for (; current < n; current++) {
new ((void *)(dst + current)) To((To)src[current]);
new (static_cast<void *>(dst + current)) To((To)src[current]);
}
}
catch (...) {
@ -211,7 +211,7 @@ template<typename T> void uninitialized_move_n(T *src, int64_t n, T *dst)
int64_t current = 0;
try {
for (; current < n; current++) {
new ((void *)(dst + current)) T(std::move(src[current]));
new (static_cast<void *>(dst + current)) T(std::move(src[current]));
}
}
catch (...) {
@ -298,7 +298,7 @@ template<typename T> void uninitialized_fill_n(T *dst, int64_t n, const T &value
int64_t current = 0;
try {
for (; current < n; current++) {
new ((void *)(dst + current)) T(value);
new (static_cast<void *>(dst + current)) T(value);
}
}
catch (...) {
@ -332,22 +332,22 @@ template<size_t Size, size_t Alignment> class alignas(Alignment) AlignedBuffer {
public:
operator void *()
{
return (void *)buffer_;
return buffer_;
}
operator const void *() const
{
return (void *)buffer_;
return buffer_;
}
void *ptr()
{
return (void *)buffer_;
return buffer_;
}
const void *ptr() const
{
return (const void *)buffer_;
return buffer_;
}
};
@ -363,42 +363,42 @@ template<typename T, int64_t Size = 1> class TypedBuffer {
public:
operator T *()
{
return (T *)&buffer_;
return static_cast<T *>(buffer_.ptr());
}
operator const T *() const
{
return (const T *)&buffer_;
return static_cast<const T *>(buffer_.ptr());
}
T &operator*()
{
return *(T *)&buffer_;
return *static_cast<T *>(buffer_.ptr());
}
const T &operator*() const
{
return *(const T *)&buffer_;
return *static_cast<const T *>(buffer_.ptr());
}
T *ptr()
{
return (T *)&buffer_;
return static_cast<T *>(buffer_.ptr());
}
const T *ptr() const
{
return (const T *)&buffer_;
return static_cast<const T *>(buffer_.ptr());
}
T &ref()
{
return *(T *)&buffer_;
return *static_cast<T *>(buffer_.ptr());
}
const T &ref() const
{
return *(const T *)&buffer_;
return *static_cast<const T *>(buffer_.ptr());
}
};
@ -424,7 +424,7 @@ inline constexpr bool is_convertible_pointer_v =
*/
inline constexpr int64_t default_inline_buffer_capacity(size_t element_size)
{
return ((int64_t)element_size < 100) ? 4 : 0;
return (static_cast<int64_t>(element_size) < 100) ? 4 : 0;
}
} // namespace blender

View File

@ -235,7 +235,7 @@ using DefaultProbingStrategy = PythonProbingStrategy<>;
int64_t linear_offset = 0; \
uint64_t current_hash = probing_strategy.get(); \
do { \
int64_t R_SLOT_INDEX = (int64_t)((current_hash + (uint64_t)linear_offset) & MASK);
int64_t R_SLOT_INDEX = static_cast<int64_t>((current_hash + static_cast<uint64_t>(linear_offset)) & MASK);
#define SLOT_PROBING_END() \
} while (++linear_offset < probing_strategy.linear_steps()); \

View File

@ -44,7 +44,7 @@ class RandomNumberGenerator {
void seed(uint32_t seed)
{
constexpr uint64_t lowseed = 0x330E;
x_ = (((uint64_t)seed) << 16) | lowseed;
x_ = (static_cast<uint64_t>(seed) << 16) | lowseed;
}
void seed_random(uint32_t seed);
@ -52,13 +52,13 @@ class RandomNumberGenerator {
uint32_t get_uint32()
{
this->step();
return (uint32_t)(x_ >> 17);
return static_cast<uint32_t>(x_ >> 17);
}
int32_t get_int32()
{
this->step();
return (int32_t)(x_ >> 17);
return static_cast<int32_t>(x_ >> 17);
}
/**

View File

@ -556,7 +556,7 @@ class Set {
max_load_factor_.compute_total_and_usable_slots(
SlotArray::inline_buffer_capacity(), min_usable_slots, &total_slots, &usable_slots);
BLI_assert(total_slots >= 1);
const uint64_t new_slot_mask = (uint64_t)total_slots - 1;
const uint64_t new_slot_mask = static_cast<uint64_t>(total_slots) - 1;
/**
* Optimize the case when the set was empty beforehand. We can avoid some copies here.
@ -748,7 +748,7 @@ template<typename Key> class StdUnorderedSetWrapper {
public:
int64_t size() const
{
return (int64_t)set_.size();
return static_cast<int64_t>(set_.size());
}
bool is_empty() const

View File

@ -101,7 +101,7 @@ template<typename T> class Span {
}
template<typename U, typename std::enable_if_t<is_convertible_pointer_v<U, T>> * = nullptr>
Span(const U *start, int64_t size) : data_((const T *)start), size_(size)
Span(const U *start, int64_t size) : data_(static_cast<const T *>(start)), size_(size)
{
BLI_assert(size >= 0);
}
@ -117,11 +117,12 @@ template<typename T> class Span {
* Span<int> span = {1, 2, 3, 4};
* call_function_with_array(span);
*/
Span(const std::initializer_list<T> &list) : Span(list.begin(), (int64_t)list.size())
Span(const std::initializer_list<T> &list)
: Span(list.begin(), static_cast<int64_t>(list.size()))
{
}
Span(const std::vector<T> &vector) : Span(vector.data(), (int64_t)vector.size())
Span(const std::vector<T> &vector) : Span(vector.data(), static_cast<int64_t>(vector.size()))
{
}
@ -132,10 +133,9 @@ template<typename T> class Span {
/**
* Support implicit conversions like the ones below:
* Span<T *> -> Span<const T *>
* Span<Derived *> -> Span<Base *>
*/
template<typename U, typename std::enable_if_t<is_convertible_pointer_v<U, T>> * = nullptr>
Span(Span<U> array) : data_((T *)array.data()), size_(array.size())
Span(Span<U> array) : data_(static_cast<const T *>(array.data())), size_(array.size())
{
}
@ -373,7 +373,7 @@ template<typename T> class Span {
{
const int64_t index = this->first_index_try(search_value);
BLI_assert(index >= 0);
return (int64_t)index;
return index;
}
/**

View File

@ -350,11 +350,12 @@ class Stack {
void *buffer = allocator_.allocate(
sizeof(Chunk) + sizeof(T) * new_capacity + alignof(T), alignof(Chunk), AT);
void *chunk_buffer = buffer;
void *data_buffer = (void *)(((uintptr_t)buffer + sizeof(Chunk) + alignof(T) - 1) &
~(alignof(T) - 1));
void *data_buffer = reinterpret_cast<void *>(
(reinterpret_cast<uintptr_t>(buffer) + sizeof(Chunk) + alignof(T) - 1) &
~(alignof(T) - 1));
Chunk *new_chunk = new (chunk_buffer) Chunk();
new_chunk->begin = (T *)data_buffer;
new_chunk->begin = static_cast<T *>(data_buffer);
new_chunk->capacity_end = new_chunk->begin + new_capacity;
new_chunk->above = nullptr;
new_chunk->below = top_chunk_;

View File

@ -167,7 +167,7 @@ class StringRefNull : public StringRefBase {
/**
* Construct a StringRefNull from a null terminated c-string. The pointer must not point to NULL.
*/
StringRefNull(const char *str) : StringRefBase(str, (int64_t)strlen(str))
StringRefNull(const char *str) : StringRefBase(str, static_cast<int64_t>(strlen(str)))
{
BLI_assert(str != NULL);
BLI_assert(data_[size_] == '\0');
@ -179,7 +179,7 @@ class StringRefNull : public StringRefBase {
*/
StringRefNull(const char *str, const int64_t size) : StringRefBase(str, size)
{
BLI_assert((int64_t)strlen(str) == size);
BLI_assert(static_cast<int64_t>(strlen(str)) == size);
}
/**
@ -231,7 +231,7 @@ class StringRef : public StringRefBase {
/**
* Create a StringRef from a null-terminated c-string.
*/
StringRef(const char *str) : StringRefBase(str, str ? (int64_t)strlen(str) : 0)
StringRef(const char *str) : StringRefBase(str, str ? static_cast<int64_t>(strlen(str)) : 0)
{
}
@ -244,7 +244,7 @@ class StringRef : public StringRefBase {
* second point points to a smaller address than the first one.
*/
StringRef(const char *begin, const char *one_after_end)
: StringRefBase(begin, (int64_t)(one_after_end - begin))
: StringRefBase(begin, static_cast<int64_t>(one_after_end - begin))
{
BLI_assert(begin <= one_after_end);
}
@ -253,7 +253,7 @@ class StringRef : public StringRefBase {
* Reference a std::string. Remember that when the std::string is destructed, the StringRef
* will point to uninitialized memory.
*/
StringRef(const std::string &str) : StringRefBase(str.data(), (int64_t)str.size())
StringRef(const std::string &str) : StringRefBase(str.data(), static_cast<int64_t>(str.size()))
{
}

View File

@ -100,7 +100,8 @@ class Vector {
*/
#ifndef NDEBUG
int64_t debug_size_;
# define UPDATE_VECTOR_SIZE(ptr) (ptr)->debug_size_ = (int64_t)((ptr)->end_ - (ptr)->begin_)
# define UPDATE_VECTOR_SIZE(ptr) \
(ptr)->debug_size_ = static_cast<int64_t>((ptr)->end_ - (ptr)->begin_)
#else
# define UPDATE_VECTOR_SIZE(ptr) ((void)0)
#endif
@ -243,7 +244,8 @@ class Vector {
else {
/* Copy from inline buffer to newly allocated buffer. */
const int64_t capacity = size;
begin_ = (T *)allocator_.allocate(sizeof(T) * (size_t)capacity, alignof(T), AT);
begin_ = static_cast<T *>(
allocator_.allocate(sizeof(T) * static_cast<size_t>(capacity), alignof(T), AT));
end_ = begin_ + size;
capacity_end_ = begin_ + capacity;
uninitialized_relocate_n(other.begin_, size, begin_);
@ -578,8 +580,9 @@ class Vector {
*/
int64_t size() const
{
BLI_assert(debug_size_ == (int64_t)(end_ - begin_));
return (int64_t)(end_ - begin_);
const int64_t current_size = static_cast<int64_t>(end_ - begin_);
BLI_assert(debug_size_ == current_size);
return current_size;
}
/**
@ -675,7 +678,7 @@ class Vector {
{
for (const T *current = begin_; current != end_; current++) {
if (*current == value) {
return (int64_t)(current - begin_);
return static_cast<int64_t>(current - begin_);
}
}
return -1;
@ -749,7 +752,7 @@ class Vector {
*/
int64_t capacity() const
{
return (int64_t)(capacity_end_ - begin_);
return static_cast<int64_t>(capacity_end_ - begin_);
}
/**
@ -808,7 +811,8 @@ class Vector {
const int64_t new_capacity = std::max(min_capacity, min_new_capacity);
const int64_t size = this->size();
T *new_array = (T *)allocator_.allocate((size_t)new_capacity * sizeof(T), alignof(T), AT);
T *new_array = static_cast<T *>(
allocator_.allocate(static_cast<size_t>(new_capacity) * sizeof(T), alignof(T), AT));
uninitialized_relocate_n(begin_, size, new_array);
if (!this->is_inline()) {

View File

@ -457,7 +457,7 @@ class VectorSet {
*/
int64_t size_in_bytes() const
{
return (int64_t)(sizeof(Slot) * slots_.size() + sizeof(Key) * usable_slots_);
return static_cast<int64_t>(sizeof(Slot) * slots_.size() + sizeof(Key) * usable_slots_);
}
/**
@ -486,7 +486,7 @@ class VectorSet {
max_load_factor_.compute_total_and_usable_slots(
SlotArray::inline_buffer_capacity(), min_usable_slots, &total_slots, &usable_slots);
BLI_assert(total_slots >= 1);
const uint64_t new_slot_mask = (uint64_t)total_slots - 1;
const uint64_t new_slot_mask = static_cast<uint64_t>(total_slots) - 1;
/* Optimize the case when the set was empty beforehand. We can avoid some copies here. */
if (this->size() == 0) {
@ -722,7 +722,8 @@ class VectorSet {
Key *allocate_keys_array(const int64_t size)
{
return (Key *)slots_.allocator().allocate(sizeof(Key) * (size_t)size, alignof(Key), AT);
return static_cast<Key *>(
slots_.allocator().allocate(sizeof(Key) * static_cast<size_t>(size), alignof(Key), AT));
}
void deallocate_keys_array(Key *keys)

View File

@ -93,7 +93,7 @@ void BLI_rng_srandom(RNG *rng, unsigned int seed)
void BLI_rng_get_char_n(RNG *rng, char *bytes, size_t bytes_len)
{
rng->rng.get_bytes(blender::MutableSpan(bytes, (int64_t)bytes_len));
rng->rng.get_bytes(blender::MutableSpan(bytes, static_cast<int64_t>(bytes_len)));
}
int BLI_rng_get_int(RNG *rng)
@ -428,7 +428,7 @@ float2 RandomNumberGenerator::get_triangle_sample(float2 v1, float2 v2, float2 v
void RandomNumberGenerator::get_bytes(MutableSpan<char> r_bytes)
{
constexpr int64_t mask_bytes = 2;
constexpr int64_t rand_stride = (int64_t)sizeof(x_) - mask_bytes;
constexpr int64_t rand_stride = static_cast<int64_t>(sizeof(x_)) - mask_bytes;
int64_t last_len = 0;
int64_t trim_len = r_bytes.size();

View File

@ -8,7 +8,7 @@ namespace blender::tests {
static bool is_aligned(void *ptr, uint alignment)
{
BLI_assert(is_power_of_2_i((int)alignment));
BLI_assert(is_power_of_2_i(static_cast<int>(alignment)));
return (POINTER_AS_UINT(ptr) & (alignment - 1)) == 0;
}

View File

@ -197,7 +197,7 @@ TEST(span, SizeInBytes)
{
std::array<int, 10> a;
Span<int> a_span(a);
EXPECT_EQ(a_span.size_in_bytes(), (int64_t)sizeof(a));
EXPECT_EQ(a_span.size_in_bytes(), static_cast<int64_t>(sizeof(a)));
EXPECT_EQ(a_span.size_in_bytes(), 40);
}

View File

@ -76,7 +76,7 @@ uint64_t AnimatedPropertyID::hash() const
{
uintptr_t ptr1 = (uintptr_t)data;
uintptr_t ptr2 = (uintptr_t)property_rna;
return (uint64_t)(((ptr1 >> 4) * 33) ^ (ptr2 >> 4));
return static_cast<uint64_t>(((ptr1 >> 4) * 33) ^ (ptr2 >> 4));
}
namespace {

View File

@ -51,7 +51,7 @@ class AttributesInfoBuilder : NonCopyable, NonMovable {
template<typename T> bool add(StringRef name, const T &default_value)
{
return this->add(name, CPPType::get<T>(), (const void *)&default_value);
return this->add(name, CPPType::get<T>(), static_cast<const void *>(&default_value));
}
bool add(StringRef name, const CPPType &type, const void *default_value = nullptr);
@ -107,7 +107,7 @@ class AttributesInfo : NonCopyable, NonMovable {
template<typename T> const T &default_of(int index) const
{
BLI_assert(type_by_index_[index]->is<T>());
return *(T *)defaults_[index];
return *static_cast<T *>(defaults_[index]);
}
template<typename T> const T &default_of(StringRef name) const
@ -203,7 +203,7 @@ class MutableAttributesRef {
template<typename T> MutableSpan<T> get(int index) const
{
BLI_assert(info_->type_of(index).is<T>());
return MutableSpan<T>((T *)buffers_[index] + range_.start(), range_.size());
return MutableSpan<T>(static_cast<T *>(buffers_[index]) + range_.start(), range_.size());
}
template<typename T> MutableSpan<T> get(StringRef name) const
@ -294,7 +294,7 @@ class AttributesRef {
template<typename T> Span<T> get(int index) const
{
BLI_assert(info_->type_of(index).is<T>());
return Span<T>((T *)buffers_[index] + range_.start(), range_.size());
return Span<T>(static_cast<T *>(buffers_[index]) + range_.start(), range_.size());
}
template<typename T> Span<T> get(StringRef name) const

View File

@ -584,35 +584,35 @@ template<typename T> void construct_default_cb(void *ptr)
}
template<typename T> void construct_default_n_cb(void *ptr, int64_t n)
{
blender::default_construct_n((T *)ptr, n);
blender::default_construct_n(static_cast<T *>(ptr), n);
}
template<typename T> void construct_default_indices_cb(void *ptr, IndexMask mask)
{
mask.foreach_index([&](int64_t i) { new ((T *)ptr + i) T; });
mask.foreach_index([&](int64_t i) { new (static_cast<T *>(ptr) + i) T; });
}
template<typename T> void destruct_cb(void *ptr)
{
((T *)ptr)->~T();
(static_cast<T *>(ptr))->~T();
}
template<typename T> void destruct_n_cb(void *ptr, int64_t n)
{
blender::destruct_n((T *)ptr, n);
blender::destruct_n(static_cast<T *>(ptr), n);
}
template<typename T> void destruct_indices_cb(void *ptr, IndexMask mask)
{
T *ptr_ = (T *)ptr;
T *ptr_ = static_cast<T *>(ptr);
mask.foreach_index([&](int64_t i) { ptr_[i].~T(); });
}
template<typename T> void copy_to_initialized_cb(const void *src, void *dst)
{
*(T *)dst = *(T *)src;
*static_cast<T *>(dst) = *static_cast<const T *>(src);
}
template<typename T> void copy_to_initialized_n_cb(const void *src, void *dst, int64_t n)
{
const T *src_ = (const T *)src;
T *dst_ = (T *)dst;
const T *src_ = static_cast<const T *>(src);
T *dst_ = static_cast<T *>(dst);
for (int64_t i = 0; i < n; i++) {
dst_[i] = src_[i];
@ -621,45 +621,45 @@ template<typename T> void copy_to_initialized_n_cb(const void *src, void *dst, i
template<typename T>
void copy_to_initialized_indices_cb(const void *src, void *dst, IndexMask mask)
{
const T *src_ = (const T *)src;
T *dst_ = (T *)dst;
const T *src_ = static_cast<const T *>(src);
T *dst_ = static_cast<T *>(dst);
mask.foreach_index([&](int64_t i) { dst_[i] = src_[i]; });
}
template<typename T> void copy_to_uninitialized_cb(const void *src, void *dst)
{
blender::uninitialized_copy_n((T *)src, 1, (T *)dst);
blender::uninitialized_copy_n(static_cast<const T *>(src), 1, static_cast<T *>(dst));
}
template<typename T> void copy_to_uninitialized_n_cb(const void *src, void *dst, int64_t n)
{
blender::uninitialized_copy_n((T *)src, n, (T *)dst);
blender::uninitialized_copy_n(static_cast<const T *>(src), n, static_cast<T *>(dst));
}
template<typename T>
void copy_to_uninitialized_indices_cb(const void *src, void *dst, IndexMask mask)
{
const T *src_ = (const T *)src;
T *dst_ = (T *)dst;
const T *src_ = static_cast<const T *>(src);
T *dst_ = static_cast<T *>(dst);
mask.foreach_index([&](int64_t i) { new (dst_ + i) T(src_[i]); });
}
template<typename T> void relocate_to_initialized_cb(void *src, void *dst)
{
T *src_ = (T *)src;
T *dst_ = (T *)dst;
T *src_ = static_cast<T *>(src);
T *dst_ = static_cast<T *>(dst);
*dst_ = std::move(*src_);
src_->~T();
}
template<typename T> void relocate_to_initialized_n_cb(void *src, void *dst, int64_t n)
{
blender::initialized_relocate_n((T *)src, n, (T *)dst);
blender::initialized_relocate_n(static_cast<T *>(src), n, static_cast<T *>(dst));
}
template<typename T> void relocate_to_initialized_indices_cb(void *src, void *dst, IndexMask mask)
{
T *src_ = (T *)src;
T *dst_ = (T *)dst;
T *src_ = static_cast<T *>(src);
T *dst_ = static_cast<T *>(dst);
mask.foreach_index([&](int64_t i) {
dst_[i] = std::move(src_[i]);
@ -669,21 +669,21 @@ template<typename T> void relocate_to_initialized_indices_cb(void *src, void *ds
template<typename T> void relocate_to_uninitialized_cb(void *src, void *dst)
{
T *src_ = (T *)src;
T *dst_ = (T *)dst;
T *src_ = static_cast<T *>(src);
T *dst_ = static_cast<T *>(dst);
new (dst_) T(std::move(*src_));
src_->~T();
}
template<typename T> void relocate_to_uninitialized_n_cb(void *src, void *dst, int64_t n)
{
blender::uninitialized_relocate_n((T *)src, n, (T *)dst);
blender::uninitialized_relocate_n(static_cast<T *>(src), n, static_cast<T *>(dst));
}
template<typename T>
void relocate_to_uninitialized_indices_cb(void *src, void *dst, IndexMask mask)
{
T *src_ = (T *)src;
T *dst_ = (T *)dst;
T *src_ = static_cast<T *>(src);
T *dst_ = static_cast<T *>(dst);
mask.foreach_index([&](int64_t i) {
new (dst_ + i) T(std::move(src_[i]));
@ -693,8 +693,8 @@ void relocate_to_uninitialized_indices_cb(void *src, void *dst, IndexMask mask)
template<typename T> void fill_initialized_cb(const void *value, void *dst, int64_t n)
{
const T &value_ = *(const T *)value;
T *dst_ = (T *)dst;
const T &value_ = *static_cast<const T *>(value);
T *dst_ = static_cast<T *>(dst);
for (int64_t i = 0; i < n; i++) {
dst_[i] = value_;
@ -702,16 +702,16 @@ template<typename T> void fill_initialized_cb(const void *value, void *dst, int6
}
template<typename T> void fill_initialized_indices_cb(const void *value, void *dst, IndexMask mask)
{
const T &value_ = *(const T *)value;
T *dst_ = (T *)dst;
const T &value_ = *static_cast<const T *>(value);
T *dst_ = static_cast<T *>(dst);
mask.foreach_index([&](int64_t i) { dst_[i] = value_; });
}
template<typename T> void fill_uninitialized_cb(const void *value, void *dst, int64_t n)
{
const T &value_ = *(const T *)value;
T *dst_ = (T *)dst;
const T &value_ = *static_cast<const T *>(value);
T *dst_ = static_cast<T *>(dst);
for (int64_t i = 0; i < n; i++) {
new (dst_ + i) T(value_);
@ -720,28 +720,28 @@ template<typename T> void fill_uninitialized_cb(const void *value, void *dst, in
template<typename T>
void fill_uninitialized_indices_cb(const void *value, void *dst, IndexMask mask)
{
const T &value_ = *(const T *)value;
T *dst_ = (T *)dst;
const T &value_ = *static_cast<const T *>(value);
T *dst_ = static_cast<T *>(dst);
mask.foreach_index([&](int64_t i) { new (dst_ + i) T(value_); });
}
template<typename T> void debug_print_cb(const void *value, std::stringstream &ss)
{
const T &value_ = *(const T *)value;
const T &value_ = *static_cast<const T *>(value);
ss << value_;
}
template<typename T> bool is_equal_cb(const void *a, const void *b)
{
const T &a_ = *(T *)a;
const T &b_ = *(T *)b;
const T &a_ = *static_cast<const T *>(a);
const T &b_ = *static_cast<const T *>(b);
return a_ == b_;
}
template<typename T> uint64_t hash_cb(const void *value)
{
const T &value_ = *(const T *)value;
const T &value_ = *static_cast<const T *>(value);
return DefaultHash<T>{}(value_);
}
@ -780,7 +780,7 @@ inline std::unique_ptr<const CPPType> create_cpp_type(StringRef name, const T &d
debug_print_cb<T>,
is_equal_cb<T>,
hash_cb<T>,
(const void *)&default_value);
static_cast<const void *>(&default_value));
return std::unique_ptr<const CPPType>(type);
}

View File

@ -186,7 +186,8 @@ template<typename T> class GVectorArrayRef {
MutableSpan<T> operator[](int64_t index)
{
BLI_assert(index < vector_array_->starts_.size());
return MutableSpan<T>((T *)vector_array_->starts_[index], vector_array_->lengths_[index]);
return MutableSpan<T>(static_cast<T *>(vector_array_->starts_[index]),
vector_array_->lengths_[index]);
}
int64_t size() const

View File

@ -58,7 +58,8 @@ template<typename In1, typename Out1> class CustomMF_SI_SO : public MultiFunctio
template<typename ElementFuncT> static FunctionT create_function(ElementFuncT element_fn)
{
return [=](IndexMask mask, VSpan<In1> in1, MutableSpan<Out1> out1) {
mask.foreach_index([&](int i) { new ((void *)&out1[i]) Out1(element_fn(in1[i])); });
mask.foreach_index(
[&](int i) { new (static_cast<void *>(&out1[i])) Out1(element_fn(in1[i])); });
};
}
@ -100,7 +101,8 @@ class CustomMF_SI_SI_SO : public MultiFunction {
template<typename ElementFuncT> static FunctionT create_function(ElementFuncT element_fn)
{
return [=](IndexMask mask, VSpan<In1> in1, VSpan<In2> in2, MutableSpan<Out1> out1) {
mask.foreach_index([&](int i) { new ((void *)&out1[i]) Out1(element_fn(in1[i], in2[i])); });
mask.foreach_index(
[&](int i) { new (static_cast<void *>(&out1[i])) Out1(element_fn(in1[i], in2[i])); });
};
}
@ -150,8 +152,9 @@ class CustomMF_SI_SI_SI_SO : public MultiFunction {
VSpan<In2> in2,
VSpan<In3> in3,
MutableSpan<Out1> out1) {
mask.foreach_index(
[&](int i) { new ((void *)&out1[i]) Out1(element_fn(in1[i], in2[i], in3[i])); });
mask.foreach_index([&](int i) {
new (static_cast<void *>(&out1[i])) Out1(element_fn(in1[i], in2[i], in3[i]));
});
};
}
@ -220,7 +223,7 @@ template<typename From, typename To> class CustomMF_Convert : public MultiFuncti
MutableSpan<To> outputs = params.uninitialized_single_output<To>(1);
for (int64_t i : mask) {
new ((void *)&outputs[i]) To(inputs[i]);
new (static_cast<void *>(&outputs[i])) To(inputs[i]);
}
}
};
@ -294,7 +297,7 @@ template<typename T> class CustomMF_Constant : public MultiFunction {
if (other2 != nullptr) {
const CPPType &type = CPPType::get<T>();
if (type == other2->type_) {
return type.is_equal((const void *)&value_, other2->value_);
return type.is_equal(static_cast<const void *>(&value_), other2->value_);
}
}
return false;

View File

@ -43,7 +43,7 @@ class MFContextBuilder {
public:
template<typename T> void add_global_context(std::string name, const T *context)
{
global_contexts_.add_new(std::move(name), (const void *)context);
global_contexts_.add_new(std::move(name), static_cast<const void *>(context));
}
};
@ -60,7 +60,7 @@ class MFContext {
{
const void *context = builder_.global_contexts_.lookup_default_as(name, nullptr);
/* TODO: Implement type checking. */
return (const T *)context;
return static_cast<const T *>(context);
}
};

View File

@ -110,7 +110,7 @@ class MFDataType {
uint64_t hash() const
{
return DefaultHash<CPPType>{}(*type_) + (uint64_t)category_;
return DefaultHash<CPPType>{}(*type_) + static_cast<uint64_t>(category_);
}
};

View File

@ -118,7 +118,7 @@ class MFFunctionNode : public MFNode {
};
class MFDummyNode : public MFNode {
private:
protected:
StringRefNull name_;
MutableSpan<StringRefNull> input_names_;
MutableSpan<StringRefNull> output_names_;
@ -279,25 +279,25 @@ inline bool MFNode::is_function() const
inline MFDummyNode &MFNode::as_dummy()
{
BLI_assert(is_dummy_);
return *(MFDummyNode *)this;
return static_cast<MFDummyNode &>(*this);
}
inline const MFDummyNode &MFNode::as_dummy() const
{
BLI_assert(is_dummy_);
return *(const MFDummyNode *)this;
return static_cast<const MFDummyNode &>(*this);
}
inline MFFunctionNode &MFNode::as_function()
{
BLI_assert(!is_dummy_);
return *(MFFunctionNode *)this;
return static_cast<MFFunctionNode &>(*this);
}
inline const MFFunctionNode &MFNode::as_function() const
{
BLI_assert(!is_dummy_);
return *(const MFFunctionNode *)this;
return static_cast<const MFFunctionNode &>(*this);
}
inline MFInputSocket &MFNode::input(int index)
@ -440,25 +440,25 @@ inline bool MFSocket::is_output() const
inline MFInputSocket &MFSocket::as_input()
{
BLI_assert(this->is_input());
return *(MFInputSocket *)this;
return static_cast<MFInputSocket &>(*this);
}
inline const MFInputSocket &MFSocket::as_input() const
{
BLI_assert(this->is_input());
return *(const MFInputSocket *)this;
return static_cast<const MFInputSocket &>(*this);
}
inline MFOutputSocket &MFSocket::as_output()
{
BLI_assert(this->is_output());
return *(MFOutputSocket *)this;
return static_cast<MFOutputSocket &>(*this);
}
inline const MFOutputSocket &MFSocket::as_output() const
{
BLI_assert(this->is_output());
return *(const MFOutputSocket *)this;
return static_cast<const MFOutputSocket &>(*this);
}
/* --------------------------------------------------------------------

View File

@ -67,7 +67,8 @@ class GSpan {
}
template<typename T>
GSpan(Span<T> array) : GSpan(CPPType::get<T>(), (const void *)array.data(), array.size())
GSpan(Span<T> array)
: GSpan(CPPType::get<T>(), static_cast<const void *>(array.data()), array.size())
{
}
@ -100,7 +101,7 @@ class GSpan {
template<typename T> Span<T> typed() const
{
BLI_assert(type_->is<T>());
return Span<T>((const T *)data_, size_);
return Span<T>(static_cast<const T *>(data_), size_);
}
};
@ -129,7 +130,7 @@ class GMutableSpan {
template<typename T>
GMutableSpan(MutableSpan<T> array)
: GMutableSpan(CPPType::get<T>(), (void *)array.begin(), array.size())
: GMutableSpan(CPPType::get<T>(), static_cast<void *>(array.begin()), array.size())
{
}
@ -167,7 +168,7 @@ class GMutableSpan {
template<typename T> MutableSpan<T> typed()
{
BLI_assert(type_->is<T>());
return MutableSpan<T>((T *)data_, size_);
return MutableSpan<T>(static_cast<T *>(data_), size_);
}
};

View File

@ -285,8 +285,8 @@ std::string MFNetwork::to_dot(Span<const MFNode *> marked_nodes) const
Map<const MFNode *, dot::NodeWithSocketsRef> dot_nodes;
Vector<const MFNode *> all_nodes;
all_nodes.extend(function_nodes_.as_span());
all_nodes.extend(dummy_nodes_.as_span());
all_nodes.extend(function_nodes_.as_span().cast<const MFNode *>());
all_nodes.extend(dummy_nodes_.as_span().cast<const MFNode *>());
for (const MFNode *node : all_nodes) {
dot::Node &dot_node = digraph.new_node("");

View File

@ -520,7 +520,7 @@ MFNetworkEvaluationStorage::~MFNetworkEvaluationStorage()
continue;
}
if (any_value->type == ValueType::OwnSingle) {
OwnSingleValue *value = (OwnSingleValue *)any_value;
OwnSingleValue *value = static_cast<OwnSingleValue *>(any_value);
GMutableSpan span = value->span;
const CPPType &type = span.type();
if (value->is_single_allocated) {
@ -532,7 +532,7 @@ MFNetworkEvaluationStorage::~MFNetworkEvaluationStorage()
}
}
else if (any_value->type == ValueType::OwnVector) {
OwnVectorValue *value = (OwnVectorValue *)any_value;
OwnVectorValue *value = static_cast<OwnVectorValue *>(any_value);
delete value->vector_array;
}
}
@ -550,7 +550,7 @@ bool MFNetworkEvaluationStorage::socket_is_computed(const MFOutputSocket &socket
return false;
}
if (ELEM(any_value->type, ValueType::OutputSingle, ValueType::OutputVector)) {
return ((OutputValue *)any_value)->is_computed;
return static_cast<OutputValue *>(any_value)->is_computed;
}
return true;
}
@ -560,17 +560,17 @@ bool MFNetworkEvaluationStorage::is_same_value_for_every_index(const MFOutputSoc
Value *any_value = value_per_output_id_[socket.id()];
switch (any_value->type) {
case ValueType::OwnSingle:
return ((OwnSingleValue *)any_value)->span.size() == 1;
return static_cast<OwnSingleValue *>(any_value)->span.size() == 1;
case ValueType::OwnVector:
return ((OwnVectorValue *)any_value)->vector_array->size() == 1;
return static_cast<OwnVectorValue *>(any_value)->vector_array->size() == 1;
case ValueType::InputSingle:
return ((InputSingleValue *)any_value)->virtual_span.is_single_element();
return static_cast<InputSingleValue *>(any_value)->virtual_span.is_single_element();
case ValueType::InputVector:
return ((InputVectorValue *)any_value)->virtual_array_span.is_single_array();
return static_cast<InputVectorValue *>(any_value)->virtual_array_span.is_single_array();
case ValueType::OutputSingle:
return ((OutputSingleValue *)any_value)->span.size() == 1;
return static_cast<OutputSingleValue *>(any_value)->span.size() == 1;
case ValueType::OutputVector:
return ((OutputVectorValue *)any_value)->vector_array->size() == 1;
return static_cast<OutputVectorValue *>(any_value)->vector_array->size() == 1;
}
BLI_assert(false);
return false;
@ -605,7 +605,7 @@ void MFNetworkEvaluationStorage::finish_output_socket(const MFOutputSocket &sock
}
if (ELEM(any_value->type, ValueType::OutputSingle, ValueType::OutputVector)) {
((OutputValue *)any_value)->is_computed = true;
static_cast<OutputValue *>(any_value)->is_computed = true;
}
}
@ -627,7 +627,7 @@ void MFNetworkEvaluationStorage::finish_input_socket(const MFInputSocket &socket
break;
}
case ValueType::OwnSingle: {
OwnSingleValue *value = (OwnSingleValue *)any_value;
OwnSingleValue *value = static_cast<OwnSingleValue *>(any_value);
BLI_assert(value->max_remaining_users >= 1);
value->max_remaining_users--;
if (value->max_remaining_users == 0) {
@ -645,7 +645,7 @@ void MFNetworkEvaluationStorage::finish_input_socket(const MFInputSocket &socket
break;
}
case ValueType::OwnVector: {
OwnVectorValue *value = (OwnVectorValue *)any_value;
OwnVectorValue *value = static_cast<OwnVectorValue *>(any_value);
BLI_assert(value->max_remaining_users >= 1);
value->max_remaining_users--;
if (value->max_remaining_users == 0) {
@ -712,7 +712,7 @@ GMutableSpan MFNetworkEvaluationStorage::get_single_output__full(const MFOutputS
}
BLI_assert(any_value->type == ValueType::OutputSingle);
return ((OutputSingleValue *)any_value)->span;
return static_cast<OutputSingleValue *>(any_value)->span;
}
GMutableSpan MFNetworkEvaluationStorage::get_single_output__single(const MFOutputSocket &socket)
@ -730,7 +730,7 @@ GMutableSpan MFNetworkEvaluationStorage::get_single_output__single(const MFOutpu
}
BLI_assert(any_value->type == ValueType::OutputSingle);
GMutableSpan span = ((OutputSingleValue *)any_value)->span;
GMutableSpan span = static_cast<OutputSingleValue *>(any_value)->span;
BLI_assert(span.size() == 1);
return span;
}
@ -749,7 +749,7 @@ GVectorArray &MFNetworkEvaluationStorage::get_vector_output__full(const MFOutput
}
BLI_assert(any_value->type == ValueType::OutputVector);
return *((OutputVectorValue *)any_value)->vector_array;
return *static_cast<OutputVectorValue *>(any_value)->vector_array;
}
GVectorArray &MFNetworkEvaluationStorage::get_vector_output__single(const MFOutputSocket &socket)
@ -766,7 +766,7 @@ GVectorArray &MFNetworkEvaluationStorage::get_vector_output__single(const MFOutp
}
BLI_assert(any_value->type == ValueType::OutputVector);
GVectorArray &vector_array = *((OutputVectorValue *)any_value)->vector_array;
GVectorArray &vector_array = *static_cast<OutputVectorValue *>(any_value)->vector_array;
BLI_assert(vector_array.size() == 1);
return vector_array;
}
@ -785,14 +785,14 @@ GMutableSpan MFNetworkEvaluationStorage::get_mutable_single__full(const MFInputS
if (to_any_value != nullptr) {
BLI_assert(to_any_value->type == ValueType::OutputSingle);
GMutableSpan span = ((OutputSingleValue *)to_any_value)->span;
GMutableSpan span = static_cast<OutputSingleValue *>(to_any_value)->span;
GVSpan virtual_span = this->get_single_input__full(input);
virtual_span.materialize_to_uninitialized(mask_, span.data());
return span;
}
if (from_any_value->type == ValueType::OwnSingle) {
OwnSingleValue *value = (OwnSingleValue *)from_any_value;
OwnSingleValue *value = static_cast<OwnSingleValue *>(from_any_value);
if (value->max_remaining_users == 1 && !value->is_single_allocated) {
value_per_output_id_[to.id()] = value;
value_per_output_id_[from.id()] = nullptr;
@ -826,7 +826,7 @@ GMutableSpan MFNetworkEvaluationStorage::get_mutable_single__single(const MFInpu
if (to_any_value != nullptr) {
BLI_assert(to_any_value->type == ValueType::OutputSingle);
GMutableSpan span = ((OutputSingleValue *)to_any_value)->span;
GMutableSpan span = static_cast<OutputSingleValue *>(to_any_value)->span;
BLI_assert(span.size() == 1);
GVSpan virtual_span = this->get_single_input__single(input);
type.copy_to_uninitialized(virtual_span.as_single_element(), span[0]);
@ -834,7 +834,7 @@ GMutableSpan MFNetworkEvaluationStorage::get_mutable_single__single(const MFInpu
}
if (from_any_value->type == ValueType::OwnSingle) {
OwnSingleValue *value = (OwnSingleValue *)from_any_value;
OwnSingleValue *value = static_cast<OwnSingleValue *>(from_any_value);
if (value->max_remaining_users == 1) {
value_per_output_id_[to.id()] = value;
value_per_output_id_[from.id()] = nullptr;
@ -870,14 +870,14 @@ GVectorArray &MFNetworkEvaluationStorage::get_mutable_vector__full(const MFInput
if (to_any_value != nullptr) {
BLI_assert(to_any_value->type == ValueType::OutputVector);
GVectorArray &vector_array = *((OutputVectorValue *)to_any_value)->vector_array;
GVectorArray &vector_array = *static_cast<OutputVectorValue *>(to_any_value)->vector_array;
GVArraySpan virtual_array_span = this->get_vector_input__full(input);
vector_array.extend(mask_, virtual_array_span);
return vector_array;
}
if (from_any_value->type == ValueType::OwnVector) {
OwnVectorValue *value = (OwnVectorValue *)from_any_value;
OwnVectorValue *value = static_cast<OwnVectorValue *>(from_any_value);
if (value->max_remaining_users == 1) {
value_per_output_id_[to.id()] = value;
value_per_output_id_[from.id()] = nullptr;
@ -912,7 +912,7 @@ GVectorArray &MFNetworkEvaluationStorage::get_mutable_vector__single(const MFInp
if (to_any_value != nullptr) {
BLI_assert(to_any_value->type == ValueType::OutputVector);
GVectorArray &vector_array = *((OutputVectorValue *)to_any_value)->vector_array;
GVectorArray &vector_array = *static_cast<OutputVectorValue *>(to_any_value)->vector_array;
BLI_assert(vector_array.size() == 1);
GVArraySpan virtual_array_span = this->get_vector_input__single(input);
vector_array.extend(0, virtual_array_span[0]);
@ -920,7 +920,7 @@ GVectorArray &MFNetworkEvaluationStorage::get_mutable_vector__single(const MFInp
}
if (from_any_value->type == ValueType::OwnVector) {
OwnVectorValue *value = (OwnVectorValue *)from_any_value;
OwnVectorValue *value = static_cast<OwnVectorValue *>(from_any_value);
if (value->max_remaining_users == 1) {
value_per_output_id_[to.id()] = value;
value_per_output_id_[from.id()] = nullptr;
@ -947,7 +947,7 @@ GVSpan MFNetworkEvaluationStorage::get_single_input__full(const MFInputSocket &s
BLI_assert(any_value != nullptr);
if (any_value->type == ValueType::OwnSingle) {
OwnSingleValue *value = (OwnSingleValue *)any_value;
OwnSingleValue *value = static_cast<OwnSingleValue *>(any_value);
if (value->is_single_allocated) {
return GVSpan::FromSingle(value->span.type(), value->span.data(), min_array_size_);
}
@ -955,11 +955,11 @@ GVSpan MFNetworkEvaluationStorage::get_single_input__full(const MFInputSocket &s
return value->span;
}
if (any_value->type == ValueType::InputSingle) {
InputSingleValue *value = (InputSingleValue *)any_value;
InputSingleValue *value = static_cast<InputSingleValue *>(any_value);
return value->virtual_span;
}
if (any_value->type == ValueType::OutputSingle) {
OutputSingleValue *value = (OutputSingleValue *)any_value;
OutputSingleValue *value = static_cast<OutputSingleValue *>(any_value);
BLI_assert(value->is_computed);
return value->span;
}
@ -975,17 +975,17 @@ GVSpan MFNetworkEvaluationStorage::get_single_input__single(const MFInputSocket
BLI_assert(any_value != nullptr);
if (any_value->type == ValueType::OwnSingle) {
OwnSingleValue *value = (OwnSingleValue *)any_value;
OwnSingleValue *value = static_cast<OwnSingleValue *>(any_value);
BLI_assert(value->span.size() == 1);
return value->span;
}
if (any_value->type == ValueType::InputSingle) {
InputSingleValue *value = (InputSingleValue *)any_value;
InputSingleValue *value = static_cast<InputSingleValue *>(any_value);
BLI_assert(value->virtual_span.is_single_element());
return value->virtual_span;
}
if (any_value->type == ValueType::OutputSingle) {
OutputSingleValue *value = (OutputSingleValue *)any_value;
OutputSingleValue *value = static_cast<OutputSingleValue *>(any_value);
BLI_assert(value->is_computed);
BLI_assert(value->span.size() == 1);
return value->span;
@ -1002,7 +1002,7 @@ GVArraySpan MFNetworkEvaluationStorage::get_vector_input__full(const MFInputSock
BLI_assert(any_value != nullptr);
if (any_value->type == ValueType::OwnVector) {
OwnVectorValue *value = (OwnVectorValue *)any_value;
OwnVectorValue *value = static_cast<OwnVectorValue *>(any_value);
if (value->vector_array->size() == 1) {
GSpan span = (*value->vector_array)[0];
return GVArraySpan(span, min_array_size_);
@ -1011,11 +1011,11 @@ GVArraySpan MFNetworkEvaluationStorage::get_vector_input__full(const MFInputSock
return *value->vector_array;
}
if (any_value->type == ValueType::InputVector) {
InputVectorValue *value = (InputVectorValue *)any_value;
InputVectorValue *value = static_cast<InputVectorValue *>(any_value);
return value->virtual_array_span;
}
if (any_value->type == ValueType::OutputVector) {
OutputVectorValue *value = (OutputVectorValue *)any_value;
OutputVectorValue *value = static_cast<OutputVectorValue *>(any_value);
return *value->vector_array;
}
@ -1030,17 +1030,17 @@ GVArraySpan MFNetworkEvaluationStorage::get_vector_input__single(const MFInputSo
BLI_assert(any_value != nullptr);
if (any_value->type == ValueType::OwnVector) {
OwnVectorValue *value = (OwnVectorValue *)any_value;
OwnVectorValue *value = static_cast<OwnVectorValue *>(any_value);
BLI_assert(value->vector_array->size() == 1);
return *value->vector_array;
}
if (any_value->type == ValueType::InputVector) {
InputVectorValue *value = (InputVectorValue *)any_value;
InputVectorValue *value = static_cast<InputVectorValue *>(any_value);
BLI_assert(value->virtual_array_span.is_single_array());
return value->virtual_array_span;
}
if (any_value->type == ValueType::OutputVector) {
OutputVectorValue *value = (OutputVectorValue *)any_value;
OutputVectorValue *value = static_cast<OutputVectorValue *>(any_value);
BLI_assert(value->vector_array->size() == 1);
return *value->vector_array;
}

View File

@ -130,7 +130,8 @@ static Vector<MFNode *> find_nodes_based_on_mask(MFNetwork &network,
*/
void dead_node_removal(MFNetwork &network)
{
Array<bool> node_is_used_mask = mask_nodes_to_the_left(network, network.dummy_nodes());
Array<bool> node_is_used_mask = mask_nodes_to_the_left(network,
network.dummy_nodes().cast<MFNode *>());
Vector<MFNode *> nodes_to_remove = find_nodes_based_on_mask(network, node_is_used_mask, false);
network.remove(nodes_to_remove);
}
@ -156,7 +157,7 @@ static bool function_node_can_be_constant(MFFunctionNode *node)
static Vector<MFNode *> find_non_constant_nodes(MFNetwork &network)
{
Vector<MFNode *> non_constant_nodes;
non_constant_nodes.extend(network.dummy_nodes());
non_constant_nodes.extend(network.dummy_nodes().cast<MFNode *>());
for (MFFunctionNode *node : network.function_nodes()) {
if (!function_node_can_be_constant(node)) {
@ -299,6 +300,10 @@ static Array<MFOutputSocket *> compute_constant_sockets_and_add_folded_nodes(
return add_constant_folded_sockets(network_fn, params, resources, network);
}
class MyClass {
MFDummyNode node;
};
/**
* Find function nodes that always output the same value and replace those with constant nodes.
*/
@ -318,7 +323,7 @@ void constant_folding(MFNetwork &network, ResourceCollector &resources)
network.relink(original_socket, *folded_sockets[i]);
}
network.remove(temporary_nodes);
network.remove(temporary_nodes.as_span().cast<MFNode *>());
}
/** \} */

View File

@ -77,13 +77,13 @@ static void requiredDataMask(Object *UNUSED(ob),
static void foreachObjectLink(ModifierData *md, Object *ob, ObjectWalkFunc walk, void *userData)
{
MaskModifierData *mmd = (MaskModifierData *)md;
MaskModifierData *mmd = reinterpret_cast<MaskModifierData *>(md);
walk(userData, ob, &mmd->ob_arm, IDWALK_CB_NOP);
}
static void updateDepsgraph(ModifierData *md, const ModifierUpdateDepsgraphContext *ctx)
{
MaskModifierData *mmd = (MaskModifierData *)md;
MaskModifierData *mmd = reinterpret_cast<MaskModifierData *>(md);
if (mmd->ob_arm) {
bArmature *arm = (bArmature *)mmd->ob_arm->data;
/* Tag relationship in depsgraph, but also on the armature. */
@ -306,7 +306,7 @@ static void copy_masked_polys_to_new_mesh(const Mesh &src_mesh,
*/
static Mesh *modifyMesh(ModifierData *md, const ModifierEvalContext *ctx, Mesh *mesh)
{
MaskModifierData *mmd = (MaskModifierData *)md;
MaskModifierData *mmd = reinterpret_cast<MaskModifierData *>(md);
Object *ob = ctx->object;
const bool invert_mask = mmd->flag & MOD_MASK_INV;
@ -388,7 +388,7 @@ static bool isDisabled(const struct Scene *UNUSED(scene),
ModifierData *md,
bool UNUSED(useRenderParams))
{
MaskModifierData *mmd = (MaskModifierData *)md;
MaskModifierData *mmd = reinterpret_cast<MaskModifierData *>(md);
/* The object type check is only needed here in case we have a placeholder
* object assigned (because the library containing the armature is missing).

View File

@ -66,7 +66,7 @@ using blender::float3;
static void updateDepsgraph(ModifierData *md, const ModifierUpdateDepsgraphContext *ctx)
{
SimulationModifierData *smd = (SimulationModifierData *)md;
SimulationModifierData *smd = reinterpret_cast<SimulationModifierData *>(md);
if (smd->simulation) {
DEG_add_simulation_relation(ctx->node, smd->simulation, "Accessed Simulation");
}
@ -74,7 +74,7 @@ static void updateDepsgraph(ModifierData *md, const ModifierUpdateDepsgraphConte
static void foreachIDLink(ModifierData *md, Object *ob, IDWalkFunc walk, void *userData)
{
SimulationModifierData *smd = (SimulationModifierData *)md;
SimulationModifierData *smd = reinterpret_cast<SimulationModifierData *>(md);
walk(userData, ob, (ID **)&smd->simulation, IDWALK_CB_USER);
}
@ -82,21 +82,22 @@ static bool isDisabled(const struct Scene *UNUSED(scene),
ModifierData *md,
bool UNUSED(useRenderParams))
{
SimulationModifierData *smd = (SimulationModifierData *)md;
SimulationModifierData *smd = reinterpret_cast<SimulationModifierData *>(md);
return smd->simulation == nullptr;
}
static const ParticleSimulationState *find_particle_state(SimulationModifierData *smd)
{
return (const ParticleSimulationState *)BKE_simulation_state_try_find_by_name_and_type(
smd->simulation, smd->data_path, SIM_TYPE_NAME_PARTICLE_SIMULATION);
return reinterpret_cast<const ParticleSimulationState *>(
BKE_simulation_state_try_find_by_name_and_type(
smd->simulation, smd->data_path, SIM_TYPE_NAME_PARTICLE_SIMULATION));
}
static PointCloud *modifyPointCloud(ModifierData *md,
const ModifierEvalContext *UNUSED(ctx),
PointCloud *input_pointcloud)
{
SimulationModifierData *smd = (SimulationModifierData *)md;
SimulationModifierData *smd = reinterpret_cast<SimulationModifierData *>(md);
const ParticleSimulationState *state = find_particle_state(smd);
if (state == nullptr) {
return input_pointcloud;
@ -107,10 +108,10 @@ static PointCloud *modifyPointCloud(ModifierData *md,
return pointcloud;
}
const float3 *positions = (const float3 *)CustomData_get_layer_named(
&state->attributes, CD_PROP_FLOAT3, "Position");
const float *radii = (const float *)CustomData_get_layer_named(
&state->attributes, CD_PROP_FLOAT, "Radius");
const float3 *positions = static_cast<const float3 *>(
CustomData_get_layer_named(&state->attributes, CD_PROP_FLOAT3, "Position"));
const float *radii = static_cast<const float *>(
CustomData_get_layer_named(&state->attributes, CD_PROP_FLOAT, "Radius"));
memcpy(pointcloud->co, positions, sizeof(float3) * state->tot_particles);
for (int i = 0; i < state->tot_particles; i++) {
@ -144,20 +145,20 @@ static void panelRegister(ARegionType *region_type)
static void blendWrite(BlendWriter *writer, const ModifierData *md)
{
const SimulationModifierData *smd = (const SimulationModifierData *)md;
const SimulationModifierData *smd = reinterpret_cast<const SimulationModifierData *>(md);
BLO_write_string(writer, smd->data_path);
}
static void blendRead(BlendDataReader *reader, ModifierData *md)
{
SimulationModifierData *smd = (SimulationModifierData *)md;
SimulationModifierData *smd = reinterpret_cast<SimulationModifierData *>(md);
BLO_read_data_address(reader, &smd->data_path);
}
static void copyData(const ModifierData *md, ModifierData *target, const int flag)
{
const SimulationModifierData *smd = (const SimulationModifierData *)md;
SimulationModifierData *tsmd = (SimulationModifierData *)target;
const SimulationModifierData *smd = reinterpret_cast<const SimulationModifierData *>(md);
SimulationModifierData *tsmd = reinterpret_cast<SimulationModifierData *>(target);
BKE_modifier_copydata_generic(md, target, flag);
if (smd->data_path != nullptr) {
@ -167,7 +168,7 @@ static void copyData(const ModifierData *md, ModifierData *target, const int fla
static void freeData(ModifierData *md)
{
SimulationModifierData *smd = (SimulationModifierData *)md;
SimulationModifierData *smd = reinterpret_cast<SimulationModifierData *>(md);
if (smd->data_path) {
MEM_freeN(smd->data_path);
}
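/* Illustrative sketch, not part of this commit: the reinterpret_cast calls above rely on the
 * DNA convention that a concrete modifier struct embeds ModifierData as its first member.
 * ModifierDataStub and SimulationModifierDataStub are hypothetical stand-ins for the real
 * DNA structs. */
struct ModifierDataStub {
  int type;
  int flag;
};

struct SimulationModifierDataStub {
  ModifierDataStub modifier; /* Must stay the first member for the cast to be valid. */
  char *data_path;
};

static char *simulation_data_path(ModifierDataStub *md)
{
  /* Valid because md actually points at the ModifierDataStub embedded at offset zero of a
   * SimulationModifierDataStub, so both pointers designate the same address. */
  SimulationModifierDataStub *smd = reinterpret_cast<SimulationModifierDataStub *>(md);
  return smd->data_path;
}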

View File

@ -265,12 +265,12 @@ inline const DSocket &DSocket::as_base() const
inline const DInputSocket &DSocket::as_input() const
{
return *(DInputSocket *)this;
return static_cast<const DInputSocket &>(*this);
}
inline const DOutputSocket &DSocket::as_output() const
{
return *(DOutputSocket *)this;
return static_cast<const DOutputSocket &>(*this);
}
inline PointerRNA *DSocket::rna() const
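/* Illustrative sketch, not part of this commit: as_input()/as_output() above are unchecked
 * downcasts of a reference whose dynamic type the caller already knows; static_cast expresses
 * that without dynamic_cast's runtime check. SocketStub and InputSocketStub are hypothetical
 * stand-ins, and the assert here is just one way to document the precondition. */
#include <cassert>

class InputSocketStub;

class SocketStub {
 public:
  virtual ~SocketStub() = default;
  virtual bool is_input() const
  {
    return false;
  }
  inline const InputSocketStub &as_input() const;
};

class InputSocketStub : public SocketStub {
 public:
  bool is_input() const override
  {
    return true;
  }
};

inline const InputSocketStub &SocketStub::as_input() const
{
  /* Precondition: the caller must already know this really is an input socket. */
  assert(this->is_input());
  return static_cast<const InputSocketStub &>(*this);
}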

View File

@ -126,8 +126,20 @@ class MFNetworkTreeMap {
void add_try_match(const DNode &dnode, fn::MFNode &node)
{
this->add_try_match(dnode.inputs(), node.inputs());
this->add_try_match(dnode.outputs(), node.outputs());
this->add_try_match(dnode.inputs().cast<const DSocket *>(),
node.inputs().cast<fn::MFSocket *>());
this->add_try_match(dnode.outputs().cast<const DSocket *>(),
node.outputs().cast<fn::MFSocket *>());
}
void add_try_match(Span<const DInputSocket *> dsockets, Span<fn::MFInputSocket *> sockets)
{
this->add_try_match(dsockets.cast<const DSocket *>(), sockets.cast<fn::MFSocket *>());
}
void add_try_match(Span<const DOutputSocket *> dsockets, Span<fn::MFOutputSocket *> sockets)
{
this->add_try_match(dsockets.cast<const DSocket *>(), sockets.cast<fn::MFSocket *>());
}
void add_try_match(Span<const DSocket *> dsockets, Span<fn::MFSocket *> sockets)
@ -277,7 +289,7 @@ class SocketMFNetworkBuilder : public MFNetworkBuilderBase {
*/
template<typename T> T *socket_default_value()
{
return (T *)bsocket_->default_value;
return static_cast<T *>(bsocket_->default_value);
}
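/* Illustrative sketch, not part of this commit: the cast<...>() calls in the add_try_match
 * overloads above view the same buffer of pointers under a related element type. SpanStub is
 * a hypothetical stand-in, not blender::Span; it only shows the shape of such a helper. */
#include <cstdint>

template<typename T> class SpanStub {
 private:
  const T *data_ = nullptr;
  int64_t size_ = 0;

 public:
  SpanStub() = default;
  SpanStub(const T *data, int64_t size) : data_(data), size_(size)
  {
  }

  template<typename NewT> SpanStub<NewT> cast() const
  {
    /* Reuses the buffer and reinterprets the element type; callers are responsible for only
     * doing this between interchangeable representations, e.g. pointers within one class
     * hierarchy, as with the DSocket/MFSocket spans above. */
    static_assert(sizeof(T) == sizeof(NewT), "elements must have the same size");
    return SpanStub<NewT>(reinterpret_cast<const NewT *>(data_), size_);
  }

  int64_t size() const
  {
    return size_;
  }
};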
/**

View File

@ -248,13 +248,13 @@ inline const SocketRef &SocketRef::as_base() const
inline const InputSocketRef &SocketRef::as_input() const
{
BLI_assert(this->is_input());
return *(const InputSocketRef *)this;
return static_cast<const InputSocketRef &>(*this);
}
inline const OutputSocketRef &SocketRef::as_output() const
{
BLI_assert(this->is_output());
return *(const OutputSocketRef *)this;
return static_cast<const OutputSocketRef &>(*this);
}
inline PointerRNA *SocketRef::rna() const

View File

@ -57,7 +57,7 @@ class RandomFloatFunction : public blender::fn::MultiFunction {
const float min_value = min_values[i];
const float max_value = max_values[i];
const int seed = seeds[i];
const float value = BLI_hash_int_01((uint32_t)seed ^ function_seed_);
const float value = BLI_hash_int_01(static_cast<uint32_t>(seed) ^ function_seed_);
values[i] = value * (max_value - min_value) + min_value;
}
}
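/* Illustrative sketch, not part of this commit: both the old (uint32_t)seed and the new
 * static_cast<uint32_t>(seed) perform the same well-defined modulo-2^32 conversion, so
 * negative user seeds keep hashing consistently; the C++ spelling only makes the intent
 * explicit and easy to search for. hash_to_unit_float is a hypothetical stand-in for
 * BLI_hash_int_01. */
#include <cstdint>

static float hash_to_unit_float(uint32_t x)
{
  /* Small integer mix, then map to [0, 1). */
  x ^= x >> 16;
  x *= 0x7feb352du;
  x ^= x >> 15;
  x *= 0x846ca68bu;
  x ^= x >> 16;
  return static_cast<float>(x >> 8) * (1.0f / 16777216.0f);
}

static float random_float_in_range(
    int seed, uint32_t function_seed, float min_value, float max_value)
{
  const float value = hash_to_unit_float(static_cast<uint32_t>(seed) ^ function_seed);
  return value * (max_value - min_value) + min_value;
}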

View File

@ -59,9 +59,10 @@ BLI_NOINLINE void DerivedNodeTree::insert_nodes_and_links_in_id_order(const Node
/* Insert links. */
for (const NodeRef *node_ref : tree_ref.nodes()) {
for (const InputSocketRef *to_socket_ref : node_ref->inputs()) {
DInputSocket *to_socket = (DInputSocket *)sockets_map[to_socket_ref->id()];
DInputSocket *to_socket = static_cast<DInputSocket *>(sockets_map[to_socket_ref->id()]);
for (const OutputSocketRef *from_socket_ref : to_socket_ref->linked_sockets()) {
DOutputSocket *from_socket = (DOutputSocket *)sockets_map[from_socket_ref->id()];
DOutputSocket *from_socket = static_cast<DOutputSocket *>(
sockets_map[from_socket_ref->id()]);
to_socket->linked_sockets_.append(from_socket);
from_socket->linked_sockets_.append(to_socket);
}
@ -130,7 +131,7 @@ BLI_NOINLINE void DerivedNodeTree::expand_group_node(DNode &group_node,
const NodeRef &group_node_ref = *group_node.node_ref_;
BLI_assert(group_node_ref.is_group_node());
bNodeTree *btree = (bNodeTree *)group_node_ref.bnode()->id;
bNodeTree *btree = reinterpret_cast<bNodeTree *>(group_node_ref.bnode()->id);
if (btree == nullptr) {
return;
}
@ -366,7 +367,7 @@ static dot::Cluster *get_cluster_for_parent(dot::DirectedGraph &graph,
}
return clusters.lookup_or_add_cb(parent, [&]() {
dot::Cluster *parent_cluster = get_cluster_for_parent(graph, clusters, parent->parent());
bNodeTree *btree = (bNodeTree *)parent->node_ref().bnode()->id;
bNodeTree *btree = reinterpret_cast<bNodeTree *>(parent->node_ref().bnode()->id);
dot::Cluster *new_cluster = &graph.new_cluster(parent->node_ref().name() + " / " +
StringRef(btree->id.name + 2));
new_cluster->set_parent_cluster(parent_cluster);

View File

@ -28,7 +28,7 @@ static void add_dependencies_of_node_tree(bNodeTree &ntree, NodeTreeDependencies
LISTBASE_FOREACH (bNode *, node, &ntree.nodes) {
LISTBASE_FOREACH (bNodeSocket *, socket, &node->inputs) {
if (socket->type == SOCK_OBJECT) {
Object *object = ((bNodeSocketValueObject *)socket->default_value)->value;
Object *object = reinterpret_cast<bNodeSocketValueObject *>(socket->default_value)->value;
if (object != nullptr) {
r_dependencies.add_transform_dependency(object);
if (object->type == OB_MESH) {
@ -39,7 +39,7 @@ static void add_dependencies_of_node_tree(bNodeTree &ntree, NodeTreeDependencies
}
if (node->type == NODE_GROUP) {
bNodeTree *group = (bNodeTree *)node->id;
bNodeTree *group = reinterpret_cast<bNodeTree *>(node->id);
if (group != nullptr) {
add_dependencies_of_node_tree(*group, r_dependencies);
}

View File

@ -101,9 +101,9 @@ InputSocketRef &NodeTreeRef::find_input_socket(Map<bNode *, NodeRef *> &node_map
bNodeSocket *bsocket)
{
NodeRef *node = node_mapping.lookup(bnode);
for (SocketRef *socket : node->inputs_) {
for (InputSocketRef *socket : node->inputs_) {
if (socket->bsocket_ == bsocket) {
return *(InputSocketRef *)socket;
return *socket;
}
}
BLI_assert(false);
@ -115,9 +115,9 @@ OutputSocketRef &NodeTreeRef::find_output_socket(Map<bNode *, NodeRef *> &node_m
bNodeSocket *bsocket)
{
NodeRef *node = node_mapping.lookup(bnode);
for (SocketRef *socket : node->outputs_) {
for (OutputSocketRef *socket : node->outputs_) {
if (socket->bsocket_ == bsocket) {
return *(OutputSocketRef *)socket;
return *socket;
}
}
BLI_assert(false);
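/* Illustrative sketch, not part of this commit: the change above types the loop variable as
 * the derived socket class the container already stores, which makes the old cast unnecessary.
 * The stub types are hypothetical stand-ins for bNodeSocket and InputSocketRef. */
#include <vector>

struct BSocketStub;

struct InputSocketRefStub {
  BSocketStub *bsocket;
};

static InputSocketRefStub *find_input_socket_stub(std::vector<InputSocketRefStub *> &inputs,
                                                  BSocketStub *bsocket)
{
  /* No downcast needed: the element type is already the one the caller wants. */
  for (InputSocketRefStub *socket : inputs) {
    if (socket->bsocket == bsocket) {
      return socket;
    }
  }
  return nullptr;
}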

View File

@ -32,8 +32,8 @@ bNodeTreeType *ntreeType_Simulation;
void register_node_tree_type_sim(void)
{
bNodeTreeType *tt = ntreeType_Simulation = (bNodeTreeType *)MEM_callocN(
sizeof(bNodeTreeType), "simulation node tree type");
bNodeTreeType *tt = ntreeType_Simulation = static_cast<bNodeTreeType *>(
MEM_callocN(sizeof(bNodeTreeType), "simulation node tree type"));
tt->type = NTREE_SIMULATION;
strcpy(tt->idname, "SimulationNodeTree");
strcpy(tt->ui_name, N_("Simulation Editor"));
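/* Illustrative sketch, not part of this commit: MEM_callocN returns void *, and converting
 * that result back to the intended type only needs static_cast. std::calloc stands in for
 * MEM_callocN so the snippet is self-contained; NodeTreeTypeStub is a hypothetical type. */
#include <cstdlib>

struct NodeTreeTypeStub {
  int type;
  char idname[64];
};

static NodeTreeTypeStub *alloc_node_tree_type()
{
  /* void * converts to any object pointer type via static_cast; reinterpret_cast would also
   * compile, but it suggests a reinterpretation that is not actually happening here. */
  return static_cast<NodeTreeTypeStub *>(std::calloc(1, sizeof(NodeTreeTypeStub)));
}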

View File

@ -71,9 +71,9 @@ fn::MutableAttributesRef ParticleAllocator::allocate(int size)
}
else if (name == "Hash") {
MutableSpan<int> hashes = attributes.get<int>("Hash");
RandomNumberGenerator rng(hash_seed_ ^ (uint32_t)next_id_);
RandomNumberGenerator rng(hash_seed_ ^ static_cast<uint32_t>(next_id_));
for (int pindex : IndexRange(size)) {
hashes[pindex] = (int)rng.get_uint32();
hashes[pindex] = static_cast<int>(rng.get_uint32());
}
}
else {

View File

@ -233,14 +233,14 @@ static BLI_NOINLINE bool compute_new_particle_attributes(ParticleEmitterContext
if (settings.object->type != OB_MESH) {
return false;
}
Mesh &mesh = *(Mesh *)settings.object->data;
Mesh &mesh = *static_cast<Mesh *>(settings.object->data);
if (mesh.totvert == 0) {
return false;
}
const float start_time = context.emit_interval.start();
const uint32_t seed = DefaultHash<StringRef>{}(state.head.name);
RandomNumberGenerator rng{(*(uint32_t *)&start_time) ^ seed};
RandomNumberGenerator rng{*reinterpret_cast<const uint32_t *>(&start_time) ^ seed};
compute_birth_times(settings.rate, context.emit_interval, state, r_birth_times);
const int particle_amount = r_birth_times.size();
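/* Illustrative aside, not part of this commit: the seed expression above derives entropy from
 * the bit pattern of start_time. A memcpy-based bit copy (shown with hypothetical names)
 * expresses the same idea without dereferencing a reinterpreted pointer; the commit itself
 * only rewrites the existing cast in C++ syntax. */
#include <cstdint>
#include <cstring>

static uint32_t float_bits(float value)
{
  uint32_t bits;
  /* Copy the object representation instead of reading through a reinterpret_cast pointer. */
  std::memcpy(&bits, &value, sizeof(bits));
  return bits;
}

/* Usage sketch: combine the time-derived bits with a name hash to seed an RNG. */
static uint32_t make_emitter_seed(float start_time, uint32_t name_hash)
{
  return float_bits(start_time) ^ name_hash;
}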

View File

@ -96,7 +96,7 @@ class SimulationStateMap {
template<typename StateType> StateType *lookup(StringRef name) const
{
const char *type = BKE_simulation_get_state_type_name<StateType>();
return (StateType *)this->lookup_name_type(name, type);
return reinterpret_cast<StateType *>(this->lookup_name_type(name, type));
}
template<typename StateType> Span<StateType *> lookup() const

View File

@ -143,7 +143,7 @@ class SampledDependencyAnimations : public DependencyAnimations {
const float factor = simulation_time_interval_.factor_at_time(simulation_time);
BLI_assert(factor > 0.0f && factor < 1.0f);
const float scaled_factor = factor * (cached_transforms.size() - 1);
const int lower_sample = (int)scaled_factor;
const int lower_sample = static_cast<int>(scaled_factor);
const int upper_sample = lower_sample + 1;
const float mix_factor = scaled_factor - lower_sample;
r_transforms[i] = float4x4::interpolate(
@ -205,7 +205,7 @@ static void prepare_dependency_animations(Depsgraph &depsgraph,
if (GS(id_cow->name) != ID_OB) {
continue;
}
Object &object_cow = *(Object *)id_cow;
Object &object_cow = *reinterpret_cast<Object *>(id_cow);
constexpr int sample_count = 10;
Array<float4x4, sample_count> transforms(sample_count);
sample_object_transforms(object_cow, depsgraph, scene, scene_frame_interval, transforms);
@ -233,7 +233,8 @@ void update_simulation_in_depsgraph(Depsgraph *depsgraph,
return;
}
Simulation *simulation_orig = (Simulation *)DEG_get_original_id(&simulation_cow->id);
Simulation *simulation_orig = reinterpret_cast<Simulation *>(
DEG_get_original_id(&simulation_cow->id));
ResourceCollector resources;
SimulationInfluences influences;
@ -317,8 +318,8 @@ bool update_simulation_dependencies(Simulation *simulation)
}
used_handles.add_new(next_handle);
SimulationDependency *dependency = (SimulationDependency *)MEM_callocN(sizeof(*dependency),
AT);
SimulationDependency *dependency = static_cast<SimulationDependency *>(
MEM_callocN(sizeof(*dependency), AT));
id_us_plus(id);
dependency->id = id;
dependency->handle = next_handle;