Merge branch 'master' into geometry-nodes-simulation

Hans Goudey 2022-12-02 09:17:54 -06:00
commit a0caa03942
57 changed files with 1364 additions and 899 deletions


@ -15,18 +15,19 @@ CCL_NAMESPACE_BEGIN
* NOTE: light_p is modified when sample_coord is true. */
ccl_device_inline float area_light_rect_sample(float3 P,
ccl_private float3 *light_p,
float3 extentu,
float3 extentv,
const float3 axis_u,
const float len_u,
const float3 axis_v,
const float len_v,
float randu,
float randv,
bool sample_coord)
{
/* In our name system we're using P for the center, which is o in the paper. */
float3 corner = *light_p - extentu * 0.5f - extentv * 0.5f;
float extentu_len, extentv_len;
float3 corner = *light_p - axis_u * len_u * 0.5f - axis_v * len_v * 0.5f;
/* Compute local reference system R. */
float3 x = normalize_len(extentu, &extentu_len);
float3 y = normalize_len(extentv, &extentv_len);
float3 x = axis_u;
float3 y = axis_v;
float3 z = cross(x, y);
/* Compute rectangle coords in local reference system. */
float3 dir = corner - P;
@ -38,8 +39,8 @@ ccl_device_inline float area_light_rect_sample(float3 P,
}
float x0 = dot(dir, x);
float y0 = dot(dir, y);
float x1 = x0 + extentu_len;
float y1 = y0 + extentv_len;
float x1 = x0 + len_u;
float y1 = y0 + len_v;
/* Compute internal angles (gamma_i). */
float4 diff = make_float4(x0, y1, x1, y0) - make_float4(x1, y0, x0, y1);
float4 nz = make_float4(y0, x1, y1, x0) * diff;
@ -106,8 +107,10 @@ ccl_device float area_light_spread_attenuation(const float3 D,
ccl_device bool area_light_spread_clamp_area_light(const float3 P,
const float3 lightNg,
ccl_private float3 *lightP,
ccl_private float3 *extentu,
ccl_private float3 *extentv,
const float3 axis_u,
ccl_private float *len_u,
const float3 axis_v,
ccl_private float *len_v,
const float tan_spread)
{
/* Closest point in area light plane and distance to that plane. */
@ -117,22 +120,16 @@ ccl_device bool area_light_spread_clamp_area_light(const float3 P,
/* Radius of circle on area light that actually affects the shading point. */
const float radius = t / tan_spread;
/* TODO: would be faster to store as normalized vector + length, also in area_light_rect_sample.
*/
float len_u, len_v;
const float3 u = normalize_len(*extentu, &len_u);
const float3 v = normalize_len(*extentv, &len_v);
/* Local uv coordinates of closest point. */
const float closest_u = dot(u, closest_P - *lightP);
const float closest_v = dot(v, closest_P - *lightP);
const float closest_u = dot(axis_u, closest_P - *lightP);
const float closest_v = dot(axis_v, closest_P - *lightP);
/* Compute rectangle encompassing the circle that affects the shading point,
* clamped to the bounds of the area light. */
const float min_u = max(closest_u - radius, -len_u * 0.5f);
const float max_u = min(closest_u + radius, len_u * 0.5f);
const float min_v = max(closest_v - radius, -len_v * 0.5f);
const float max_v = min(closest_v + radius, len_v * 0.5f);
const float min_u = max(closest_u - radius, -*len_u * 0.5f);
const float max_u = min(closest_u + radius, *len_u * 0.5f);
const float min_v = max(closest_v - radius, -*len_v * 0.5f);
const float max_v = min(closest_v + radius, *len_v * 0.5f);
/* Skip if rectangle is empty. */
if (min_u >= max_u || min_v >= max_v) {
@ -143,12 +140,10 @@ ccl_device bool area_light_spread_clamp_area_light(const float3 P,
* uv coordinates. */
const float new_center_u = 0.5f * (min_u + max_u);
const float new_center_v = 0.5f * (min_v + max_v);
const float new_len_u = max_u - min_u;
const float new_len_v = max_v - min_v;
*len_u = max_u - min_u;
*len_v = max_v - min_v;
*lightP = *lightP + new_center_u * u + new_center_v * v;
*extentu = u * new_len_u;
*extentv = v * new_len_v;
*lightP = *lightP + new_center_u * axis_u + new_center_v * axis_v;
return true;
}
@ -164,8 +159,10 @@ ccl_device_inline bool area_light_sample(const ccl_global KernelLight *klight,
{
ls->P = klight->co;
float3 extentu = klight->area.extentu;
float3 extentv = klight->area.extentv;
const float3 axis_u = klight->area.axis_u;
const float3 axis_v = klight->area.axis_v;
const float len_u = klight->area.len_u;
const float len_v = klight->area.len_v;
float3 Ng = klight->area.dir;
float invarea = fabsf(klight->area.invarea);
bool is_round = (klight->area.invarea < 0.0f);
@ -179,30 +176,36 @@ ccl_device_inline bool area_light_sample(const ccl_global KernelLight *klight,
float3 inplane;
if (is_round || in_volume_segment) {
inplane = ellipse_sample(extentu * 0.5f, extentv * 0.5f, randu, randv);
inplane = ellipse_sample(axis_u * len_u * 0.5f, axis_v * len_v * 0.5f, randu, randv);
ls->P += inplane;
ls->pdf = invarea;
}
else {
inplane = ls->P;
float3 sample_extentu = extentu;
float3 sample_extentv = extentv;
float sample_len_u = len_u;
float sample_len_v = len_v;
if (!in_volume_segment && klight->area.tan_spread > 0.0f) {
if (!area_light_spread_clamp_area_light(
P, Ng, &ls->P, &sample_extentu, &sample_extentv, klight->area.tan_spread)) {
if (!area_light_spread_clamp_area_light(P,
Ng,
&ls->P,
axis_u,
&sample_len_u,
axis_v,
&sample_len_v,
klight->area.tan_spread)) {
return false;
}
}
ls->pdf = area_light_rect_sample(
P, &ls->P, sample_extentu, sample_extentv, randu, randv, true);
P, &ls->P, axis_u, sample_len_u, axis_v, sample_len_v, randu, randv, true);
inplane = ls->P - inplane;
}
const float light_u = dot(inplane, extentu) * (1.0f / dot(extentu, extentu));
const float light_v = dot(inplane, extentv) * (1.0f / dot(extentv, extentv));
const float light_u = dot(inplane, axis_u) / len_u;
const float light_v = dot(inplane, axis_v) / len_v;
/* NOTE: Return barycentric coordinates in the same notation as Embree and OptiX. */
ls->u = light_v + 0.5f;
@ -254,8 +257,8 @@ ccl_device_inline bool area_light_intersect(const ccl_global KernelLight *klight
return false;
}
const float3 extentu = klight->area.extentu;
const float3 extentv = klight->area.extentv;
const float3 inv_extent_u = klight->area.axis_u / klight->area.len_u;
const float3 inv_extent_v = klight->area.axis_v / klight->area.len_v;
const float3 Ng = klight->area.dir;
/* One sided. */
@ -266,8 +269,19 @@ ccl_device_inline bool area_light_intersect(const ccl_global KernelLight *klight
const float3 light_P = klight->co;
float3 P;
return ray_quad_intersect(
ray->P, ray->D, ray->tmin, ray->tmax, light_P, extentu, extentv, Ng, &P, t, u, v, is_round);
return ray_quad_intersect(ray->P,
ray->D,
ray->tmin,
ray->tmax,
light_P,
inv_extent_u,
inv_extent_v,
Ng,
&P,
t,
u,
v,
is_round);
}
ccl_device_inline bool area_light_sample_from_intersection(
@ -281,8 +295,6 @@ ccl_device_inline bool area_light_sample_from_intersection(
/* area light */
float invarea = fabsf(klight->area.invarea);
float3 extentu = klight->area.extentu;
float3 extentv = klight->area.extentv;
float3 Ng = klight->area.dir;
float3 light_P = klight->co;
@ -296,17 +308,26 @@ ccl_device_inline bool area_light_sample_from_intersection(
ls->pdf = invarea * lamp_light_pdf(Ng, -ray_D, ls->t);
}
else {
float3 sample_extentu = extentu;
float3 sample_extentv = extentv;
const float3 axis_u = klight->area.axis_u;
const float3 axis_v = klight->area.axis_v;
float sample_len_u = klight->area.len_u;
float sample_len_v = klight->area.len_v;
if (klight->area.tan_spread > 0.0f) {
if (!area_light_spread_clamp_area_light(
ray_P, Ng, &light_P, &sample_extentu, &sample_extentv, klight->area.tan_spread)) {
if (!area_light_spread_clamp_area_light(ray_P,
Ng,
&light_P,
axis_u,
&sample_len_u,
axis_v,
&sample_len_v,
klight->area.tan_spread)) {
return false;
}
}
ls->pdf = area_light_rect_sample(ray_P, &light_P, sample_extentu, sample_extentv, 0, 0, false);
ls->pdf = area_light_rect_sample(
ray_P, &light_P, axis_u, sample_len_u, axis_v, sample_len_v, 0, 0, false);
}
ls->eval_fac = 0.25f * invarea;
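
A note on the parameterization change running through the hunks above (an illustrative sketch, not part of the diff): the area light is no longer described by two full extent vectors but by a unit axis plus a scalar length per side. Assuming the Cycles float3 helpers (dot, normalize_len) that the kernel headers already provide, the two forms relate as follows:

  /* Sketch only: how the old extent vectors map onto the new axis/length pairs. */
  float len_u, len_v;
  const float3 axis_u = normalize_len(extentu, &len_u); /* extentu == axis_u * len_u */
  const float3 axis_v = normalize_len(extentv, &len_v); /* extentv == axis_v * len_v */

Storing the normalized form directly is what lets the new area_light_rect_sample and area_light_spread_clamp_area_light drop the per-sample normalize_len calls visible in the removed lines.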


@ -166,8 +166,14 @@ ccl_device_inline float background_portal_pdf(
int portal = kernel_data.integrator.portal_offset + p;
const ccl_global KernelLight *klight = &kernel_data_fetch(lights, portal);
float3 extentu = klight->area.extentu;
float3 extentv = klight->area.extentv;
const float3 axis_u = klight->area.axis_u;
const float len_u = klight->area.len_u;
const float3 axis_v = klight->area.axis_v;
const float len_v = klight->area.len_v;
const float3 inv_extent_u = axis_u / len_u;
const float3 inv_extent_v = axis_v / len_v;
bool is_round = (klight->area.invarea < 0.0f);
if (!ray_quad_intersect(P,
@ -175,8 +181,8 @@ ccl_device_inline float background_portal_pdf(
1e-4f,
FLT_MAX,
lightpos,
extentu,
extentv,
inv_extent_u,
inv_extent_v,
dir,
NULL,
NULL,
@ -191,7 +197,8 @@ ccl_device_inline float background_portal_pdf(
portal_pdf += fabsf(klight->area.invarea) * lamp_light_pdf(dir, -D, t);
}
else {
portal_pdf += area_light_rect_sample(P, &lightpos, extentu, extentv, 0.0f, 0.0f, false);
portal_pdf += area_light_rect_sample(
P, &lightpos, axis_u, len_u, axis_v, len_v, 0.0f, 0.0f, false);
}
}
@ -240,19 +247,22 @@ ccl_device float3 background_portal_sample(KernelGlobals kg,
/* p is the portal to be sampled. */
int portal = kernel_data.integrator.portal_offset + p;
const ccl_global KernelLight *klight = &kernel_data_fetch(lights, portal);
float3 extentu = klight->area.extentu;
float3 extentv = klight->area.extentv;
const float3 axis_u = klight->area.axis_u;
const float3 axis_v = klight->area.axis_v;
const float len_u = klight->area.len_u;
const float len_v = klight->area.len_v;
bool is_round = (klight->area.invarea < 0.0f);
float3 D;
if (is_round) {
lightpos += ellipse_sample(extentu * 0.5f, extentv * 0.5f, randu, randv);
lightpos += ellipse_sample(axis_u * len_u * 0.5f, axis_v * len_v * 0.5f, randu, randv);
float t;
D = normalize_len(lightpos - P, &t);
*pdf = fabsf(klight->area.invarea) * lamp_light_pdf(dir, -D, t);
}
else {
*pdf = area_light_rect_sample(P, &lightpos, extentu, extentv, randu, randv, true);
*pdf = area_light_rect_sample(
P, &lightpos, axis_u, len_u, axis_v, len_v, randu, randv, true);
D = normalize(lightpos - P);
}


@ -1296,12 +1296,15 @@ typedef struct KernelSpotLight {
/* PointLight is SpotLight with only radius and invarea being used. */
typedef struct KernelAreaLight {
packed_float3 extentu;
float invarea;
packed_float3 extentv;
float tan_spread;
packed_float3 axis_u;
float len_u;
packed_float3 axis_v;
float len_v;
packed_float3 dir;
float invarea;
float tan_spread;
float normalize_spread;
float pad[2];
} KernelAreaLight;
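
A layout observation about the struct above (mine, not stated in the diff): packed_float3 occupies 12 bytes, so pairing each axis with its length keeps the 16-byte member grouping the old layout had:

  axis_u (12 bytes) + len_u   (4 bytes) = 16
  axis_v (12 bytes) + len_v   (4 bytes) = 16
  dir    (12 bytes) + invarea (4 bytes) = 16
  tan_spread + normalize_spread + pad[2] = 16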
typedef struct KernelDistantLight {


@ -719,7 +719,11 @@ void LightManager::device_update_lights(Device *, DeviceScene *dscene, Scene *sc
float3 extentu = light->axisu * (light->sizeu * light->size);
float3 extentv = light->axisv * (light->sizev * light->size);
float area = len(extentu) * len(extentv);
float len_u, len_v;
float3 axis_u = normalize_len(extentu, &len_u);
float3 axis_v = normalize_len(extentv, &len_v);
float area = len_u * len_v;
if (light->round) {
area *= -M_PI_4_F;
}
@ -729,8 +733,10 @@ void LightManager::device_update_lights(Device *, DeviceScene *dscene, Scene *sc
dir = safe_normalize(dir);
klights[portal_index].co = light->co;
klights[portal_index].area.extentu = extentu;
klights[portal_index].area.extentv = extentv;
klights[portal_index].area.axis_u = axis_u;
klights[portal_index].area.len_u = len_u;
klights[portal_index].area.axis_v = axis_v;
klights[portal_index].area.len_v = len_v;
klights[portal_index].area.invarea = invarea;
klights[portal_index].area.dir = dir;
klights[portal_index].tfm = light->tfm;
@ -834,7 +840,11 @@ void LightManager::device_update_lights(Device *, DeviceScene *dscene, Scene *sc
else if (light->light_type == LIGHT_AREA) {
float3 extentu = light->axisu * (light->sizeu * light->size);
float3 extentv = light->axisv * (light->sizev * light->size);
float area = len(extentu) * len(extentv);
float len_u, len_v;
float3 axis_u = normalize_len(extentu, &len_u);
float3 axis_v = normalize_len(extentv, &len_v);
float area = len_u * len_v;
if (light->round) {
area *= -M_PI_4_F;
}
@ -856,8 +866,10 @@ void LightManager::device_update_lights(Device *, DeviceScene *dscene, Scene *sc
shader_id |= SHADER_USE_MIS;
klights[light_index].co = co;
klights[light_index].area.extentu = extentu;
klights[light_index].area.extentv = extentv;
klights[light_index].area.axis_u = axis_u;
klights[light_index].area.len_u = len_u;
klights[light_index].area.axis_v = axis_v;
klights[light_index].area.len_v = len_v;
klights[light_index].area.invarea = invarea;
klights[light_index].area.dir = dir;
klights[light_index].area.tan_spread = tan_spread;
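
A short sketch tying these host-side values to the kernel hunks earlier in this commit (illustrative; all names come from the diff): the sign of invarea doubles as the round/rectangular flag.

  float area = len_u * len_v;   /* rectangle area from the new per-side lengths */
  if (light->round) {
    area *= -M_PI_4_F;          /* ellipse area is (pi/4) * len_u * len_v; the negative sign marks the light as round */
  }
  /* Kernel side (see the area.h hunks above):
   *   bool is_round = (klight->area.invarea < 0.0f);
   *   float invarea = fabsf(klight->area.invarea); */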


@ -257,8 +257,8 @@ ccl_device bool ray_quad_intersect(float3 ray_P,
float ray_tmin,
float ray_tmax,
float3 quad_P,
float3 quad_u,
float3 quad_v,
float3 inv_quad_u,
float3 inv_quad_v,
float3 quad_n,
ccl_private float3 *isect_P,
ccl_private float *isect_t,
@ -273,11 +273,11 @@ ccl_device bool ray_quad_intersect(float3 ray_P,
}
const float3 hit = ray_P + t * ray_D;
const float3 inplane = hit - quad_P;
const float u = dot(inplane, quad_u) / dot(quad_u, quad_u);
const float u = dot(inplane, inv_quad_u);
if (u < -0.5f || u > 0.5f) {
return false;
}
const float v = dot(inplane, quad_v) / dot(quad_v, quad_v);
const float v = dot(inplane, inv_quad_v);
if (v < -0.5f || v > 0.5f) {
return false;
}
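
The reasoning behind the inv_quad_u / inv_quad_v parameters (a small derivation, not part of the diff): callers now pass the axis divided by the side length, which folds the old normalization term into the stored vector, so the projection needs one dot product instead of a dot product plus a divide.

  /* With quad_u == axis_u * len_u and inv_quad_u == axis_u / len_u:
   *   dot(inplane, quad_u) / dot(quad_u, quad_u)
   *     == dot(inplane, axis_u) * len_u / (len_u * len_u)
   *     == dot(inplane, axis_u) / len_u
   *     == dot(inplane, inv_quad_u) */

This matches the inv_extent_u = klight->area.axis_u / klight->area.len_u computation at the call sites above.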


@ -23,14 +23,16 @@ PERFORMANCE OF THIS SOFTWARE.
** Cuda Wrangler; version cbf465b -- https://github.com/CudaWrangler/cuew
** Draco; version 1.3.6 -- https://google.github.io/draco/
** Embree; version 3.13.4 -- https://github.com/embree/embree
** Intel® Open Path Guiding Library; version v0.3.1-beta --
** Intel(R) oneAPI DPC++ compiler; version 20221019 --
https://github.com/intel/llvm#oneapi-dpc-compiler
** Intel® Open Path Guiding Library; version v0.4.1-beta --
http://www.openpgl.org/
** Mantaflow; version 0.13 -- http://mantaflow.com/
** oneAPI Threading Building Block; version 2020_U3 --
https://software.intel.com/en-us/oneapi/onetbb
** OpenCL Wrangler; version 27a6867 -- https://github.com/OpenCLWrangler/clew
** OpenImageDenoise; version 1.4.3 -- https://www.openimagedenoise.org/
** OpenSSL; version 1.1.1 -- https://www.openssl.org/
** OpenSSL; version 1.1.1q -- https://www.openssl.org/
** OpenXR SDK; version 1.0.17 -- https://khronos.org/openxr
** RangeTree; version 40ebed8aa209 -- https://github.com/ideasman42/rangetree-c
** SDL Extension Wrangler; version 15edf8e --
@ -242,6 +244,8 @@ limitations under the License.
Copyright 2018 The Draco Authors
* For Embree see also this required NOTICE:
Copyright 2009-2020 Intel Corporation
* For Intel(R) oneAPI DPC++ compiler see also this required NOTICE:
Copyright (C) 2021 Intel Corporation
* For Intel® Open Path Guiding Library see also this required NOTICE:
Copyright 2020 Intel Corporation.
* For Mantaflow see also this required NOTICE:
@ -273,7 +277,7 @@ limitations under the License.
Copyright (c) 2016, Alliance for Open Media. All rights reserved.
** NASM; version 2.15.02 -- https://www.nasm.us/
Contributions since 2008-12-15 are Copyright Intel Corporation.
** OpenJPEG; version 2.4.0 -- https://github.com/uclouvain/openjpeg
** OpenJPEG; version 2.5.0 -- https://github.com/uclouvain/openjpeg
Copyright (c) 2002-2014, Universite catholique de Louvain (UCL), Belgium
Copyright (c) 2002-2014, Professor Benoit Macq
Copyright (c) 2003-2014, Antonin Descampe
@ -330,7 +334,7 @@ Copyright Intel Corporation
Copyright (c) 2005-2021, NumPy Developers.
** Ogg; version 1.3.5 -- https://www.xiph.org/ogg/
COPYRIGHT (C) 1994-2019 by the Xiph.Org Foundation https://www.xiph.org/
** Open Shading Language; version 1.11.17.0 --
** Open Shading Language; version 1.12.6.2 --
https://github.com/imageworks/OpenShadingLanguage
Copyright Contributors to the Open Shading Language project.
** OpenColorIO; version 2.1.1 --
@ -339,7 +343,7 @@ Copyright Contributors to the OpenColorIO Project.
** OpenEXR; version 3.1.5 --
https://github.com/AcademySoftwareFoundation/openexr
Copyright Contributors to the OpenEXR Project. All rights reserved.
** OpenImageIO; version 2.3.13.0 -- http://www.openimageio.org
** OpenImageIO; version 2.3.20.0 -- http://www.openimageio.org
Copyright (c) 2008-present by Contributors to the OpenImageIO project. All
Rights Reserved.
** Pystring; version 1.1.3 -- https://github.com/imageworks/pystring
@ -1183,7 +1187,7 @@ Copyright (C) 2003-2021 x264 project
** miniLZO; version 2.08 -- http://www.oberhumer.com/opensource/lzo/
LZO and miniLZO are Copyright (C) 1996-2014 Markus Franz Xaver Oberhumer
All Rights Reserved.
** The FreeType Project; version 2.11.1 --
** The FreeType Project; version 2.12.1 --
https://sourceforge.net/projects/freetype
Copyright (C) 1996-2020 by David Turner, Robert Wilhelm, and Werner Lemberg.
** X Drag and Drop; version 2000-08-08 --
@ -2186,8 +2190,10 @@ of this License. But first, please read <https://www.gnu.org/licenses
------
** FFmpeg; version 5.0 -- http://ffmpeg.org/
** FFmpeg; version 5.1.2 -- http://ffmpeg.org/
-
** Libsndfile; version 1.1.0 -- http://libsndfile.github.io/libsndfile/
Copyright (C) 2011-2016 Erik de Castro Lopo <erikd@mega-nerd.com>
GNU LESSER GENERAL PUBLIC LICENSE
@ -2675,171 +2681,6 @@ That's all there is to it!
------
** Libsndfile; version 1.0.28 -- http://www.mega-nerd.com/libsndfile/
Copyright (C) 2011-2016 Erik de Castro Lopo <erikd@mega-nerd.com>
GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies of this license
document, but changing it is not allowed.
This version of the GNU Lesser General Public License incorporates the terms
and conditions of version 3 of the GNU General Public License, supplemented by
the additional permissions listed below.
0. Additional Definitions.
As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.
"The Library" refers to a covered work governed by this License, other
than an Application or a Combined Work as defined below.
An "Application" is any work that makes use of an interface provided by
the Library, but which is not otherwise based on the Library. Defining a
subclass of a class defined by the Library is deemed a mode of using an
interface provided by the Library.
A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library with
which the Combined Work was made is also called the "Linked Version".
The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code for
portions of the Combined Work that, considered in isolation, are based on
the Application, and not on the Linked Version.
The "Corresponding Application Code" for a Combined Work means the object
code and/or source code for the Application, including any data and
utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.
1. Exception to Section 3 of the GNU GPL.
You may convey a covered work under sections 3 and 4 of this License without
being bound by section 3 of the GNU GPL.
2. Conveying Modified Versions.
If you modify a copy of the Library, and, in your modifications, a facility
refers to a function or data to be supplied by an Application that uses the
facility (other than as an argument passed when the facility is invoked),
then you may convey a copy of the modified version:
a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the function or
data, the facility still operates, and performs whatever part of its
purpose remains meaningful, or
b) under the GNU GPL, with none of the additional permissions of this
License applicable to that copy.
3. Object Code Incorporating Material from Library Header Files.
The object code form of an Application may incorporate material from a
header file that is part of the Library. You may convey such object code
under terms of your choice, provided that, if the incorporated material is
not limited to numerical parameters, data structure layouts and accessors,
or small macros, inline functions and templates (ten or fewer lines in
length), you do both of the following:
a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are covered by
this License.
b) Accompany the object code with a copy of the GNU GPL and this license
document.
4. Combined Works.
You may convey a Combined Work under terms of your choice that, taken
together, effectively do not restrict modification of the portions of the
Library contained in the Combined Work and reverse engineering for debugging
such modifications, if you also do each of the following:
a) Give prominent notice with each copy of the Combined Work that the
Library is used in it and that the Library and its use are covered by
this License.
b) Accompany the Combined Work with a copy of the GNU GPL and this
license document.
c) For a Combined Work that displays copyright notices during execution,
include the copyright notice for the Library among these notices, as well
as a reference directing the user to the copies of the GNU GPL and this
license document.
d) Do one of the following:
0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form suitable
for, and under terms that permit, the user to recombine or relink the
Application with a modified version of the Linked Version to produce a
modified Combined Work, in the manner specified by section 6 of the
GNU GPL for conveying Corresponding Source.
1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time a copy
of the Library already present on the user's computer system, and (b)
will operate properly with a modified version of the Library that is
interface-compatible with the Linked Version.
e) Provide Installation Information, but only if you would otherwise be
required to provide such information under section 6 of the GNU GPL, and
only to the extent that such information is necessary to install and
execute a modified version of the Combined Work produced by recombining
or relinking the Application with a modified version of the Linked
Version. (If you use option 4d0, the Installation Information must
accompany the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL for
conveying Corresponding Source.)
5. Combined Libraries.
You may place library facilities that are a work based on the Library side
by side in a single library together with other library facilities that are
not Applications and are not covered by this License, and convey such a
combined library under terms of your choice, if you do both of the
following:
a) Accompany the combined library with a copy of the same work based on
the Library, uncombined with any other library facilities, conveyed under
the terms of this License.
b) Give prominent notice with the combined library that part of it is a
work based on the Library, and explaining where to find the accompanying
uncombined form of the same work.
6. Revised Versions of the GNU Lesser General Public License.
The Free Software Foundation may publish revised and/or new versions of the
GNU Lesser General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Library as you
received it specifies that a certain numbered version of the GNU Lesser
General Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that published
version or of any later version published by the Free Software Foundation.
If the Library as you received it does not specify a version number of the
GNU Lesser General Public License, you may choose any version of the GNU
Lesser General Public License ever published by the Free Software
Foundation.
If the Library as you received it specifies that a proxy can decide whether
future versions of the GNU Lesser General Public License shall apply, that
proxy's public statement of acceptance of any version is permanent
authorization for you to choose that version for the Library.
------
** LIBPNG; version 1.6.37 -- http://prdownloads.sourceforge.net/libpng
* Copyright (c) 1995-2019 The PNG Reference Library Authors.
* Copyright (c) 2018-2019 Cosmin Truta.
@ -2984,21 +2825,33 @@ Copyright (c) 2009, 2010, 2013-2016 by the Brotli Authors.
** Epoxy; version 1.5.10 -- https://github.com/anholt/libepoxy
Copyright © 2013-2014 Intel Corporation.
Copyright © 2013 The Khronos Group Inc.
** Expat; version 2.4.4 -- https://github.com/libexpat/libexpat/
** Expat; version 2.5.0 -- https://github.com/libexpat/libexpat/
Copyright (c) 1998-2000 Thai Open Source Software Center Ltd and Clark Cooper
Copyright (c) 2001-2019 Expat maintainers
** Intel(R) Graphics Memory Management Library; version 22.1.2 --
** Intel(R) Graphics Compute Runtime; version 22.38.24278 --
https://github.com/intel/compute-runtime
Copyright (C) 2021 Intel Corporation
** Intel(R) Graphics Memory Management Library; version 22.1.8 --
https://github.com/intel/gmmlib
Copyright (c) 2017 Intel Corporation.
Copyright (c) 2016 Gabi Melman.
Copyright 2008, Google Inc. All rights reserved.
** JSON for Modern C++; version 3.10.2 -- https://github.com/nlohmann/json/
Copyright (c) 2013-2021 Niels Lohmann
** Libxml2; version 2.9.10 -- http://xmlsoft.org/
** libdecor; version 0.1.0 -- https://gitlab.freedesktop.org/libdecor/libdecor
Copyright © 2010 Intel Corporation
Copyright © 2011 Benjamin Franzke
Copyright © 2018-2021 Jonas Ådahl
Copyright © 2019 Christian Rauch
Copyright (c) 2006, 2008 Junio C Hamano
Copyright © 2017-2018 Red Hat Inc.
Copyright © 2012 Collabora, Ltd.
Copyright © 2008 Kristian Høgsberg
** Libxml2; version 2.10.3 -- http://xmlsoft.org/
Copyright (C) 1998-2012 Daniel Veillard. All Rights Reserved.
** Mesa 3D; version 21.1.5 -- https://www.mesa3d.org/
Copyright (C) 1999-2007 Brian Paul All Rights Reserved.
** oneAPI Level Zero; version v1.7.15 --
** oneAPI Level Zero; version v1.8.5 --
https://github.com/oneapi-src/level-zero
Copyright (C) 2019-2021 Intel Corporation
** OPENCollada; version 1.6.68 -- https://github.com/KhronosGroup/OpenCOLLADA
@ -3046,9 +2899,6 @@ SOFTWARE.
------
** NanoVDB; version dc37d8a631922e7bef46712947dc19b755f3e841 --
https://github.com/AcademySoftwareFoundation/openvdb
Copyright Contributors to the OpenVDB Project
** OpenVDB; version 9.0.0 -- http://www.openvdb.org/
Copyright Contributors to the OpenVDB Project
@ -3401,7 +3251,7 @@ Copyright (c) 2013-14 Mikko Mononen memon@inside.org
Copyright (C) 1997-2020 Sam Lantinga <slouken@libsdl.org>
** TinyXML; version 2.6.2 -- https://sourceforge.net/projects/tinyxml/
Lee Thomason, Yves Berquin, Andrew Ellerton.
** zlib; version 1.2.12 -- https://zlib.net
** zlib; version 1.2.13 -- https://zlib.net
Copyright (C) 1995-2017 Jean-loup Gailly
zlib License Copyright (c) <year> <copyright holders>
@ -3667,7 +3517,32 @@ disclaims all warranties with regard to this software.
------
** Python; version 3.10.2 -- https://www.python.org
** Wayland; version 1.21.0 -- https://gitlab.freedesktop.org/wayland/wayland
Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
Copyright © 2011 Kristian Høgsberg
Copyright © 2011 Benjamin Franzke
Copyright © 2010-2012 Intel Corporation
Copyright © 2012 Collabora, Ltd.
Copyright © 2015 Giulio Camuffo
Copyright © 2016 Klarälvdalens Datakonsult AB, a KDAB Group company,
info@kdab.com
Copyright © 2012 Jason Ekstrand
Copyright (c) 2014 Red Hat, Inc.
Copyright © 2013 Marek Chalupa
Copyright © 2014 Jonas Ådahl
Copyright © 2016 Yong Bakos
Copyright © 2017 Samsung Electronics Co., Ltd
Copyright © 2002 Keith Packard
Copyright 1999 SuSE, Inc.
Copyright © 2012 Philipp Brüschweiler
Copyright (c) 2020 Simon Ser
Copyright (c) 2006, 2008 Junio C Hamano
MIT Expat
------
** Python; version 3.10.8 -- https://www.python.org
Copyright (c) 2001-2021 Python Software Foundation. All rights reserved.
A. HISTORY OF THE SOFTWARE
@ -4023,6 +3898,38 @@ ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
------
** The OpenGL Extension Wrangler Library; version 2.0.0 --
http://glew.sourceforge.net/
Copyright (C) 2008-2015, Nigel Stewart <nigels[]users sourceforge net>
Copyright (C) 2002-2008, Milan Ikits <milan ikits[]ieee org>
Copyright (C) 2002-2008, Marcelo E. Magallon <mmagallo[]debian org>
Copyright (C) 2002, Lev Povalahev
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* The name of the author may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
Mesa 3-D graphics library
Version: 7.0

@ -1 +1 @@
Subproject commit fdfd24de034d4bba4fb67731d0aae81dc4940239
Subproject commit 0b0052bd53ad8249ed07dfb87705c338af698bde


@ -71,12 +71,3 @@ void BKE_asset_metadata_read(struct BlendDataReader *reader, struct AssetMetaDat
#ifdef __cplusplus
}
#endif
#ifdef __cplusplus
# include <memory>
[[nodiscard]] std::unique_ptr<AssetMetaData> BKE_asset_metadata_move_to_unique_ptr(
AssetMetaData *asset_data);
#endif


@ -30,7 +30,7 @@ extern "C" {
/* Minimum Blender version that supports reading file written with the current
* version. Older Blender versions will test this and show a warning if the file
* was written with too new a version. */
#define BLENDER_FILE_MIN_VERSION 304
#define BLENDER_FILE_MIN_VERSION 305
#define BLENDER_FILE_MIN_SUBVERSION 3
/** User readable version string. */


@ -1122,6 +1122,18 @@ inline blender::MutableSpan<MDeformVert> Mesh::deform_verts_for_write()
return {BKE_mesh_deform_verts_for_write(this), this->totvert};
}
inline blender::Span<blender::float3> Mesh::poly_normals() const
{
return {reinterpret_cast<const blender::float3 *>(BKE_mesh_poly_normals_ensure(this)),
this->totpoly};
}
inline blender::Span<blender::float3> Mesh::vertex_normals() const
{
return {reinterpret_cast<const blender::float3 *>(BKE_mesh_vertex_normals_ensure(this)),
this->totvert};
}
#endif
/** \} */
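
A brief usage sketch for the new accessors (illustrative; it mirrors how the mesh_normals_varray hunk later in this commit uses them):

  /* Assuming an evaluated Mesh &mesh whose normal caches are available: */
  const blender::Span<blender::float3> vert_normals = mesh.vertex_normals();
  const blender::Span<blender::float3> poly_normals = mesh.poly_normals();
  /* Both spans wrap the cached arrays returned by BKE_mesh_vertex_normals_ensure()
   * and BKE_mesh_poly_normals_ensure(), sized totvert and totpoly respectively. */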


@ -118,7 +118,7 @@ set(SRC
intern/curves_utils.cc
intern/customdata.cc
intern/customdata_file.c
intern/data_transfer.c
intern/data_transfer.cc
intern/deform.c
intern/displist.cc
intern/dynamicpaint.c
@ -164,7 +164,7 @@ set(SRC
intern/instances.cc
intern/ipo.c
intern/kelvinlet.c
intern/key.c
intern/key.cc
intern/keyconfig.c
intern/lattice.c
intern/lattice_deform.c
@ -203,7 +203,7 @@ set(SRC
intern/mesh_mapping.cc
intern/mesh_merge.c
intern/mesh_merge_customdata.cc
intern/mesh_mirror.c
intern/mesh_mirror.cc
intern/mesh_normals.cc
intern/mesh_remap.cc
intern/mesh_remesh_voxel.cc


@ -47,13 +47,6 @@ AssetMetaData::~AssetMetaData()
BLI_freelistN(&tags);
}
std::unique_ptr<AssetMetaData> BKE_asset_metadata_move_to_unique_ptr(AssetMetaData *asset_data)
{
std::unique_ptr unique_asset_data = std::make_unique<AssetMetaData>(*asset_data);
*asset_data = *DNA_struct_default_get(AssetMetaData);
return unique_asset_data;
}
static AssetTag *asset_metadata_tag_add(AssetMetaData *asset_data, const char *const name)
{
AssetTag *tag = (AssetTag *)MEM_callocN(sizeof(*tag), __func__);


@ -271,18 +271,19 @@ static void data_transfer_dtdata_type_preprocess(Mesh *me_src,
const float split_angle_dst = me_dst->smoothresh;
/* This should be ensured by cddata_masks we pass to code generating/giving us me_src now. */
BLI_assert(CustomData_get_layer(&me_src->ldata, CD_NORMAL) != NULL);
BLI_assert(CustomData_get_layer(&me_src->ldata, CD_NORMAL) != nullptr);
(void)me_src;
float(*loop_nors_dst)[3];
short(*custom_nors_dst)[2] = CustomData_get_layer(ldata_dst, CD_CUSTOMLOOPNORMAL);
short(*custom_nors_dst)[2] = static_cast<short(*)[2]>(
CustomData_get_layer(ldata_dst, CD_CUSTOMLOOPNORMAL));
/* Cache loop nors into a temp CDLayer. */
loop_nors_dst = CustomData_get_layer(ldata_dst, CD_NORMAL);
const bool do_loop_nors_dst = (loop_nors_dst == NULL);
loop_nors_dst = static_cast<float(*)[3]>(CustomData_get_layer(ldata_dst, CD_NORMAL));
const bool do_loop_nors_dst = (loop_nors_dst == nullptr);
if (do_loop_nors_dst) {
loop_nors_dst = CustomData_add_layer(
ldata_dst, CD_NORMAL, CD_SET_DEFAULT, NULL, num_loops_dst);
loop_nors_dst = static_cast<float(*)[3]>(
CustomData_add_layer(ldata_dst, CD_NORMAL, CD_SET_DEFAULT, nullptr, num_loops_dst));
CustomData_set_layer_flag(ldata_dst, CD_NORMAL, CD_FLAG_TEMPORARY);
}
if (dirty_nors_dst || do_loop_nors_dst) {
@ -299,8 +300,8 @@ static void data_transfer_dtdata_type_preprocess(Mesh *me_src,
num_polys_dst,
use_split_nors_dst,
split_angle_dst,
NULL,
NULL,
nullptr,
nullptr,
custom_nors_dst);
}
}
@ -330,12 +331,14 @@ static void data_transfer_dtdata_type_postprocess(Object *UNUSED(ob_src),
CustomData *ldata_dst = &me_dst->ldata;
const float(*poly_nors_dst)[3] = BKE_mesh_poly_normals_ensure(me_dst);
float(*loop_nors_dst)[3] = CustomData_get_layer(ldata_dst, CD_NORMAL);
short(*custom_nors_dst)[2] = CustomData_get_layer(ldata_dst, CD_CUSTOMLOOPNORMAL);
float(*loop_nors_dst)[3] = static_cast<float(*)[3]>(
CustomData_get_layer(ldata_dst, CD_NORMAL));
short(*custom_nors_dst)[2] = static_cast<short(*)[2]>(
CustomData_get_layer(ldata_dst, CD_CUSTOMLOOPNORMAL));
if (!custom_nors_dst) {
custom_nors_dst = CustomData_add_layer(
ldata_dst, CD_CUSTOMLOOPNORMAL, CD_SET_DEFAULT, NULL, num_loops_dst);
custom_nors_dst = static_cast<short(*)[2]>(CustomData_add_layer(
ldata_dst, CD_CUSTOMLOOPNORMAL, CD_SET_DEFAULT, nullptr, num_loops_dst));
}
/* Note loop_nors_dst contains our custom normals as transferred from source... */
@ -364,7 +367,7 @@ static MeshRemapIslandsCalc data_transfer_get_loop_islands_generator(const int c
default:
break;
}
return NULL;
return nullptr;
}
float data_transfer_interp_float_do(const int mix_mode,
@ -422,9 +425,9 @@ void data_transfer_layersmapping_add_item(ListBase *r_map,
cd_datatransfer_interp interp,
void *interp_data)
{
CustomDataTransferLayerMap *item = MEM_mallocN(sizeof(*item), __func__);
CustomDataTransferLayerMap *item = MEM_new<CustomDataTransferLayerMap>(__func__);
BLI_assert(data_dst != NULL);
BLI_assert(data_dst != nullptr);
item->data_type = cddata_type;
item->mix_mode = mix_mode;
@ -487,7 +490,7 @@ static void data_transfer_layersmapping_add_item_cd(ListBase *r_map,
* \note
* All those layer mapping handlers return false *only* if they were given invalid parameters.
* This means that even if they do nothing, they will return true if all given parameters were OK.
* Also, r_map may be NULL, in which case they will 'only' create/delete destination layers
* Also, r_map may be nullptr, in which case they will 'only' create/delete destination layers
* according to given parameters.
*/
static bool data_transfer_layersmapping_cdlayers_multisrc_to_dst(ListBase *r_map,
@ -508,10 +511,10 @@ static bool data_transfer_layersmapping_cdlayers_multisrc_to_dst(ListBase *r_map
void *interp_data)
{
const void *data_src;
void *data_dst = NULL;
void *data_dst = nullptr;
int idx_src = num_layers_src;
int idx_dst, tot_dst = CustomData_number_of_layers(cd_dst, cddata_type);
bool *data_dst_to_delete = NULL;
bool *data_dst_to_delete = nullptr;
if (!use_layers_src) {
/* No source at all, we can only delete all dest if requested... */
@ -538,7 +541,7 @@ static bool data_transfer_layersmapping_cdlayers_multisrc_to_dst(ListBase *r_map
if (use_create) {
/* Create as much data layers as necessary! */
for (; idx_dst < idx_src; idx_dst++) {
CustomData_add_layer(cd_dst, cddata_type, CD_SET_DEFAULT, NULL, num_elem_dst);
CustomData_add_layer(cd_dst, cddata_type, CD_SET_DEFAULT, nullptr, num_elem_dst);
}
}
else {
@ -581,8 +584,8 @@ static bool data_transfer_layersmapping_cdlayers_multisrc_to_dst(ListBase *r_map
case DT_LAYERS_NAME_DST:
if (use_delete) {
if (tot_dst) {
data_dst_to_delete = MEM_mallocN(sizeof(*data_dst_to_delete) * (size_t)tot_dst,
__func__);
data_dst_to_delete = static_cast<bool *>(
MEM_mallocN(sizeof(*data_dst_to_delete) * (size_t)tot_dst, __func__));
memset(data_dst_to_delete, true, sizeof(*data_dst_to_delete) * (size_t)tot_dst);
}
}
@ -600,7 +603,7 @@ static bool data_transfer_layersmapping_cdlayers_multisrc_to_dst(ListBase *r_map
if ((idx_dst = CustomData_get_named_layer(cd_dst, cddata_type, name)) == -1) {
if (use_create) {
CustomData_add_layer_named(
cd_dst, cddata_type, CD_SET_DEFAULT, NULL, num_elem_dst, name);
cd_dst, cddata_type, CD_SET_DEFAULT, nullptr, num_elem_dst, name);
idx_dst = CustomData_get_named_layer(cd_dst, cddata_type, name);
}
else {
@ -673,7 +676,7 @@ static bool data_transfer_layersmapping_cdlayers(ListBase *r_map,
{
int idx_src, idx_dst;
const void *data_src;
void *data_dst = NULL;
void *data_dst = nullptr;
if (CustomData_layertype_is_singleton(cddata_type)) {
if (!(data_src = CustomData_get_layer(cd_src, cddata_type))) {
@ -688,7 +691,7 @@ static bool data_transfer_layersmapping_cdlayers(ListBase *r_map,
if (!use_create) {
return true;
}
data_dst = CustomData_add_layer(cd_dst, cddata_type, CD_SET_DEFAULT, NULL, num_elem_dst);
data_dst = CustomData_add_layer(cd_dst, cddata_type, CD_SET_DEFAULT, nullptr, num_elem_dst);
}
else if (use_dupref_dst && r_map) {
/* If dest is a evaluated mesh (from modifier),
@ -741,7 +744,8 @@ static bool data_transfer_layersmapping_cdlayers(ListBase *r_map,
if (!use_create) {
return true;
}
data_dst = CustomData_add_layer(cd_dst, cddata_type, CD_SET_DEFAULT, NULL, num_elem_dst);
data_dst = CustomData_add_layer(
cd_dst, cddata_type, CD_SET_DEFAULT, nullptr, num_elem_dst);
}
else {
/* If dest is a evaluated mesh (from modifier),
@ -764,7 +768,7 @@ static bool data_transfer_layersmapping_cdlayers(ListBase *r_map,
}
/* Create as much data layers as necessary! */
for (; num <= idx_dst; num++) {
CustomData_add_layer(cd_dst, cddata_type, CD_SET_DEFAULT, NULL, num_elem_dst);
CustomData_add_layer(cd_dst, cddata_type, CD_SET_DEFAULT, nullptr, num_elem_dst);
}
}
/* If dest is a evaluated mesh (from modifier),
@ -783,7 +787,8 @@ static bool data_transfer_layersmapping_cdlayers(ListBase *r_map,
if (!use_create) {
return true;
}
CustomData_add_layer_named(cd_dst, cddata_type, CD_SET_DEFAULT, NULL, num_elem_dst, name);
CustomData_add_layer_named(
cd_dst, cddata_type, CD_SET_DEFAULT, nullptr, num_elem_dst, name);
idx_dst = CustomData_get_named_layer(cd_dst, cddata_type, name);
}
/* If dest is a evaluated mesh (from modifier),
@ -818,9 +823,9 @@ static bool data_transfer_layersmapping_cdlayers(ListBase *r_map,
}
else if (fromlayers == DT_LAYERS_ALL_SRC) {
int num_src = CustomData_number_of_layers(cd_src, cddata_type);
bool *use_layers_src = num_src ?
MEM_mallocN(sizeof(*use_layers_src) * (size_t)num_src, __func__) :
NULL;
bool *use_layers_src = num_src ? static_cast<bool *>(MEM_mallocN(
sizeof(*use_layers_src) * (size_t)num_src, __func__)) :
nullptr;
bool ret;
if (use_layers_src) {
@ -875,8 +880,8 @@ static bool data_transfer_layersmapping_generate(ListBase *r_map,
{
CustomData *cd_src, *cd_dst;
cd_datatransfer_interp interp = NULL;
void *interp_data = NULL;
cd_datatransfer_interp interp = nullptr;
void *interp_data = nullptr;
if (elem_type == ME_VERT) {
if (!(cddata_type & CD_FAKE)) {
@ -957,8 +962,8 @@ static bool data_transfer_layersmapping_generate(ListBase *r_map,
return true;
}
if (r_map && ELEM(cddata_type, CD_FAKE_SHARP, CD_FAKE_SEAM)) {
const size_t elem_size = sizeof(*((MEdge *)NULL));
const size_t data_size = sizeof(((MEdge *)NULL)->flag);
const size_t elem_size = sizeof(*((MEdge *)nullptr));
const size_t data_size = sizeof(((MEdge *)nullptr)->flag);
const size_t data_offset = offsetof(MEdge, flag);
const uint64_t data_flag = (cddata_type == CD_FAKE_SHARP) ? ME_SHARP : ME_SEAM;
@ -975,7 +980,7 @@ static bool data_transfer_layersmapping_generate(ListBase *r_map,
data_size,
data_offset,
data_flag,
NULL,
nullptr,
interp_data);
return true;
}
@ -1051,8 +1056,8 @@ static bool data_transfer_layersmapping_generate(ListBase *r_map,
return true;
}
if (r_map && cddata_type == CD_FAKE_SHARP) {
const size_t elem_size = sizeof(*((MPoly *)NULL));
const size_t data_size = sizeof(((MPoly *)NULL)->flag);
const size_t elem_size = sizeof(*((MPoly *)nullptr));
const size_t data_size = sizeof(((MPoly *)nullptr)->flag);
const size_t data_offset = offsetof(MPoly, flag);
const uint64_t data_flag = ME_SMOOTH;
@ -1069,7 +1074,7 @@ static bool data_transfer_layersmapping_generate(ListBase *r_map,
data_size,
data_offset,
data_flag,
NULL,
nullptr,
interp_data);
return true;
}
@ -1098,7 +1103,7 @@ void BKE_object_data_transfer_layout(struct Depsgraph *depsgraph,
BLI_assert((ob_src != ob_dst) && (ob_src->type == OB_MESH) && (ob_dst->type == OB_MESH));
me_dst = ob_dst->data;
me_dst = static_cast<Mesh *>(ob_dst->data);
/* Get source evaluated mesh. */
BKE_object_data_transfer_dttypes_to_cdmask(data_types, &me_src_mask);
@ -1132,7 +1137,7 @@ void BKE_object_data_transfer_layout(struct Depsgraph *depsgraph,
if (DT_DATATYPE_IS_VERT(dtdata_type)) {
const int num_elem_dst = me_dst->totvert;
data_transfer_layersmapping_generate(NULL,
data_transfer_layersmapping_generate(nullptr,
ob_src,
ob_dst,
me_src,
@ -1141,18 +1146,18 @@ void BKE_object_data_transfer_layout(struct Depsgraph *depsgraph,
cddata_type,
0,
0.0f,
NULL,
nullptr,
num_elem_dst,
use_create,
use_delete,
fromlayers,
tolayers,
NULL);
nullptr);
}
if (DT_DATATYPE_IS_EDGE(dtdata_type)) {
const int num_elem_dst = me_dst->totedge;
data_transfer_layersmapping_generate(NULL,
data_transfer_layersmapping_generate(nullptr,
ob_src,
ob_dst,
me_src,
@ -1161,18 +1166,18 @@ void BKE_object_data_transfer_layout(struct Depsgraph *depsgraph,
cddata_type,
0,
0.0f,
NULL,
nullptr,
num_elem_dst,
use_create,
use_delete,
fromlayers,
tolayers,
NULL);
nullptr);
}
if (DT_DATATYPE_IS_LOOP(dtdata_type)) {
const int num_elem_dst = me_dst->totloop;
data_transfer_layersmapping_generate(NULL,
data_transfer_layersmapping_generate(nullptr,
ob_src,
ob_dst,
me_src,
@ -1181,18 +1186,18 @@ void BKE_object_data_transfer_layout(struct Depsgraph *depsgraph,
cddata_type,
0,
0.0f,
NULL,
nullptr,
num_elem_dst,
use_create,
use_delete,
fromlayers,
tolayers,
NULL);
nullptr);
}
if (DT_DATATYPE_IS_POLY(dtdata_type)) {
const int num_elem_dst = me_dst->totpoly;
data_transfer_layersmapping_generate(NULL,
data_transfer_layersmapping_generate(nullptr,
ob_src,
ob_dst,
me_src,
@ -1201,13 +1206,13 @@ void BKE_object_data_transfer_layout(struct Depsgraph *depsgraph,
cddata_type,
0,
0.0f,
NULL,
nullptr,
num_elem_dst,
use_create,
use_delete,
fromlayers,
tolayers,
NULL);
nullptr);
}
}
}
@ -1248,13 +1253,13 @@ bool BKE_object_data_transfer_ex(struct Depsgraph *depsgraph,
/* Assumed always true if not using an evaluated mesh as destination. */
bool dirty_nors_dst = true;
const MDeformVert *mdef = NULL;
const MDeformVert *mdef = nullptr;
int vg_idx = -1;
float *weights[DATAMAX] = {NULL};
float *weights[DATAMAX] = {nullptr};
MeshPairRemap geom_map[DATAMAX] = {{0}};
bool geom_map_init[DATAMAX] = {0};
ListBase lay_map = {NULL};
ListBase lay_map = {nullptr};
bool changed = false;
bool is_modifier = false;
@ -1272,11 +1277,11 @@ bool BKE_object_data_transfer_ex(struct Depsgraph *depsgraph,
is_modifier = true;
}
else {
me_dst = ob_dst->data;
me_dst = static_cast<Mesh *>(ob_dst->data);
}
if (vgroup_name) {
mdef = CustomData_get_layer(&me_dst->vdata, CD_MDEFORMVERT);
mdef = static_cast<const MDeformVert *>(CustomData_get_layer(&me_dst->vdata, CD_MDEFORMVERT));
if (mdef) {
vg_idx = BKE_id_defgroup_name_index(&me_dst->id, vgroup_name);
}
@ -1289,7 +1294,7 @@ bool BKE_object_data_transfer_ex(struct Depsgraph *depsgraph,
if (is_modifier) {
me_src = BKE_modifier_get_evaluated_mesh_from_evaluated_object(ob_src);
if (me_src == NULL ||
if (me_src == nullptr ||
!CustomData_MeshMasks_are_matching(&ob_src->runtime.last_data_mask, &me_src_mask)) {
CLOG_WARN(&LOG, "Data Transfer: source mesh data is not ready - dependency cycle?");
return changed;
@ -1304,7 +1309,7 @@ bool BKE_object_data_transfer_ex(struct Depsgraph *depsgraph,
BKE_mesh_wrapper_ensure_mdata(me_src);
if (auto_transform) {
if (space_transform == NULL) {
if (space_transform == nullptr) {
space_transform = &auto_space_transform;
}
@ -1386,7 +1391,8 @@ bool BKE_object_data_transfer_ex(struct Depsgraph *depsgraph,
}
if (mdef && vg_idx != -1 && !weights[VDATA]) {
weights[VDATA] = MEM_mallocN(sizeof(*(weights[VDATA])) * (size_t)num_verts_dst, __func__);
weights[VDATA] = static_cast<float *>(
MEM_mallocN(sizeof(*(weights[VDATA])) * (size_t)num_verts_dst, __func__));
BKE_defvert_extract_vgroup_to_vertweights(
mdef, vg_idx, num_verts_dst, invert_vgroup, weights[VDATA]);
}
@ -1409,9 +1415,10 @@ bool BKE_object_data_transfer_ex(struct Depsgraph *depsgraph,
space_transform)) {
CustomDataTransferLayerMap *lay_mapit;
changed |= (lay_map.first != NULL);
changed |= (lay_map.first != nullptr);
for (lay_mapit = lay_map.first; lay_mapit; lay_mapit = lay_mapit->next) {
for (lay_mapit = static_cast<CustomDataTransferLayerMap *>(lay_map.first); lay_mapit;
lay_mapit = lay_mapit->next) {
CustomData_data_transfer(&geom_map[VDATA], lay_mapit);
}
@ -1465,7 +1472,8 @@ bool BKE_object_data_transfer_ex(struct Depsgraph *depsgraph,
}
if (mdef && vg_idx != -1 && !weights[EDATA]) {
weights[EDATA] = MEM_mallocN(sizeof(*weights[EDATA]) * (size_t)num_edges_dst, __func__);
weights[EDATA] = static_cast<float *>(
MEM_mallocN(sizeof(*weights[EDATA]) * (size_t)num_edges_dst, __func__));
BKE_defvert_extract_vgroup_to_edgeweights(
mdef, vg_idx, num_verts_dst, edges_dst, num_edges_dst, invert_vgroup, weights[EDATA]);
}
@ -1488,9 +1496,10 @@ bool BKE_object_data_transfer_ex(struct Depsgraph *depsgraph,
space_transform)) {
CustomDataTransferLayerMap *lay_mapit;
changed |= (lay_map.first != NULL);
changed |= (lay_map.first != nullptr);
for (lay_mapit = lay_map.first; lay_mapit; lay_mapit = lay_mapit->next) {
for (lay_mapit = static_cast<CustomDataTransferLayerMap *>(lay_map.first); lay_mapit;
lay_mapit = lay_mapit->next) {
CustomData_data_transfer(&geom_map[EDATA], lay_mapit);
}
@ -1560,7 +1569,8 @@ bool BKE_object_data_transfer_ex(struct Depsgraph *depsgraph,
}
if (mdef && vg_idx != -1 && !weights[LDATA]) {
weights[LDATA] = MEM_mallocN(sizeof(*weights[LDATA]) * (size_t)num_loops_dst, __func__);
weights[LDATA] = static_cast<float *>(
MEM_mallocN(sizeof(*weights[LDATA]) * (size_t)num_loops_dst, __func__));
BKE_defvert_extract_vgroup_to_loopweights(
mdef, vg_idx, num_verts_dst, loops_dst, num_loops_dst, invert_vgroup, weights[LDATA]);
}
@ -1583,9 +1593,10 @@ bool BKE_object_data_transfer_ex(struct Depsgraph *depsgraph,
space_transform)) {
CustomDataTransferLayerMap *lay_mapit;
changed |= (lay_map.first != NULL);
changed |= (lay_map.first != nullptr);
for (lay_mapit = lay_map.first; lay_mapit; lay_mapit = lay_mapit->next) {
for (lay_mapit = static_cast<CustomDataTransferLayerMap *>(lay_map.first); lay_mapit;
lay_mapit = lay_mapit->next) {
CustomData_data_transfer(&geom_map[LDATA], lay_mapit);
}
@ -1640,7 +1651,8 @@ bool BKE_object_data_transfer_ex(struct Depsgraph *depsgraph,
}
if (mdef && vg_idx != -1 && !weights[PDATA]) {
weights[PDATA] = MEM_mallocN(sizeof(*weights[PDATA]) * (size_t)num_polys_dst, __func__);
weights[PDATA] = static_cast<float *>(
MEM_mallocN(sizeof(*weights[PDATA]) * (size_t)num_polys_dst, __func__));
BKE_defvert_extract_vgroup_to_polyweights(mdef,
vg_idx,
num_verts_dst,
@ -1670,9 +1682,10 @@ bool BKE_object_data_transfer_ex(struct Depsgraph *depsgraph,
space_transform)) {
CustomDataTransferLayerMap *lay_mapit;
changed |= (lay_map.first != NULL);
changed |= (lay_map.first != nullptr);
for (lay_mapit = lay_map.first; lay_mapit; lay_mapit = lay_mapit->next) {
for (lay_mapit = static_cast<CustomDataTransferLayerMap *>(lay_map.first); lay_mapit;
lay_mapit = lay_mapit->next) {
CustomData_data_transfer(&geom_map[PDATA], lay_mapit);
}
@ -1724,7 +1737,7 @@ bool BKE_object_data_transfer_mesh(struct Depsgraph *depsgraph,
scene,
ob_src,
ob_dst,
NULL,
nullptr,
data_types,
use_create,
map_vert_mode,


@ -124,19 +124,17 @@ VArray<float3> mesh_normals_varray(const Mesh &mesh,
{
switch (domain) {
case ATTR_DOMAIN_FACE: {
return VArray<float3>::ForSpan(
{(float3 *)BKE_mesh_poly_normals_ensure(&mesh), mesh.totpoly});
return VArray<float3>::ForSpan(mesh.poly_normals());
}
case ATTR_DOMAIN_POINT: {
return VArray<float3>::ForSpan(
{(float3 *)BKE_mesh_vertex_normals_ensure(&mesh), mesh.totvert});
return VArray<float3>::ForSpan(mesh.vertex_normals());
}
case ATTR_DOMAIN_EDGE: {
/* In this case, start with vertex normals and convert to the edge domain, since the
* conversion from edges to vertices is very simple. Use "manual" domain interpolation
* instead of the GeometryComponent API to avoid calculating unnecessary values and to
* allow normalizing the result more simply. */
Span<float3> vert_normals{(float3 *)BKE_mesh_vertex_normals_ensure(&mesh), mesh.totvert};
Span<float3> vert_normals = mesh.vertex_normals();
const Span<MEdge> edges = mesh.edges();
Array<float3> edge_normals(mask.min_array_size());
for (const int i : mask) {
@ -153,9 +151,7 @@ VArray<float3> mesh_normals_varray(const Mesh &mesh,
* component's generic domain interpolation is fine, the data will still be normalized,
* since the face normal is just copied to every corner. */
return mesh.attributes().adapt_domain(
VArray<float3>::ForSpan({(float3 *)BKE_mesh_poly_normals_ensure(&mesh), mesh.totpoly}),
ATTR_DOMAIN_FACE,
ATTR_DOMAIN_CORNER);
VArray<float3>::ForSpan(mesh.poly_normals()), ATTR_DOMAIN_FACE, ATTR_DOMAIN_CORNER);
}
default:
return {};


@ -61,7 +61,9 @@ static void shapekey_copy_data(Main *UNUSED(bmain),
BLI_duplicatelist(&key_dst->block, &key_src->block);
KeyBlock *kb_dst, *kb_src;
for (kb_src = key_src->block.first, kb_dst = key_dst->block.first; kb_dst;
for (kb_src = static_cast<KeyBlock *>(key_src->block.first),
kb_dst = static_cast<KeyBlock *>(key_dst->block.first);
kb_dst;
kb_src = kb_src->next, kb_dst = kb_dst->next) {
if (kb_dst->data) {
kb_dst->data = MEM_dupallocN(kb_dst->data);
@ -77,7 +79,7 @@ static void shapekey_free_data(ID *id)
Key *key = (Key *)id;
KeyBlock *kb;
while ((kb = BLI_pophead(&key->block))) {
while ((kb = static_cast<KeyBlock *>(BLI_pophead(&key->block)))) {
if (kb->data) {
MEM_freeN(kb->data);
}
@ -95,7 +97,7 @@ static ID **shapekey_owner_pointer_get(ID *id)
{
Key *key = (Key *)id;
BLI_assert(key->from != NULL);
BLI_assert(key->from != nullptr);
BLI_assert(BKE_key_from_id(key->from) == key);
return &key->from;
@ -120,10 +122,10 @@ static void shapekey_blend_write(BlendWriter *writer, ID *id, const void *id_add
/* Do not store actual geometry data in case this is a library override ID. */
if (ID_IS_OVERRIDE_LIBRARY(key) && !is_undo) {
tmp_kb.totelem = 0;
tmp_kb.data = NULL;
tmp_kb.data = nullptr;
}
BLO_write_struct_at_address(writer, KeyBlock, kb, &tmp_kb);
if (tmp_kb.data != NULL) {
if (tmp_kb.data != nullptr) {
BLO_write_raw(writer, tmp_kb.totelem * key->elemsize, tmp_kb.data);
}
}
@ -137,7 +139,7 @@ static void shapekey_blend_write(BlendWriter *writer, ID *id, const void *id_add
static void switch_endian_keyblock(Key *key, KeyBlock *kb)
{
int elemsize = key->elemsize;
char *data = kb->data;
char *data = static_cast<char *>(kb->data);
for (int a = 0; a < kb->totelem; a++) {
const char *cp = key->elemstr;
@ -196,35 +198,35 @@ static void shapekey_blend_read_expand(BlendExpander *expander, ID *id)
}
IDTypeInfo IDType_ID_KE = {
.id_code = ID_KE,
.id_filter = FILTER_ID_KE,
.main_listbase_index = INDEX_ID_KE,
.struct_size = sizeof(Key),
.name = "Key",
.name_plural = "shape_keys",
.translation_context = BLT_I18NCONTEXT_ID_SHAPEKEY,
.flags = IDTYPE_FLAGS_NO_LIBLINKING,
.asset_type_info = NULL,
/* id_code */ ID_KE,
/* id_filter */ FILTER_ID_KE,
/* main_listbase_index */ INDEX_ID_KE,
/* struct_size */ sizeof(Key),
/* name */ "Key",
/* name_plural */ "shape_keys",
/* translation_context */ BLT_I18NCONTEXT_ID_SHAPEKEY,
/* flags */ IDTYPE_FLAGS_NO_LIBLINKING,
/* asset_type_info */ nullptr,
.init_data = NULL,
.copy_data = shapekey_copy_data,
.free_data = shapekey_free_data,
.make_local = NULL,
.foreach_id = shapekey_foreach_id,
.foreach_cache = NULL,
.foreach_path = NULL,
/* init_data */ nullptr,
/* copy_data */ shapekey_copy_data,
/* free_data */ shapekey_free_data,
/* make_local */ nullptr,
/* foreach_id */ shapekey_foreach_id,
/* foreach_cache */ nullptr,
/* foreach_path */ nullptr,
/* A bit weird, due to shape-keys not being strictly speaking embedded data... But they also
* share a lot with those (non linkable, only ever used by one owner ID, etc.). */
.owner_pointer_get = shapekey_owner_pointer_get,
/* owner_pointer_get */ shapekey_owner_pointer_get,
.blend_write = shapekey_blend_write,
.blend_read_data = shapekey_blend_read_data,
.blend_read_lib = shapekey_blend_read_lib,
.blend_read_expand = shapekey_blend_read_expand,
/* blend_write */ shapekey_blend_write,
/* blend_read_data */ shapekey_blend_read_data,
/* blend_read_lib */ shapekey_blend_read_lib,
/* blend_read_expand */ shapekey_blend_read_expand,
.blend_read_undo_preserve = NULL,
/* blend_read_undo_preserve */ nullptr,
.lib_override_apply_post = NULL,
/* lib_override_apply_post */ nullptr,
};
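
A note on why this hunk rewrites the initializer style (implied by the intern/key.c -> intern/key.cc rename in the CMakeLists hunk above rather than stated here): designated initializers are a C99 feature that the C++ this file is now compiled as does not accept, so each member is initialized positionally with its name preserved in a comment:

  .id_code = ID_KE,       /* designated initializer, valid in the old C file      */
  /* id_code */ ID_KE,    /* positional initialization, used in the new C++ file  */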
#define KEY_MODE_DUMMY 0 /* use where mode isn't checked for */
@ -246,7 +248,7 @@ void BKE_key_free_nolib(Key *key)
{
KeyBlock *kb;
while ((kb = BLI_pophead(&key->block))) {
while ((kb = static_cast<KeyBlock *>(BLI_pophead(&key->block)))) {
if (kb->data) {
MEM_freeN(kb->data);
}
@ -259,7 +261,7 @@ Key *BKE_key_add(Main *bmain, ID *id) /* common function */
Key *key;
char *el;
key = BKE_id_new(bmain, ID_KE, "Key");
key = static_cast<Key *>(BKE_id_new(bmain, ID_KE, "Key"));
key->type = KEY_NORMAL;
key->from = id;
@ -312,7 +314,7 @@ void BKE_key_sort(Key *key)
KeyBlock *kb2;
/* locate the key which is out of position */
for (kb = key->block.first; kb; kb = kb->next) {
for (kb = static_cast<KeyBlock *>(key->block.first); kb; kb = kb->next) {
if ((kb->next) && (kb->pos > kb->next->pos)) {
break;
}
@ -324,7 +326,7 @@ void BKE_key_sort(Key *key)
BLI_remlink(&key->block, kb);
/* find the right location and insert before */
for (kb2 = key->block.first; kb2; kb2 = kb2->next) {
for (kb2 = static_cast<KeyBlock *>(key->block.first); kb2; kb2 = kb2->next) {
if (kb2->pos > kb->pos) {
BLI_insertlinkafter(&key->block, kb2->prev, kb);
break;
@ -333,7 +335,7 @@ void BKE_key_sort(Key *key)
}
/* new rule; first key is refkey, this to match drawing channels... */
key->refkey = key->block.first;
key->refkey = static_cast<KeyBlock *>(key->block.first);
}
/**************** do the key ****************/
@ -458,8 +460,8 @@ static int setkeys(float fac, ListBase *lb, KeyBlock *k[], float t[4], int cycl)
float d, dpos, ofs = 0, lastpos;
short bsplinetype;
firstkey = lb->first;
k1 = lb->last;
firstkey = static_cast<KeyBlock *>(lb->first);
k1 = static_cast<KeyBlock *>(lb->last);
lastpos = k1->pos;
dpos = lastpos - firstkey->pos;
@ -475,18 +477,18 @@ static int setkeys(float fac, ListBase *lb, KeyBlock *k[], float t[4], int cycl)
/* if (fac < 0.0 || fac > 1.0) return 1; */
if (k1->next == NULL) {
if (k1->next == nullptr) {
return 1;
}
if (cycl) { /* pre-sort */
k[2] = k1->next;
k[3] = k[2]->next;
if (k[3] == NULL) {
if (k[3] == nullptr) {
k[3] = k1;
}
while (k1) {
if (k1->next == NULL) {
if (k1->next == nullptr) {
k[0] = k1;
}
k1 = k1->next;
@ -511,7 +513,7 @@ static int setkeys(float fac, ListBase *lb, KeyBlock *k[], float t[4], int cycl)
k[2] = k1->next;
t[2] = k[2]->pos;
k[3] = k[2]->next;
if (k[3] == NULL) {
if (k[3] == nullptr) {
k[3] = k[2];
}
t[3] = k[3]->pos;
@ -519,7 +521,7 @@ static int setkeys(float fac, ListBase *lb, KeyBlock *k[], float t[4], int cycl)
}
while (t[2] < fac) { /* find correct location */
if (k1->next == NULL) {
if (k1->next == nullptr) {
if (cycl) {
k1 = firstkey;
ofs += dpos;
@ -631,7 +633,8 @@ static char *key_block_get_data(Key *key, KeyBlock *actkb, KeyBlock *kb, char **
if (me->edit_mesh && me->edit_mesh->bm->totvert == kb->totelem) {
a = 0;
co = MEM_mallocN(sizeof(float[3]) * me->edit_mesh->bm->totvert, "key_block_get_data");
co = static_cast<float(*)[3]>(
MEM_mallocN(sizeof(float[3]) * me->edit_mesh->bm->totvert, "key_block_get_data"));
BM_ITER_MESH (eve, &iter, me->edit_mesh->bm, BM_VERTS_OF_MESH) {
copy_v3_v3(co[a], eve->co);
@ -644,14 +647,14 @@ static char *key_block_get_data(Key *key, KeyBlock *actkb, KeyBlock *kb, char **
}
}
*freedata = NULL;
return kb->data;
*freedata = nullptr;
return static_cast<char *>(kb->data);
}
/* currently only the first value of 'ofs' may be set. */
static bool key_pointer_size(const Key *key, const int mode, int *poinsize, int *ofs, int *step)
{
if (key->from == NULL) {
if (key->from == nullptr) {
return false;
}
@ -834,7 +837,7 @@ static void cp_cu_key(Curve *cu,
Nurb *nu;
int a, step, a1, a2;
for (a = 0, nu = cu->nurb.first; nu; nu = nu->next, a += step) {
for (a = 0, nu = static_cast<Nurb *>(cu->nurb.first); nu; nu = nu->next, a += step) {
if (nu->bp) {
step = KEYELEM_ELEM_LEN_BPOINT * nu->pntsu * nu->pntsv;
@ -842,7 +845,7 @@ static void cp_cu_key(Curve *cu,
a2 = min_ii(a + step, end);
if (a1 < a2) {
cp_key(a1, a2, tot, out, key, actkb, kb, NULL, KEY_MODE_BPOINT);
cp_key(a1, a2, tot, out, key, actkb, kb, nullptr, KEY_MODE_BPOINT);
}
}
else if (nu->bezt) {
@ -853,7 +856,7 @@ static void cp_cu_key(Curve *cu,
a2 = min_ii(a + step, end);
if (a1 < a2) {
cp_key(a1, a2, tot, out, key, actkb, kb, NULL, KEY_MODE_BEZTRIPLE);
cp_key(a1, a2, tot, out, key, actkb, kb, nullptr, KEY_MODE_BEZTRIPLE);
}
}
else {
@ -896,11 +899,12 @@ static void key_evaluate_relative(const int start,
elemsize = key->elemsize * step;
/* step 1 init */
cp_key(start, end, tot, basispoin, key, actkb, key->refkey, NULL, mode);
cp_key(start, end, tot, basispoin, key, actkb, key->refkey, nullptr, mode);
/* step 2: do it */
for (kb = key->block.first, keyblock_index = 0; kb; kb = kb->next, keyblock_index++) {
for (kb = static_cast<KeyBlock *>(key->block.first), keyblock_index = 0; kb;
kb = kb->next, keyblock_index++) {
if (kb != key->refkey) {
float icuval = kb->curval;
@ -908,12 +912,12 @@ static void key_evaluate_relative(const int start,
if (!(kb->flag & KEYBLOCK_MUTE) && icuval != 0.0f && kb->totelem == tot) {
KeyBlock *refb;
float weight,
*weights = per_keyblock_weights ? per_keyblock_weights[keyblock_index] : NULL;
char *freefrom = NULL;
*weights = per_keyblock_weights ? per_keyblock_weights[keyblock_index] : nullptr;
char *freefrom = nullptr;
/* reference now can be any block */
refb = BLI_findlink(&key->block, kb->relative);
if (refb == NULL) {
refb = static_cast<KeyBlock *>(BLI_findlink(&key->block, kb->relative));
if (refb == nullptr) {
continue;
}
@ -922,7 +926,7 @@ static void key_evaluate_relative(const int start,
/* For meshes, use the original values instead of the bmesh values to
* maintain a constant offset. */
reffrom = refb->data;
reffrom = static_cast<char *>(refb->data);
poin += start * poinsize;
reffrom += key->elemsize * start; /* key elemsize yes! */
@ -1167,11 +1171,11 @@ static void do_key(const int start,
break;
case IPO_BEZTRIPLE:
flerp(KEYELEM_FLOAT_LEN_BEZTRIPLE,
(void *)poin,
(void *)k1,
(void *)k2,
(void *)k3,
(void *)k4,
(float *)poin,
(float *)k1,
(float *)k2,
(float *)k3,
(float *)k4,
t);
break;
default:
@ -1263,20 +1267,20 @@ static void do_key(const int start,
static float *get_weights_array(Object *ob, char *vgroup, WeightsArrayCache *cache)
{
const MDeformVert *dvert = NULL;
BMEditMesh *em = NULL;
const MDeformVert *dvert = nullptr;
BMEditMesh *em = nullptr;
BMIter iter;
BMVert *eve;
int totvert = 0, defgrp_index = 0;
/* no vgroup string set? */
if (vgroup[0] == 0) {
return NULL;
return nullptr;
}
/* gather dvert and totvert */
if (ob->type == OB_MESH) {
Mesh *me = ob->data;
Mesh *me = static_cast<Mesh *>(ob->data);
dvert = BKE_mesh_deform_verts(me);
totvert = me->totvert;
@ -1285,13 +1289,13 @@ static float *get_weights_array(Object *ob, char *vgroup, WeightsArrayCache *cac
}
}
else if (ob->type == OB_LATTICE) {
Lattice *lt = ob->data;
Lattice *lt = static_cast<Lattice *>(ob->data);
dvert = lt->dvert;
totvert = lt->pntsu * lt->pntsv * lt->pntsw;
}
if (dvert == NULL) {
return NULL;
if (dvert == nullptr) {
return nullptr;
}
/* find the group (weak loop-in-loop) */
@ -1300,10 +1304,10 @@ static float *get_weights_array(Object *ob, char *vgroup, WeightsArrayCache *cac
float *weights;
if (cache) {
if (cache->defgroup_weights == NULL) {
if (cache->defgroup_weights == nullptr) {
int num_defgroup = BKE_object_defgroup_count(ob);
cache->defgroup_weights = MEM_callocN(sizeof(*cache->defgroup_weights) * num_defgroup,
"cached defgroup weights");
cache->defgroup_weights = static_cast<float **>(MEM_callocN(
sizeof(*cache->defgroup_weights) * num_defgroup, "cached defgroup weights"));
cache->num_defgroup_weights = num_defgroup;
}
@ -1312,13 +1316,13 @@ static float *get_weights_array(Object *ob, char *vgroup, WeightsArrayCache *cac
}
}
weights = MEM_mallocN(totvert * sizeof(float), "weights");
weights = static_cast<float *>(MEM_mallocN(totvert * sizeof(float), "weights"));
if (em) {
int i;
const int cd_dvert_offset = CustomData_get_offset(&em->bm->vdata, CD_MDEFORMVERT);
BM_ITER_MESH_INDEX (eve, &iter, em->bm, BM_VERTS_OF_MESH, i) {
dvert = BM_ELEM_CD_GET_VOID_P(eve, cd_dvert_offset);
dvert = static_cast<const MDeformVert *>(BM_ELEM_CD_GET_VOID_P(eve, cd_dvert_offset));
weights[i] = BKE_defvert_find_weight(dvert, defgrp_index);
}
}
@ -1334,7 +1338,7 @@ static float *get_weights_array(Object *ob, char *vgroup, WeightsArrayCache *cac
return weights;
}
return NULL;
return nullptr;
}
static float **keyblock_get_per_block_weights(Object *ob, Key *key, WeightsArrayCache *cache)
@ -1343,10 +1347,10 @@ static float **keyblock_get_per_block_weights(Object *ob, Key *key, WeightsArray
float **per_keyblock_weights;
int keyblock_index;
per_keyblock_weights = MEM_mallocN(sizeof(*per_keyblock_weights) * key->totkey,
"per keyblock weights");
per_keyblock_weights = static_cast<float **>(
MEM_mallocN(sizeof(*per_keyblock_weights) * key->totkey, "per keyblock weights"));
for (keyblock = key->block.first, keyblock_index = 0; keyblock;
for (keyblock = static_cast<KeyBlock *>(key->block.first), keyblock_index = 0; keyblock;
keyblock = keyblock->next, keyblock_index++) {
per_keyblock_weights[keyblock_index] = get_weights_array(ob, keyblock->vgroup, cache);
}
@ -1369,7 +1373,7 @@ static void keyblock_free_per_block_weights(Key *key,
}
MEM_freeN(cache->defgroup_weights);
}
cache->defgroup_weights = NULL;
cache->defgroup_weights = nullptr;
}
else {
for (a = 0; a < key->totkey; a++) {
@ -1389,7 +1393,7 @@ static void do_mesh_key(Object *ob, Key *key, char *out, const int tot)
int flag = 0;
if (key->type == KEY_RELATIVE) {
WeightsArrayCache cache = {0, NULL};
WeightsArrayCache cache = {0, nullptr};
float **per_keyblock_weights;
per_keyblock_weights = keyblock_get_per_block_weights(ob, key, &cache);
key_evaluate_relative(
@ -1405,7 +1409,7 @@ static void do_mesh_key(Object *ob, Key *key, char *out, const int tot)
do_key(0, tot, tot, (char *)out, key, actkb, k, t, KEY_MODE_DUMMY);
}
else {
cp_key(0, tot, tot, (char *)out, key, actkb, k[2], NULL, KEY_MODE_DUMMY);
cp_key(0, tot, tot, (char *)out, key, actkb, k[2], nullptr, KEY_MODE_DUMMY);
}
}
}
@ -1416,7 +1420,7 @@ static void do_cu_key(
Nurb *nu;
int a, step;
for (a = 0, nu = cu->nurb.first; nu; nu = nu->next, a += step) {
for (a = 0, nu = static_cast<Nurb *>(cu->nurb.first); nu; nu = nu->next, a += step) {
if (nu->bp) {
step = KEYELEM_ELEM_LEN_BPOINT * nu->pntsu * nu->pntsv;
do_key(a, a + step, tot, out, key, actkb, k, t, KEY_MODE_BPOINT);
@ -1436,14 +1440,14 @@ static void do_rel_cu_key(Curve *cu, Key *key, KeyBlock *actkb, char *out, const
Nurb *nu;
int a, step;
for (a = 0, nu = cu->nurb.first; nu; nu = nu->next, a += step) {
for (a = 0, nu = static_cast<Nurb *>(cu->nurb.first); nu; nu = nu->next, a += step) {
if (nu->bp) {
step = KEYELEM_ELEM_LEN_BPOINT * nu->pntsu * nu->pntsv;
key_evaluate_relative(a, a + step, tot, out, key, actkb, NULL, KEY_MODE_BPOINT);
key_evaluate_relative(a, a + step, tot, out, key, actkb, nullptr, KEY_MODE_BPOINT);
}
else if (nu->bezt) {
step = KEYELEM_ELEM_LEN_BEZTRIPLE * nu->pntsu;
key_evaluate_relative(a, a + step, tot, out, key, actkb, NULL, KEY_MODE_BEZTRIPLE);
key_evaluate_relative(a, a + step, tot, out, key, actkb, nullptr, KEY_MODE_BEZTRIPLE);
}
else {
step = 0;
@ -1453,7 +1457,7 @@ static void do_rel_cu_key(Curve *cu, Key *key, KeyBlock *actkb, char *out, const
static void do_curve_key(Object *ob, Key *key, char *out, const int tot)
{
Curve *cu = ob->data;
Curve *cu = static_cast<Curve *>(ob->data);
KeyBlock *k[4], *actkb = BKE_keyblock_from_object(ob);
float t[4];
int flag = 0;
@ -1477,17 +1481,17 @@ static void do_curve_key(Object *ob, Key *key, char *out, const int tot)
static void do_latt_key(Object *ob, Key *key, char *out, const int tot)
{
Lattice *lt = ob->data;
Lattice *lt = static_cast<Lattice *>(ob->data);
KeyBlock *k[4], *actkb = BKE_keyblock_from_object(ob);
float t[4];
int flag;
if (key->type == KEY_RELATIVE) {
float **per_keyblock_weights;
per_keyblock_weights = keyblock_get_per_block_weights(ob, key, NULL);
per_keyblock_weights = keyblock_get_per_block_weights(ob, key, nullptr);
key_evaluate_relative(
0, tot, tot, (char *)out, key, actkb, per_keyblock_weights, KEY_MODE_DUMMY);
keyblock_free_per_block_weights(key, per_keyblock_weights, NULL);
keyblock_free_per_block_weights(key, per_keyblock_weights, nullptr);
}
else {
const float ctime_scaled = key->ctime / 100.0f;
@ -1498,7 +1502,7 @@ static void do_latt_key(Object *ob, Key *key, char *out, const int tot)
do_key(0, tot, tot, (char *)out, key, actkb, k, t, KEY_MODE_DUMMY);
}
else {
cp_key(0, tot, tot, (char *)out, key, actkb, k[2], NULL, KEY_MODE_DUMMY);
cp_key(0, tot, tot, (char *)out, key, actkb, k[2], nullptr, KEY_MODE_DUMMY);
}
}
@ -1521,25 +1525,25 @@ float *BKE_key_evaluate_object_ex(
char *out;
int tot = 0, size = 0;
if (key == NULL || BLI_listbase_is_empty(&key->block)) {
return NULL;
if (key == nullptr || BLI_listbase_is_empty(&key->block)) {
return nullptr;
}
/* compute size of output array */
if (ob->type == OB_MESH) {
Mesh *me = ob->data;
Mesh *me = static_cast<Mesh *>(ob->data);
tot = me->totvert;
size = tot * sizeof(float[KEYELEM_FLOAT_LEN_COORD]);
}
else if (ob->type == OB_LATTICE) {
Lattice *lt = ob->data;
Lattice *lt = static_cast<Lattice *>(ob->data);
tot = lt->pntsu * lt->pntsv * lt->pntsw;
size = tot * sizeof(float[KEYELEM_FLOAT_LEN_COORD]);
}
else if (ELEM(ob->type, OB_CURVES_LEGACY, OB_SURF)) {
Curve *cu = ob->data;
Curve *cu = static_cast<Curve *>(ob->data);
tot = BKE_keyblock_curve_element_count(&cu->nurb);
size = tot * sizeof(float[KEYELEM_ELEM_SIZE_CURVE]);
@ -1547,16 +1551,16 @@ float *BKE_key_evaluate_object_ex(
/* if nothing to interpolate, cancel */
if (tot == 0 || size == 0) {
return NULL;
return nullptr;
}
/* allocate array */
if (arr == NULL) {
out = MEM_callocN(size, "BKE_key_evaluate_object out");
if (arr == nullptr) {
out = static_cast<char *>(MEM_callocN(size, "BKE_key_evaluate_object out"));
}
else {
if (arr_size != size) {
return NULL;
return nullptr;
}
out = (char *)arr;
@ -1564,19 +1568,19 @@ float *BKE_key_evaluate_object_ex(
if (ob->shapeflag & OB_SHAPE_LOCK) {
/* shape locked, copy the locked shape instead of blending */
KeyBlock *kb = BLI_findlink(&key->block, ob->shapenr - 1);
KeyBlock *kb = static_cast<KeyBlock *>(BLI_findlink(&key->block, ob->shapenr - 1));
if (kb && (kb->flag & KEYBLOCK_MUTE)) {
kb = key->refkey;
}
if (kb == NULL) {
kb = key->block.first;
if (kb == nullptr) {
kb = static_cast<KeyBlock *>(key->block.first);
ob->shapenr = 1;
}
if (OB_TYPE_SUPPORT_VGROUP(ob->type)) {
float *weights = get_weights_array(ob, kb->vgroup, NULL);
float *weights = get_weights_array(ob, kb->vgroup, nullptr);
cp_key(0, tot, tot, out, key, actkb, kb, weights, 0);
@ -1585,7 +1589,7 @@ float *BKE_key_evaluate_object_ex(
}
}
else if (ELEM(ob->type, OB_CURVES_LEGACY, OB_SURF)) {
cp_cu_key(ob->data, key, actkb, kb, 0, tot, out, tot);
cp_cu_key(static_cast<Curve *>(ob->data), key, actkb, kb, 0, tot, out, tot);
}
}
else {
@ -1603,7 +1607,7 @@ float *BKE_key_evaluate_object_ex(
}
}
if (obdata != NULL) {
if (obdata != nullptr) {
switch (GS(obdata->name)) {
case ID_ME: {
Mesh *mesh = (Mesh *)obdata;
@ -1637,14 +1641,15 @@ float *BKE_key_evaluate_object_ex(
float *BKE_key_evaluate_object(Object *ob, int *r_totelem)
{
return BKE_key_evaluate_object_ex(ob, r_totelem, NULL, 0, NULL);
return BKE_key_evaluate_object_ex(ob, r_totelem, nullptr, 0, nullptr);
}
int BKE_keyblock_element_count_from_shape(const Key *key, const int shape_index)
{
int result = 0;
int index = 0;
for (const KeyBlock *kb = key->block.first; kb; kb = kb->next, index++) {
for (const KeyBlock *kb = static_cast<const KeyBlock *>(key->block.first); kb;
kb = kb->next, index++) {
if (ELEM(shape_index, -1, index)) {
result += kb->totelem;
}
@ -1678,7 +1683,8 @@ void BKE_keyblock_data_get_from_shape(const Key *key, float (*arr)[3], const int
{
uint8_t *elements = (uint8_t *)arr;
int index = 0;
for (const KeyBlock *kb = key->block.first; kb; kb = kb->next, index++) {
for (const KeyBlock *kb = static_cast<const KeyBlock *>(key->block.first); kb;
kb = kb->next, index++) {
if (ELEM(shape_index, -1, index)) {
const int block_elem_len = kb->totelem * key->elemsize;
memcpy(elements, kb->data, block_elem_len);
@ -1705,7 +1711,7 @@ void BKE_keyblock_data_set_with_mat4(Key *key,
const float(*elements)[3] = coords;
int index = 0;
for (KeyBlock *kb = key->block.first; kb; kb = kb->next, index++) {
for (KeyBlock *kb = static_cast<KeyBlock *>(key->block.first); kb; kb = kb->next, index++) {
if (ELEM(shape_index, -1, index)) {
const int block_elem_len = kb->totelem;
float(*block_data)[3] = (float(*)[3])kb->data;
@ -1722,10 +1728,10 @@ void BKE_keyblock_data_set_with_mat4(Key *key,
void BKE_keyblock_curve_data_set_with_mat4(
Key *key, const ListBase *nurb, const int shape_index, const void *data, const float mat[4][4])
{
const uint8_t *elements = data;
const uint8_t *elements = static_cast<const uint8_t *>(data);
int index = 0;
for (KeyBlock *kb = key->block.first; kb; kb = kb->next, index++) {
for (KeyBlock *kb = static_cast<KeyBlock *>(key->block.first); kb; kb = kb->next, index++) {
if (ELEM(shape_index, -1, index)) {
const int block_elem_size = kb->totelem * key->elemsize;
BKE_keyblock_curve_data_transform(nurb, mat, elements, kb->data);
@ -1736,9 +1742,9 @@ void BKE_keyblock_curve_data_set_with_mat4(
void BKE_keyblock_data_set(Key *key, const int shape_index, const void *data)
{
const uint8_t *elements = data;
const uint8_t *elements = static_cast<const uint8_t *>(data);
int index = 0;
for (KeyBlock *kb = key->block.first; kb; kb = kb->next, index++) {
for (KeyBlock *kb = static_cast<KeyBlock *>(key->block.first); kb; kb = kb->next, index++) {
if (ELEM(shape_index, -1, index)) {
const int block_elem_size = kb->totelem * key->elemsize;
memcpy(kb->data, elements, block_elem_size);
@ -1770,7 +1776,7 @@ Key **BKE_key_from_id_p(ID *id)
}
case ID_CU_LEGACY: {
Curve *cu = (Curve *)id;
if (cu->vfont == NULL) {
if (cu->vfont == nullptr) {
return &cu->key;
}
break;
@ -1783,7 +1789,7 @@ Key **BKE_key_from_id_p(ID *id)
break;
}
return NULL;
return nullptr;
}
Key *BKE_key_from_id(ID *id)
@ -1794,16 +1800,16 @@ Key *BKE_key_from_id(ID *id)
return *key_p;
}
return NULL;
return nullptr;
}
Key **BKE_key_from_object_p(Object *ob)
{
if (ob == NULL || ob->data == NULL) {
return NULL;
if (ob == nullptr || ob->data == nullptr) {
return nullptr;
}
return BKE_key_from_id_p(ob->data);
return BKE_key_from_id_p(static_cast<ID *>(ob->data));
}
Key *BKE_key_from_object(Object *ob)
@ -1814,7 +1820,7 @@ Key *BKE_key_from_object(Object *ob)
return *key_p;
}
return NULL;
return nullptr;
}
KeyBlock *BKE_keyblock_add(Key *key, const char *name)
@ -1823,12 +1829,12 @@ KeyBlock *BKE_keyblock_add(Key *key, const char *name)
float curpos = -0.1;
int tot;
kb = key->block.last;
kb = static_cast<KeyBlock *>(key->block.last);
if (kb) {
curpos = kb->pos;
}
kb = MEM_callocN(sizeof(KeyBlock), "Keyblock");
kb = MEM_cnew<KeyBlock>("Keyblock");
BLI_addtail(&key->block, kb);
kb->type = KEY_LINEAR;
@ -1877,7 +1883,7 @@ KeyBlock *BKE_keyblock_add_ctime(Key *key, const char *name, const bool do_force
*/
if (!do_force && (key->type != KEY_RELATIVE)) {
KeyBlock *it_kb;
for (it_kb = key->block.first; it_kb; it_kb = it_kb->next) {
for (it_kb = static_cast<KeyBlock *>(key->block.first); it_kb; it_kb = it_kb->next) {
/* Use epsilon to avoid floating point precision issues.
* 1e-3 because the position is stored as frame * 1e-2. */
if (compare_ff(it_kb->pos, cpos, 1e-3f)) {
@ -1898,11 +1904,11 @@ KeyBlock *BKE_keyblock_from_object(Object *ob)
Key *key = BKE_key_from_object(ob);
if (key) {
KeyBlock *kb = BLI_findlink(&key->block, ob->shapenr - 1);
KeyBlock *kb = static_cast<KeyBlock *>(BLI_findlink(&key->block, ob->shapenr - 1));
return kb;
}
return NULL;
return nullptr;
}
KeyBlock *BKE_keyblock_from_object_reference(Object *ob)
@ -1913,13 +1919,13 @@ KeyBlock *BKE_keyblock_from_object_reference(Object *ob)
return key->refkey;
}
return NULL;
return nullptr;
}
KeyBlock *BKE_keyblock_from_key(Key *key, int index)
{
if (key) {
KeyBlock *kb = key->block.first;
KeyBlock *kb = static_cast<KeyBlock *>(key->block.first);
for (int i = 1; i < key->totkey; i++) {
kb = kb->next;
@ -1930,12 +1936,12 @@ KeyBlock *BKE_keyblock_from_key(Key *key, int index)
}
}
return NULL;
return nullptr;
}
KeyBlock *BKE_keyblock_find_name(Key *key, const char name[])
{
return BLI_findstring(&key->block, name, offsetof(KeyBlock, name));
return static_cast<KeyBlock *>(BLI_findstring(&key->block, name, offsetof(KeyBlock, name)));
}
KeyBlock *BKE_keyblock_find_uid(Key *key, const int uid)
@ -1945,7 +1951,7 @@ KeyBlock *BKE_keyblock_find_uid(Key *key, const int uid)
return kb;
}
}
return NULL;
return nullptr;
}
void BKE_keyblock_copy_settings(KeyBlock *kb_dst, const KeyBlock *kb_src)
@ -1965,8 +1971,8 @@ char *BKE_keyblock_curval_rnapath_get(const Key *key, const KeyBlock *kb)
PropertyRNA *prop;
/* sanity checks */
if (ELEM(NULL, key, kb)) {
return NULL;
if (ELEM(nullptr, key, kb)) {
return nullptr;
}
/* create the RNA pointer */
@ -1996,7 +2002,7 @@ void BKE_keyblock_update_from_lattice(const Lattice *lt, KeyBlock *kb)
}
bp = lt->def;
fp = kb->data;
fp = static_cast<float(*)[3]>(kb->data);
for (a = 0; a < kb->totelem; a++, fp++, bp++) {
copy_v3_v3(*fp, bp->vec);
}
@ -2031,7 +2037,7 @@ static void keyblock_data_convert_to_lattice(const float (*fp)[3],
void BKE_keyblock_convert_to_lattice(const KeyBlock *kb, Lattice *lt)
{
BPoint *bp = lt->def;
const float(*fp)[3] = kb->data;
const float(*fp)[3] = static_cast<const float(*)[3]>(kb->data);
const int tot = min_ii(kb->totelem, lt->pntsu * lt->pntsv * lt->pntsw);
keyblock_data_convert_to_lattice(fp, bp, tot);
@ -2044,7 +2050,7 @@ int BKE_keyblock_curve_element_count(const ListBase *nurb)
const Nurb *nu;
int tot = 0;
nu = nurb->first;
nu = static_cast<const Nurb *>(nurb->first);
while (nu) {
if (nu->bezt) {
tot += KEYELEM_ELEM_LEN_BEZTRIPLE * nu->pntsu;
@ -2074,8 +2080,8 @@ void BKE_keyblock_update_from_curve(const Curve *UNUSED(cu), KeyBlock *kb, const
return;
}
fp = kb->data;
for (nu = nurb->first; nu; nu = nu->next) {
fp = static_cast<float *>(kb->data);
for (nu = static_cast<Nurb *>(nurb->first); nu; nu = nu->next) {
if (nu->bezt) {
for (a = nu->pntsu, bezt = nu->bezt; a; a--, bezt++) {
for (int i = 0; i < 3; i++) {
@ -2102,9 +2108,9 @@ void BKE_keyblock_curve_data_transform(const ListBase *nurb,
const void *src_data,
void *dst_data)
{
const float *src = src_data;
float *dst = dst_data;
for (Nurb *nu = nurb->first; nu; nu = nu->next) {
const float *src = static_cast<const float *>(src_data);
float *dst = static_cast<float *>(dst_data);
for (Nurb *nu = static_cast<Nurb *>(nurb->first); nu; nu = nu->next) {
if (nu->bezt) {
for (int a = nu->pntsu; a; a--) {
for (int i = 0; i < 3; i++) {
@ -2148,8 +2154,8 @@ void BKE_keyblock_convert_from_curve(const Curve *cu, KeyBlock *kb, const ListBa
static void keyblock_data_convert_to_curve(const float *fp, ListBase *nurb, int totpoint)
{
for (Nurb *nu = nurb->first; nu && totpoint > 0; nu = nu->next) {
if (nu->bezt != NULL) {
for (Nurb *nu = static_cast<Nurb *>(nurb->first); nu && totpoint > 0; nu = nu->next) {
if (nu->bezt != nullptr) {
BezTriple *bezt = nu->bezt;
for (int i = nu->pntsu; i && (totpoint -= KEYELEM_ELEM_LEN_BEZTRIPLE) >= 0;
i--, bezt++, fp += KEYELEM_FLOAT_LEN_BEZTRIPLE) {
@ -2174,7 +2180,7 @@ static void keyblock_data_convert_to_curve(const float *fp, ListBase *nurb, int
void BKE_keyblock_convert_to_curve(KeyBlock *kb, Curve *UNUSED(cu), ListBase *nurb)
{
const float *fp = kb->data;
const float *fp = static_cast<const float *>(kb->data);
const int tot = min_ii(kb->totelem, BKE_keyblock_curve_element_count(nurb));
keyblock_data_convert_to_curve(fp, nurb, tot);
@ -2195,7 +2201,7 @@ void BKE_keyblock_update_from_mesh(const Mesh *me, KeyBlock *kb)
}
const MVert *mvert = BKE_mesh_verts(me);
fp = kb->data;
fp = static_cast<float(*)[3]>(kb->data);
for (a = 0; a < tot; a++, fp++, mvert++) {
copy_v3_v3(*fp, mvert->co);
}
@ -2226,7 +2232,7 @@ static void keyblock_data_convert_to_mesh(const float (*fp)[3], MVert *mvert, co
void BKE_keyblock_convert_to_mesh(const KeyBlock *kb, MVert *mvert, const int totvert)
{
const float(*fp)[3] = kb->data;
const float(*fp)[3] = static_cast<const float(*)[3]>(kb->data);
const int tot = min_ii(kb->totelem, totvert);
keyblock_data_convert_to_mesh(fp, mvert, tot);
@ -2238,31 +2244,33 @@ void BKE_keyblock_mesh_calc_normals(const KeyBlock *kb,
float (*r_polynors)[3],
float (*r_loopnors)[3])
{
if (r_vertnors == NULL && r_polynors == NULL && r_loopnors == NULL) {
if (r_vertnors == nullptr && r_polynors == nullptr && r_loopnors == nullptr) {
return;
}
MVert *verts = MEM_dupallocN(BKE_mesh_verts(mesh));
MVert *verts = static_cast<MVert *>(MEM_dupallocN(BKE_mesh_verts(mesh)));
BKE_keyblock_convert_to_mesh(kb, verts, mesh->totvert);
const MEdge *edges = BKE_mesh_edges(mesh);
const MPoly *polys = BKE_mesh_polys(mesh);
const MLoop *loops = BKE_mesh_loops(mesh);
const bool loop_normals_needed = r_loopnors != NULL;
const bool vert_normals_needed = r_vertnors != NULL || loop_normals_needed;
const bool poly_normals_needed = r_polynors != NULL || vert_normals_needed ||
const bool loop_normals_needed = r_loopnors != nullptr;
const bool vert_normals_needed = r_vertnors != nullptr || loop_normals_needed;
const bool poly_normals_needed = r_polynors != nullptr || vert_normals_needed ||
loop_normals_needed;
float(*vert_normals)[3] = r_vertnors;
float(*poly_normals)[3] = r_polynors;
bool free_vert_normals = false;
bool free_poly_normals = false;
if (vert_normals_needed && r_vertnors == NULL) {
vert_normals = MEM_malloc_arrayN(mesh->totvert, sizeof(float[3]), __func__);
if (vert_normals_needed && r_vertnors == nullptr) {
vert_normals = static_cast<float(*)[3]>(
MEM_malloc_arrayN(mesh->totvert, sizeof(float[3]), __func__));
free_vert_normals = true;
}
if (poly_normals_needed && r_polynors == NULL) {
poly_normals = MEM_malloc_arrayN(mesh->totpoly, sizeof(float[3]), __func__);
if (poly_normals_needed && r_polynors == nullptr) {
poly_normals = static_cast<float(*)[3]>(
MEM_malloc_arrayN(mesh->totpoly, sizeof(float[3]), __func__));
free_poly_normals = true;
}
@ -2281,7 +2289,8 @@ void BKE_keyblock_mesh_calc_normals(const KeyBlock *kb,
vert_normals);
}
if (loop_normals_needed) {
short(*clnors)[2] = CustomData_get_layer(&mesh->ldata, CD_CUSTOMLOOPNORMAL); /* May be NULL. */
short(*clnors)[2] = static_cast<short(*)[2]>(
CustomData_get_layer(&mesh->ldata, CD_CUSTOMLOOPNORMAL)); /* May be nullptr. */
BKE_mesh_normals_loop_split(verts,
vert_normals,
mesh->totvert,
@ -2295,8 +2304,8 @@ void BKE_keyblock_mesh_calc_normals(const KeyBlock *kb,
mesh->totpoly,
(mesh->flag & ME_AUTOSMOOTH) != 0,
mesh->smoothresh,
NULL,
NULL,
nullptr,
nullptr,
clnors);
}
@ -2314,20 +2323,20 @@ void BKE_keyblock_mesh_calc_normals(const KeyBlock *kb,
void BKE_keyblock_update_from_vertcos(const Object *ob, KeyBlock *kb, const float (*vertCos)[3])
{
const float(*co)[3] = vertCos;
float *fp = kb->data;
float *fp = static_cast<float *>(kb->data);
int tot, a;
#ifndef NDEBUG
if (ob->type == OB_LATTICE) {
Lattice *lt = ob->data;
Lattice *lt = static_cast<Lattice *>(ob->data);
BLI_assert((lt->pntsu * lt->pntsv * lt->pntsw) == kb->totelem);
}
else if (ELEM(ob->type, OB_CURVES_LEGACY, OB_SURF)) {
Curve *cu = ob->data;
Curve *cu = static_cast<Curve *>(ob->data);
BLI_assert(BKE_keyblock_curve_element_count(&cu->nurb) == kb->totelem);
}
else if (ob->type == OB_MESH) {
Mesh *me = ob->data;
Mesh *me = static_cast<Mesh *>(ob->data);
BLI_assert(me->totvert == kb->totelem);
}
else {
@ -2352,7 +2361,7 @@ void BKE_keyblock_update_from_vertcos(const Object *ob, KeyBlock *kb, const floa
const BezTriple *bezt;
const BPoint *bp;
for (nu = cu->nurb.first; nu; nu = nu->next) {
for (nu = static_cast<const Nurb *>(cu->nurb.first); nu; nu = nu->next) {
if (nu->bezt) {
for (a = nu->pntsu, bezt = nu->bezt; a; a--, bezt++) {
for (int i = 0; i < 3; i++, co++) {
@ -2407,7 +2416,7 @@ void BKE_keyblock_convert_from_vertcos(const Object *ob, KeyBlock *kb, const flo
float (*BKE_keyblock_convert_to_vertcos(const Object *ob, const KeyBlock *kb))[3]
{
float(*vertCos)[3], (*co)[3];
const float *fp = kb->data;
const float *fp = static_cast<const float *>(kb->data);
int tot = 0, a;
/* Count of vertex coords in array */
@ -2425,10 +2434,10 @@ float (*BKE_keyblock_convert_to_vertcos(const Object *ob, const KeyBlock *kb))[3
}
if (tot == 0) {
return NULL;
return nullptr;
}
co = vertCos = MEM_mallocN(tot * sizeof(*vertCos), __func__);
co = vertCos = static_cast<float(*)[3]>(MEM_mallocN(tot * sizeof(*vertCos), __func__));
/* Copy coords to array */
if (ELEM(ob->type, OB_MESH, OB_LATTICE)) {
@ -2442,7 +2451,7 @@ float (*BKE_keyblock_convert_to_vertcos(const Object *ob, const KeyBlock *kb))[3
const BezTriple *bezt;
const BPoint *bp;
for (nu = cu->nurb.first; nu; nu = nu->next) {
for (nu = static_cast<Nurb *>(cu->nurb.first); nu; nu = nu->next) {
if (nu->bezt) {
for (a = nu->pntsu, bezt = nu->bezt; a; a--, bezt++) {
for (int i = 0; i < 3; i++, co++) {
@ -2468,7 +2477,7 @@ float (*BKE_keyblock_convert_to_vertcos(const Object *ob, const KeyBlock *kb))[3
void BKE_keyblock_update_from_offset(const Object *ob, KeyBlock *kb, const float (*ofs)[3])
{
int a;
float *fp = kb->data;
float *fp = static_cast<float *>(kb->data);
if (ELEM(ob->type, OB_MESH, OB_LATTICE)) {
for (a = 0; a < kb->totelem; a++, fp += 3, ofs++) {
@ -2481,7 +2490,7 @@ void BKE_keyblock_update_from_offset(const Object *ob, KeyBlock *kb, const float
const BezTriple *bezt;
const BPoint *bp;
for (nu = cu->nurb.first; nu; nu = nu->next) {
for (nu = static_cast<const Nurb *>(cu->nurb.first); nu; nu = nu->next) {
if (nu->bezt) {
for (a = nu->pntsu, bezt = nu->bezt; a; a--, bezt++) {
for (int i = 0; i < 3; i++, ofs++) {
@ -2527,7 +2536,9 @@ bool BKE_keyblock_move(Object *ob, int org_index, int new_index)
/* We swap 'org' element with its previous/next neighbor (depending on direction of the move)
* repeatedly, until we reach final position.
* This allows us to only loop on the list once! */
for (kb = (rev ? key->block.last : key->block.first), i = (rev ? totkey - 1 : 0); kb;
for (kb = static_cast<KeyBlock *>(rev ? key->block.last : key->block.first),
i = (rev ? totkey - 1 : 0);
kb;
kb = (rev ? kb->prev : kb->next), rev ? i-- : i++) {
if (i == org_index) {
in_range = true; /* Start list items swapping... */
@ -2575,7 +2586,7 @@ bool BKE_keyblock_move(Object *ob, int org_index, int new_index)
}
/* First key is always refkey, matches interface and BKE_key_sort */
key->refkey = key->block.first;
key->refkey = static_cast<KeyBlock *>(key->block.first);
return true;
}
@ -2586,7 +2597,7 @@ bool BKE_keyblock_is_basis(const Key *key, const int index)
int i;
if (key->type == KEY_RELATIVE) {
for (i = 0, kb = key->block.first; kb; i++, kb = kb->next) {
for (i = 0, kb = static_cast<const KeyBlock *>(key->block.first); kb; i++, kb = kb->next) {
if ((i != index) && (kb->relative == index)) {
return true;
}

View File

@ -42,15 +42,16 @@ Mesh *BKE_mesh_mirror_bisect_on_mirror_plane_for_modifier(MirrorModifierData *mm
BMIter viter;
BMVert *v, *v_next;
bm = BKE_mesh_to_bmesh_ex(mesh,
&(struct BMeshCreateParams){0},
&(struct BMeshFromMeshParams){
.calc_face_normal = true,
.calc_vert_normal = true,
.cd_mask_extra = {.vmask = CD_MASK_ORIGINDEX,
.emask = CD_MASK_ORIGINDEX,
.pmask = CD_MASK_ORIGINDEX},
});
BMeshCreateParams bmesh_create_params{0};
BMeshFromMeshParams bmesh_from_mesh_params{};
bmesh_from_mesh_params.calc_face_normal = true;
bmesh_from_mesh_params.calc_vert_normal = true;
bmesh_from_mesh_params.cd_mask_extra.vmask = CD_MASK_ORIGINDEX;
bmesh_from_mesh_params.cd_mask_extra.emask = CD_MASK_ORIGINDEX;
bmesh_from_mesh_params.cd_mask_extra.pmask = CD_MASK_ORIGINDEX;
bm = BKE_mesh_to_bmesh_ex(mesh, &bmesh_create_params, &bmesh_from_mesh_params);
/* Define bisecting plane (aka mirror plane). */
float plane[4];
@ -76,7 +77,7 @@ Mesh *BKE_mesh_mirror_bisect_on_mirror_plane_for_modifier(MirrorModifierData *mm
}
}
result = BKE_mesh_from_bmesh_for_eval_nomain(bm, NULL, mesh);
result = BKE_mesh_from_bmesh_for_eval_nomain(bm, nullptr, mesh);
BM_mesh_free(bm);
return result;
@ -87,18 +88,15 @@ void BKE_mesh_mirror_apply_mirror_on_axis(struct Main *bmain,
const int axis,
const float dist)
{
BMesh *bm = BKE_mesh_to_bmesh_ex(mesh,
&(struct BMeshCreateParams){
.use_toolflags = 1,
},
&(struct BMeshFromMeshParams){
.calc_face_normal = true,
.calc_vert_normal = true,
.cd_mask_extra =
{
.vmask = CD_MASK_SHAPEKEY,
},
});
BMeshCreateParams bmesh_create_params{};
bmesh_create_params.use_toolflags = 1;
BMeshFromMeshParams bmesh_from_mesh_params{};
bmesh_from_mesh_params.calc_face_normal = true;
bmesh_from_mesh_params.calc_vert_normal = true;
bmesh_from_mesh_params.cd_mask_extra.vmask = CD_MASK_SHAPEKEY;
BMesh *bm = BKE_mesh_to_bmesh_ex(mesh, &bmesh_create_params, &bmesh_from_mesh_params);
BMO_op_callf(bm,
(BMO_FLAG_DEFAULTS & ~BMO_FLAG_RESPECT_HIDE),
"symmetrize input=%avef direction=%i dist=%f use_shapekey=%b",
@ -106,13 +104,10 @@ void BKE_mesh_mirror_apply_mirror_on_axis(struct Main *bmain,
dist,
true);
BM_mesh_bm_to_me(bmain,
bm,
mesh,
(&(struct BMeshToMeshParams){
.calc_object_remap = true,
BMeshToMeshParams bmesh_to_mesh_params{};
bmesh_to_mesh_params.calc_object_remap = true;
}));
BM_mesh_bm_to_me(bmain, bm, mesh, &bmesh_to_mesh_params);
BM_mesh_free(bm);
}
@ -139,14 +134,14 @@ Mesh *BKE_mesh_mirror_apply_mirror_on_axis_for_modifier(MirrorModifierData *mmd,
float plane_co[3], plane_no[3];
int i;
int a, totshape;
int *vtargetmap = NULL, *vtmap_a = NULL, *vtmap_b = NULL;
int *vtargetmap = nullptr, *vtmap_a = nullptr, *vtmap_b = nullptr;
/* mtx is the mirror transformation */
unit_m4(mtx);
mtx[axis][axis] = -1.0f;
Object *mirror_ob = mmd->mirror_ob;
if (mirror_ob != NULL) {
if (mirror_ob != nullptr) {
float tmp[4][4];
float itmp[4][4];
@ -187,7 +182,7 @@ Mesh *BKE_mesh_mirror_apply_mirror_on_axis_for_modifier(MirrorModifierData *mmd,
negate_v3_v3(plane_no, mtx[axis]);
}
Mesh *mesh_bisect = NULL;
Mesh *mesh_bisect = nullptr;
if (do_bisect) {
mesh_bisect = BKE_mesh_mirror_bisect_on_mirror_plane_for_modifier(
mmd, mesh, axis, plane_co, plane_no);
@ -230,7 +225,8 @@ Mesh *BKE_mesh_mirror_apply_mirror_on_axis_for_modifier(MirrorModifierData *mmd,
if (do_vtargetmap) {
/* second half is filled with -1 */
vtargetmap = MEM_malloc_arrayN(maxVerts, sizeof(int[2]), "MOD_mirror tarmap");
vtargetmap = static_cast<int *>(
MEM_malloc_arrayN(maxVerts, sizeof(int[2]), "MOD_mirror tarmap"));
vtmap_a = vtargetmap;
vtmap_b = vtargetmap + maxVerts;
@ -297,7 +293,8 @@ Mesh *BKE_mesh_mirror_apply_mirror_on_axis_for_modifier(MirrorModifierData *mmd,
/* handle shape keys */
totshape = CustomData_number_of_layers(&result->vdata, CD_SHAPEKEY);
for (a = 0; a < totshape; a++) {
float(*cos)[3] = CustomData_get_layer_n(&result->vdata, CD_SHAPEKEY, a);
float(*cos)[3] = static_cast<float(*)[3]>(
CustomData_get_layer_n(&result->vdata, CD_SHAPEKEY, a));
for (i = maxVerts; i < result->totvert; i++) {
mul_m4_v3(mtx, cos[i]);
}
@ -359,7 +356,8 @@ Mesh *BKE_mesh_mirror_apply_mirror_on_axis_for_modifier(MirrorModifierData *mmd,
const int totuv = CustomData_number_of_layers(&result->ldata, CD_MLOOPUV);
for (a = 0; a < totuv; a++) {
MLoopUV *dmloopuv = CustomData_get_layer_n(&result->ldata, CD_MLOOPUV, a);
MLoopUV *dmloopuv = static_cast<MLoopUV *>(
CustomData_get_layer_n(&result->ldata, CD_MLOOPUV, a));
int j = maxLoops;
dmloopuv += j; /* second set of loops only */
for (; j-- > 0; dmloopuv++) {
@ -392,10 +390,11 @@ Mesh *BKE_mesh_mirror_apply_mirror_on_axis_for_modifier(MirrorModifierData *mmd,
CustomData_has_layer(&result->ldata, CD_CUSTOMLOOPNORMAL)) {
const int totloop = result->totloop;
const int totpoly = result->totpoly;
float(*loop_normals)[3] = MEM_calloc_arrayN((size_t)totloop, sizeof(*loop_normals), __func__);
float(*loop_normals)[3] = static_cast<float(*)[3]>(
MEM_calloc_arrayN((size_t)totloop, sizeof(*loop_normals), __func__));
CustomData *ldata = &result->ldata;
short(*clnors)[2] = CustomData_get_layer(ldata, CD_CUSTOMLOOPNORMAL);
MLoopNorSpaceArray lnors_spacearr = {NULL};
short(*clnors)[2] = static_cast<short(*)[2]>(CustomData_get_layer(ldata, CD_CUSTOMLOOPNORMAL));
MLoopNorSpaceArray lnors_spacearr = {nullptr};
/* The transform matrix of a normal must be
* the transpose of the inverse of the geometry's transform matrix... */
@ -418,7 +417,7 @@ Mesh *BKE_mesh_mirror_apply_mirror_on_axis_for_modifier(MirrorModifierData *mmd,
totpoly,
true,
mesh->smoothresh,
NULL,
nullptr,
&lnors_spacearr,
clnors);
@ -448,7 +447,7 @@ Mesh *BKE_mesh_mirror_apply_mirror_on_axis_for_modifier(MirrorModifierData *mmd,
/* handle vgroup stuff */
if ((mmd->flag & MOD_MIR_VGROUP) && CustomData_has_layer(&result->vdata, CD_MDEFORMVERT)) {
MDeformVert *dvert = BKE_mesh_deform_verts_for_write(result) + maxVerts;
int *flip_map = NULL, flip_map_len = 0;
int *flip_map = nullptr, flip_map_len = 0;
flip_map = BKE_object_defgroup_flip_map(ob, false, &flip_map_len);
@ -480,8 +479,8 @@ Mesh *BKE_mesh_mirror_apply_mirror_on_axis_for_modifier(MirrorModifierData *mmd,
MEM_freeN(vtargetmap);
}
if (mesh_bisect != NULL) {
BKE_id_free(NULL, mesh_bisect);
if (mesh_bisect != nullptr) {
BKE_id_free(nullptr, mesh_bisect);
}
return result;

View File

@ -679,7 +679,7 @@ void ntreeBlendReadData(BlendDataReader *reader, ID *owner_id, bNodeTree *ntree)
/* Create the `nodes_by_id` cache eagerly so it can be expected to be valid. Because
* we create it here we also have to check for zero identifiers from previous versions. */
if (ntree->runtime->nodes_by_id.contains_as(node->identifier)) {
if (node->identifier == 0 || ntree->runtime->nodes_by_id.contains_as(node->identifier)) {
nodeUniqueID(ntree, node);
}
else {
@ -2192,7 +2192,7 @@ void nodeUniqueID(bNodeTree *ntree, bNode *node)
/* In the unlikely case that the random ID is already in use (or not strictly positive), choose a new one until it is valid. */
int32_t new_id = id_rng.get_int32();
while (ntree->runtime->nodes_by_id.contains_as(new_id)) {
while (ntree->runtime->nodes_by_id.contains_as(new_id) || new_id <= 0) {
new_id = id_rng.get_int32();
}

View File

@ -1013,7 +1013,7 @@ class NodeTreeMainUpdater {
/* Check the uniqueness of node identifiers. */
Set<int32_t> node_identifiers;
LISTBASE_FOREACH (bNode *, node, &ntree.nodes) {
BLI_assert(node->identifier >= 0);
BLI_assert(node->identifier > 0);
node_identifiers.add_new(node->identifier);
}
#endif

View File

@ -176,8 +176,7 @@ static int partition_indices_faces(int *prim_indices,
int axis,
float mid,
BBC *prim_bbc,
const MLoopTri *looptri,
const MPoly *mpoly)
const MLoopTri *looptri)
{
for (int i = lo; i < hi; i++) {
prim_scratch[i - lo] = prim_indices[i];
@ -586,8 +585,7 @@ static void build_sub(PBVH *pbvh,
axis,
(cb->bmax[axis] + cb->bmin[axis]) * 0.5f,
prim_bbc,
pbvh->looptri,
pbvh->mpoly);
pbvh->looptri);
}
else {
end = partition_indices_grids(pbvh->prim_indices,

View File

@ -0,0 +1,146 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma once
#include <atomic>
#include "BLI_array.hh"
namespace blender {
/**
* Same as `DisjointSet` but thread-safe (at a slightly higher cost in the single-threaded case).
*
* The implementation is based on the following paper:
* "Wait-free Parallel Algorithms for the Union-Find Problem"
* by Richard J. Anderson and Heather Woll.
*
* It's also inspired by this implementation: https://github.com/wjakob/dset.
*/
class AtomicDisjointSet {
private:
/* Relaxed memory order can generally be used with this algorithm. */
static constexpr auto relaxed = std::memory_order_relaxed;
struct Item {
int parent;
int rank;
};
/**
* An #Item per element. It's important that the entire item is in a single atomic, so that it
* can be updated atomically. */
mutable Array<std::atomic<Item>> items_;
public:
/**
* Create a new disjoint set of the given size. Initially, every element is in a separate set.
*/
AtomicDisjointSet(const int size);
/**
* Join the sets containing elements x and y. Nothing happens when they were in the same set
* before.
*/
void join(int x, int y)
{
while (true) {
x = this->find_root(x);
y = this->find_root(y);
if (x == y) {
/* They are in the same set already. */
return;
}
Item x_item = items_[x].load(relaxed);
Item y_item = items_[y].load(relaxed);
if (
/* Implement union by rank heuristic. */
x_item.rank > y_item.rank
/* If the rank is the same, make a consistent decision. */
|| (x_item.rank == y_item.rank && x < y)) {
std::swap(x_item, y_item);
std::swap(x, y);
}
/* Update parent of item x. */
const Item x_item_new{y, x_item.rank};
if (!items_[x].compare_exchange_strong(x_item, x_item_new, relaxed)) {
/* Another thread has updated item x, start again. */
continue;
}
if (x_item.rank == y_item.rank) {
/* Increase rank of item y. This may fail when another thread has updated item y in the
* meantime. That may lead to worse behavior with the union-by-rank heuristic, but seems to
* be ok in practice. */
const Item y_item_new{y, y_item.rank + 1};
items_[y].compare_exchange_weak(y_item, y_item_new, relaxed);
}
}
}
/**
* Return true when x and y are in the same set.
*/
bool in_same_set(int x, int y) const
{
while (true) {
x = this->find_root(x);
y = this->find_root(y);
if (x == y) {
return true;
}
if (items_[x].load(relaxed).parent == x) {
return false;
}
}
}
/**
* Find the element that represents the set containing x currently.
*/
int find_root(int x) const
{
while (true) {
const Item item = items_[x].load(relaxed);
if (x == item.parent) {
return x;
}
const int new_parent = items_[item.parent].load(relaxed).parent;
if (item.parent != new_parent) {
/* This halves the path for faster future lookups. The update may fail, but that does not change
* correctness. */
Item expected = item;
const Item desired{new_parent, item.rank};
items_[x].compare_exchange_weak(expected, desired, relaxed);
}
x = new_parent;
}
}
/**
* True when x represents a set.
*/
bool is_root(const int x) const
{
const Item item = items_[x].load(relaxed);
return item.parent == x;
}
/**
* Get an identifier for each element's set. This is deterministic and does not depend on the
* order of joins. The ids are ordered by their first occurrence. Consequently, `result[0]` is always zero
* (unless there are no elements).
*/
void calc_reduced_ids(MutableSpan<int> result) const;
/**
* Count the number of disjoint sets.
*/
int count_sets() const;
};
} // namespace blender
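/* A minimal usage sketch of the AtomicDisjointSet class declared above, added for illustration
 * only and not part of this patch. It is single-threaded for brevity; join() and the queries may
 * also be called concurrently. */
[[maybe_unused]] static void atomic_disjoint_set_usage_example()
{
  /* Five elements, each starting in its own set. */
  blender::AtomicDisjointSet disjoint_set(5);
  disjoint_set.join(0, 3);
  disjoint_set.join(1, 4);
  /* The sets are now {0, 3}, {1, 4} and {2}. */
  const bool same_03 = disjoint_set.in_same_set(0, 3); /* true */
  const bool same_02 = disjoint_set.in_same_set(0, 2); /* false */
  const int num_sets = disjoint_set.count_sets();      /* 3 */
  (void)same_03;
  (void)same_02;
  (void)num_sets;
}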

View File

@ -88,9 +88,9 @@ LinkNode *BLI_linklist_sort_r(LinkNode *list,
void *thunk) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(2);
#define BLI_linklist_prepend_alloca(listp, ptr) \
BLI_linklist_prepend_nlink(listp, ptr, alloca(sizeof(LinkNode)))
BLI_linklist_prepend_nlink(listp, ptr, (LinkNode *)alloca(sizeof(LinkNode)))
#define BLI_linklist_append_alloca(list_pair, ptr) \
BLI_linklist_append_nlink(list_pair, ptr, alloca(sizeof(LinkNode)))
BLI_linklist_append_nlink(list_pair, ptr, (LinkNode *)alloca(sizeof(LinkNode)))
#ifdef __cplusplus
}

View File

@ -50,6 +50,7 @@ set(SRC
intern/array_utils.c
intern/array_utils.cc
intern/astar.c
intern/atomic_disjoint_set.cc
intern/bitmap.c
intern/bitmap_draw_2d.c
intern/boxpack_2d.c
@ -172,6 +173,7 @@ set(SRC
BLI_asan.h
BLI_assert.h
BLI_astar.h
BLI_atomic_disjoint_set.hh
BLI_bit_vector.hh
BLI_bitmap.h
BLI_bitmap_draw_2d.h

View File

@ -0,0 +1,108 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include "BLI_atomic_disjoint_set.hh"
#include "BLI_enumerable_thread_specific.hh"
#include "BLI_map.hh"
#include "BLI_sort.hh"
#include "BLI_task.hh"
namespace blender {
AtomicDisjointSet::AtomicDisjointSet(const int size) : items_(size)
{
threading::parallel_for(IndexRange(size), 4096, [&](const IndexRange range) {
for (const int i : range) {
items_[i].store(Item{i, 0}, relaxed);
}
});
}
static void update_first_occurence(Map<int, int> &map, const int root, const int index)
{
map.add_or_modify(
root,
[&](int *first_occurence) { *first_occurence = index; },
[&](int *first_occurence) {
if (index < *first_occurence) {
*first_occurence = index;
}
});
}
void AtomicDisjointSet::calc_reduced_ids(MutableSpan<int> result) const
{
BLI_assert(result.size() == items_.size());
const int size = result.size();
/* Find the root for each element. With multi-threading, this root is not deterministic, so
* some postprocessing has to be done to make it deterministic. */
threading::EnumerableThreadSpecific<Map<int, int>> first_occurence_by_root_per_thread;
threading::parallel_for(IndexRange(size), 1024, [&](const IndexRange range) {
Map<int, int> &first_occurence_by_root = first_occurence_by_root_per_thread.local();
for (const int i : range) {
const int root = this->find_root(i);
result[i] = root;
update_first_occurence(first_occurence_by_root, root, i);
}
});
/* Build a map that contains the first element index that has a certain root. */
Map<int, int> &combined_map = first_occurence_by_root_per_thread.local();
for (const Map<int, int> &other_map : first_occurence_by_root_per_thread) {
if (&combined_map == &other_map) {
continue;
}
for (const auto item : other_map.items()) {
update_first_occurence(combined_map, item.key, item.value);
}
}
struct RootOccurence {
int root;
int first_occurence;
};
/* Sort roots by first occurrence. This removes the non-determinism above. */
Vector<RootOccurence, 16> root_occurences;
root_occurences.reserve(combined_map.size());
for (const auto item : combined_map.items()) {
root_occurences.append({item.key, item.value});
}
parallel_sort(root_occurences.begin(),
root_occurences.end(),
[](const RootOccurence &a, const RootOccurence &b) {
return a.first_occurence < b.first_occurence;
});
/* Remap original root values with deterministic values. */
Map<int, int> id_by_root;
id_by_root.reserve(root_occurences.size());
for (const int i : root_occurences.index_range()) {
id_by_root.add_new(root_occurences[i].root, i);
}
threading::parallel_for(IndexRange(size), 1024, [&](const IndexRange range) {
for (const int i : range) {
result[i] = id_by_root.lookup(result[i]);
}
});
}
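/* A small worked example of the reduction above, added for illustration only and not part of
 * this patch: ids are assigned by the first occurrence of each set, so the result is
 * deterministic regardless of join order or threading. */
[[maybe_unused]] static void calc_reduced_ids_example()
{
  AtomicDisjointSet disjoint_set(5);
  disjoint_set.join(3, 0);
  disjoint_set.join(4, 1);
  Array<int> ids(5);
  disjoint_set.calc_reduced_ids(ids);
  /* The sets are {0, 3}, {1, 4} and {2}. Ordered by first occurrence this yields
   * ids == {0, 1, 2, 0, 1}, with ids[0] always being 0. */
}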
int AtomicDisjointSet::count_sets() const
{
return threading::parallel_reduce<int>(
items_.index_range(),
1024,
0,
[&](const IndexRange range, int count) {
for (const int i : range) {
if (this->is_root(i)) {
count++;
}
}
return count;
},
[](const int a, const int b) { return a + b; });
}
} // namespace blender

View File

@ -182,6 +182,7 @@ void BLO_blendfiledata_free(BlendFileData *bfd);
typedef struct BLODataBlockInfo {
char name[64]; /* MAX_NAME */
struct AssetMetaData *asset_data;
bool free_asset_data;
/* Optimization: Tag data-blocks for which we know there is no preview.
* Knowing this can be used to skip the (potentially expensive) preview loading process. If this
* is set to true it means we looked for a preview and couldn't find one. False may mean that
@ -189,6 +190,15 @@ typedef struct BLODataBlockInfo {
bool no_preview_found;
} BLODataBlockInfo;
/**
* Frees contained data, not \a datablock_info itself.
*/
void BLO_datablock_info_free(BLODataBlockInfo *datablock_info);
/**
* Can be used to free the list returned by #BLO_blendhandle_get_datablock_info().
*/
void BLO_datablock_info_linklist_free(struct LinkNode * /*BLODataBlockInfo*/ datablock_infos);
/**
* Open a blendhandle from a file path.
*
@ -231,8 +241,11 @@ struct LinkNode *BLO_blendhandle_get_datablock_names(BlendHandle *bh,
* \param ofblocktype: The type of names to get.
* \param use_assets_only: Limit the result to assets only.
* \param r_tot_info_items: The length of the returned list.
*
* \return A BLI_linklist of `BLODataBlockInfo *`.
* The links and #BLODataBlockInfo.asset_data should be freed with MEM_freeN.
*
* \note The links should be freed using #BLO_datablock_info_free() or the entire list using
* #BLO_datablock_info_linklist_free().
*/
struct LinkNode * /*BLODataBlockInfo*/ BLO_blendhandle_get_datablock_info(BlendHandle *bh,
int ofblocktype,

View File

@ -23,6 +23,7 @@
#include "DNA_genfile.h"
#include "DNA_sdna_types.h"
#include "BKE_asset.h"
#include "BKE_icons.h"
#include "BKE_idtype.h"
#include "BKE_main.h"
@ -44,6 +45,23 @@ void BLO_blendhandle_print_sizes(BlendHandle *bh, void *fp);
/* Access routines used by filesel. */
void BLO_datablock_info_free(BLODataBlockInfo *datablock_info)
{
if (datablock_info->free_asset_data) {
BKE_asset_metadata_free(&datablock_info->asset_data);
datablock_info->free_asset_data = false;
}
}
void BLO_datablock_info_linklist_free(LinkNode *datablock_infos)
{
BLI_linklist_free(datablock_infos, [](void *link) {
BLODataBlockInfo *datablock_info = static_cast<BLODataBlockInfo *>(link);
BLO_datablock_info_free(datablock_info);
MEM_freeN(datablock_info);
});
}
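/* A usage sketch for the helpers above, added for illustration only and not part of this patch.
 * The file path argument and the ID_OB filter are placeholders; the point being shown is that
 * the returned list now owns its asset metadata and must be freed with
 * BLO_datablock_info_linklist_free() rather than with plain MEM_freeN() calls on the links. */
[[maybe_unused]] static void list_object_datablocks_example(const char *filepath)
{
  BlendFileReadReport bf_reports{};
  BlendHandle *bh = BLO_blendhandle_from_file(filepath, &bf_reports);
  if (bh == nullptr) {
    return;
  }
  int tot_info_items = 0;
  LinkNode *datablock_infos = BLO_blendhandle_get_datablock_info(
      bh, ID_OB, /*use_assets_only=*/false, &tot_info_items);
  for (LinkNode *ln = datablock_infos; ln != nullptr; ln = ln->next) {
    const BLODataBlockInfo *info = static_cast<const BLODataBlockInfo *>(ln->link);
    /* info->name and info->asset_data can be inspected here. */
    (void)info;
  }
  BLO_datablock_info_linklist_free(datablock_infos);
  BLO_blendhandle_close(bh);
}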
BlendHandle *BLO_blendhandle_from_file(const char *filepath, BlendFileReadReport *reports)
{
BlendHandle *bh;
@ -168,6 +186,7 @@ LinkNode *BLO_blendhandle_get_datablock_info(BlendHandle *bh,
STRNCPY(info->name, name);
info->asset_data = asset_meta_data;
info->free_asset_data = true;
bool has_preview = false;
/* See if we can find a preview in the data of this ID. */

View File

@ -1131,9 +1131,17 @@ static bool write_file_handle(Main *mainvar,
if (!wd->use_memfile) {
ID *id_iter;
FOREACH_MAIN_ID_BEGIN (mainvar, id_iter) {
if (ID_IS_LINKED(id_iter)) {
id_iter->tag |= LIB_TAG_INDIRECT;
id_iter->tag &= ~LIB_TAG_EXTERN;
if (ID_IS_LINKED(id_iter) && BKE_idtype_idcode_is_linkable(GS(id_iter->name))) {
if (USER_EXPERIMENTAL_TEST(&U, use_all_linked_data_direct)) {
/* Forces all linked data to be considered as directly linked.
* FIXME: Works around some BAT tool limitations for the Heist production; should be removed
* asap afterward. */
id_lib_extern(id_iter);
}
else {
id_iter->tag |= LIB_TAG_INDIRECT;
id_iter->tag &= ~LIB_TAG_EXTERN;
}
}
}
FOREACH_MAIN_ID_END;

View File

@ -88,7 +88,7 @@ set(SRC
intern/bmesh_mesh_debug.h
intern/bmesh_mesh_duplicate.c
intern/bmesh_mesh_duplicate.h
intern/bmesh_mesh_normals.c
intern/bmesh_mesh_normals.cc
intern/bmesh_mesh_normals.h
intern/bmesh_mesh_partial_update.c
intern/bmesh_mesh_partial_update.h

View File

@ -109,12 +109,12 @@ static void bm_vert_calc_normals_impl(BMVert *v)
zero_v3(v_no);
BMEdge *e_first = v->e;
if (e_first != NULL) {
if (e_first != nullptr) {
float e1diff[3], e2diff[3];
BMEdge *e_iter = e_first;
do {
BMLoop *l_first = e_iter->l;
if (l_first != NULL) {
if (l_first != nullptr) {
sub_v3_v3v3(e2diff, e_iter->v1->co, e_iter->v2->co);
normalize_v3(e2diff);
@ -155,12 +155,12 @@ static void bm_vert_calc_normals_with_coords(BMVert *v, BMVertsCalcNormalsWithCo
/* Loop over edges. */
BMEdge *e_first = v->e;
if (e_first != NULL) {
if (e_first != nullptr) {
float e1diff[3], e2diff[3];
BMEdge *e_iter = e_first;
do {
BMLoop *l_first = e_iter->l;
if (l_first != NULL) {
if (l_first != nullptr) {
sub_v3_v3v3(e2diff,
data->vcos[BM_elem_index_get(e_iter->v1)],
data->vcos[BM_elem_index_get(e_iter->v2)]);
@ -194,7 +194,8 @@ static void bm_vert_calc_normals_with_coords_cb(void *userdata,
MempoolIterData *mp_v,
const TaskParallelTLS *__restrict UNUSED(tls))
{
BMVertsCalcNormalsWithCoordsData *data = userdata;
BMVertsCalcNormalsWithCoordsData *data = static_cast<BMVertsCalcNormalsWithCoordsData *>(
userdata);
BMVert *v = (BMVert *)mp_v;
bm_vert_calc_normals_with_coords(v, data);
}
@ -210,16 +211,15 @@ static void bm_mesh_verts_calc_normals(BMesh *bm,
BLI_parallel_mempool_settings_defaults(&settings);
settings.use_threading = bm->totvert >= BM_OMP_LIMIT;
if (vcos == NULL) {
BM_iter_parallel(bm, BM_VERTS_OF_MESH, bm_vert_calc_normals_cb, NULL, &settings);
if (vcos == nullptr) {
BM_iter_parallel(bm, BM_VERTS_OF_MESH, bm_vert_calc_normals_cb, nullptr, &settings);
}
else {
BLI_assert(!ELEM(NULL, fnos, vnos));
BMVertsCalcNormalsWithCoordsData data = {
.fnos = fnos,
.vcos = vcos,
.vnos = vnos,
};
BLI_assert(!ELEM(nullptr, fnos, vnos));
BMVertsCalcNormalsWithCoordsData data{};
data.fnos = fnos;
data.vcos = vcos;
data.vnos = vnos;
BM_iter_parallel(bm, BM_VERTS_OF_MESH, bm_vert_calc_normals_with_coords_cb, &data, &settings);
}
}
@ -241,19 +241,18 @@ void BM_mesh_normals_update_ex(BMesh *bm, const struct BMeshNormalsUpdate_Params
BLI_parallel_mempool_settings_defaults(&settings);
settings.use_threading = bm->totedge >= BM_OMP_LIMIT;
BM_iter_parallel(bm, BM_FACES_OF_MESH, bm_face_calc_normals_cb, NULL, &settings);
BM_iter_parallel(bm, BM_FACES_OF_MESH, bm_face_calc_normals_cb, nullptr, &settings);
}
/* Add weighted face normals to vertices, and normalize vert normals. */
bm_mesh_verts_calc_normals(bm, NULL, NULL, NULL);
bm_mesh_verts_calc_normals(bm, nullptr, nullptr, nullptr);
}
void BM_mesh_normals_update(BMesh *bm)
{
BM_mesh_normals_update_ex(bm,
&(const struct BMeshNormalsUpdate_Params){
.face_normals = true,
});
BMeshNormalsUpdate_Params params{};
params.face_normals = true;
BM_mesh_normals_update_ex(bm, &params);
}
/** \} */
@ -307,11 +306,9 @@ void BM_mesh_normals_update_with_partial_ex(BMesh *UNUSED(bm),
void BM_mesh_normals_update_with_partial(BMesh *bm, const BMPartialUpdate *bmpinfo)
{
BM_mesh_normals_update_with_partial_ex(bm,
bmpinfo,
&(const struct BMeshNormalsUpdate_Params){
.face_normals = true,
});
BMeshNormalsUpdate_Params params{};
params.face_normals = true;
BM_mesh_normals_update_with_partial_ex(bm, bmpinfo, &params);
}
/** \} */
@ -389,7 +386,7 @@ static void bm_mesh_edges_sharp_tag(BMesh *bm,
if (do_sharp_edges_tag) {
BM_ITER_MESH_INDEX (e, &eiter, bm, BM_EDGES_OF_MESH, i) {
BM_elem_index_set(e, i); /* set_inline */
if (e->l != NULL) {
if (e->l != nullptr) {
bm_edge_tag_from_smooth_and_set_sharp(fnos, e, split_angle_cos);
}
}
@ -397,7 +394,7 @@ static void bm_mesh_edges_sharp_tag(BMesh *bm,
else {
BM_ITER_MESH_INDEX (e, &eiter, bm, BM_EDGES_OF_MESH, i) {
BM_elem_index_set(e, i); /* set_inline */
if (e->l != NULL) {
if (e->l != nullptr) {
bm_edge_tag_from_smooth(fnos, e, split_angle_cos);
}
}
@ -413,7 +410,7 @@ void BM_edges_sharp_from_angle_set(BMesh *bm, const float split_angle)
return;
}
bm_mesh_edges_sharp_tag(bm, NULL, cosf(split_angle), true);
bm_mesh_edges_sharp_tag(bm, nullptr, cosf(split_angle), true);
}
/** \} */
@ -481,8 +478,8 @@ static int bm_mesh_loops_calc_normals_for_loop(BMesh *bm,
MLoopNorSpaceArray *r_lnors_spacearr)
{
BLI_assert((bm->elem_index_dirty & BM_LOOP) == 0);
BLI_assert((fnos == NULL) || ((bm->elem_index_dirty & BM_FACE) == 0));
BLI_assert((vcos == NULL) || ((bm->elem_index_dirty & BM_VERT) == 0));
BLI_assert((fnos == nullptr) || ((bm->elem_index_dirty & BM_FACE) == 0));
BLI_assert((vcos == nullptr) || ((bm->elem_index_dirty & BM_VERT) == 0));
UNUSED_VARS_NDEBUG(bm);
int handled = 0;
@ -537,15 +534,16 @@ static int bm_mesh_loops_calc_normals_for_loop(BMesh *bm,
normalize_v3(vec_prev);
}
BKE_lnor_space_define(lnor_space, r_lnos[l_curr_index], vec_curr, vec_prev, NULL);
BKE_lnor_space_define(lnor_space, r_lnos[l_curr_index], vec_curr, vec_prev, nullptr);
/* We know there is only one loop in this space,
* no need to create a linklist in this case... */
BKE_lnor_space_add_loop(r_lnors_spacearr, lnor_space, l_curr_index, l_curr, true);
if (has_clnors) {
const short(*clnor)[2] = clnors_data ? &clnors_data[l_curr_index] :
(const void *)BM_ELEM_CD_GET_VOID_P(
l_curr, cd_loop_clnors_offset);
const short(*clnor)[2] = clnors_data ?
&clnors_data[l_curr_index] :
static_cast<const short(*)[2]>(
BM_ELEM_CD_GET_VOID_P(l_curr, cd_loop_clnors_offset));
BKE_lnor_space_custom_data_to_normal(lnor_space, *clnor, r_lnos[l_curr_index]);
}
}
@ -579,15 +577,16 @@ static int bm_mesh_loops_calc_normals_for_loop(BMesh *bm,
/* We validate clnors data on the fly - cheapest way to do it! */
int clnors_avg[2] = {0, 0};
const short(*clnor_ref)[2] = NULL;
const short(*clnor_ref)[2] = nullptr;
int clnors_count = 0;
bool clnors_invalid = false;
const float *co_pivot = vcos ? vcos[BM_elem_index_get(v_pivot)] : v_pivot->co;
MLoopNorSpace *lnor_space = r_lnors_spacearr ? BKE_lnor_space_create(r_lnors_spacearr) : NULL;
MLoopNorSpace *lnor_space = r_lnors_spacearr ? BKE_lnor_space_create(r_lnors_spacearr) :
nullptr;
BLI_assert((edge_vectors == NULL) || BLI_stack_is_empty(edge_vectors));
BLI_assert((edge_vectors == nullptr) || BLI_stack_is_empty(edge_vectors));
lfan_pivot = l_curr;
lfan_pivot_index = BM_elem_index_get(lfan_pivot);
@ -646,9 +645,10 @@ static int bm_mesh_loops_calc_normals_for_loop(BMesh *bm,
if (has_clnors) {
/* Accumulate all clnors, if they are not all equal we have to fix that! */
const short(*clnor)[2] = clnors_data ? &clnors_data[lfan_pivot_index] :
(const void *)BM_ELEM_CD_GET_VOID_P(
lfan_pivot, cd_loop_clnors_offset);
const short(*clnor)[2] = clnors_data ?
&clnors_data[lfan_pivot_index] :
static_cast<const short(*)[2]>(BM_ELEM_CD_GET_VOID_P(
lfan_pivot, cd_loop_clnors_offset));
if (clnors_count) {
clnors_invalid |= ((*clnor_ref)[0] != (*clnor)[0] || (*clnor_ref)[1] != (*clnor)[1]);
}
@ -713,7 +713,7 @@ static int bm_mesh_loops_calc_normals_for_loop(BMesh *bm,
/* Prints continuously when merging custom normals, so it is commented out. */
// printf("Invalid clnors in this fan!\n");
while ((clnor = BLI_SMALLSTACK_POP(clnors))) {
while ((clnor = static_cast<short *>(BLI_SMALLSTACK_POP(clnors)))) {
// print_v2("org clnor", clnor);
clnor[0] = (short)clnors_avg[0];
clnor[1] = (short)clnors_avg[1];
@ -735,7 +735,7 @@ static int bm_mesh_loops_calc_normals_for_loop(BMesh *bm,
/* Copy back the final computed normal into all related loop-normals. */
float *nor;
while ((nor = BLI_SMALLSTACK_POP(normal))) {
while ((nor = static_cast<float *>(BLI_SMALLSTACK_POP(normal)))) {
copy_v3_v3(nor, lnor);
}
}
@ -791,14 +791,14 @@ BLI_INLINE bool bm_edge_is_smooth_no_angle_test(const BMEdge *e,
static void bm_edge_tag_from_smooth(const float (*fnos)[3], BMEdge *e, const float split_angle_cos)
{
BLI_assert(e->l != NULL);
BLI_assert(e->l != nullptr);
BMLoop *l_a = e->l, *l_b = l_a->radial_next;
bool is_smooth = false;
if (bm_edge_is_smooth_no_angle_test(e, l_a, l_b)) {
if (split_angle_cos != -1.0f) {
const float dot = (fnos == NULL) ? dot_v3v3(l_a->f->no, l_b->f->no) :
dot_v3v3(fnos[BM_elem_index_get(l_a->f)],
fnos[BM_elem_index_get(l_b->f)]);
const float dot = (fnos == nullptr) ? dot_v3v3(l_a->f->no, l_b->f->no) :
dot_v3v3(fnos[BM_elem_index_get(l_a->f)],
fnos[BM_elem_index_get(l_b->f)]);
if (dot >= split_angle_cos) {
is_smooth = true;
}
@ -832,14 +832,14 @@ static void bm_edge_tag_from_smooth_and_set_sharp(const float (*fnos)[3],
BMEdge *e,
const float split_angle_cos)
{
BLI_assert(e->l != NULL);
BLI_assert(e->l != nullptr);
BMLoop *l_a = e->l, *l_b = l_a->radial_next;
bool is_smooth = false;
if (bm_edge_is_smooth_no_angle_test(e, l_a, l_b)) {
if (split_angle_cos != -1.0f) {
const float dot = (fnos == NULL) ? dot_v3v3(l_a->f->no, l_b->f->no) :
dot_v3v3(fnos[BM_elem_index_get(l_a->f)],
fnos[BM_elem_index_get(l_b->f)]);
const float dot = (fnos == nullptr) ? dot_v3v3(l_a->f->no, l_b->f->no) :
dot_v3v3(fnos[BM_elem_index_get(l_a->f)],
fnos[BM_elem_index_get(l_b->f)]);
if (dot >= split_angle_cos) {
is_smooth = true;
}
@ -887,7 +887,7 @@ static void bm_mesh_loops_calc_normals_for_vert_with_clnors(BMesh *bm,
*
* Sorting is only performed when multiple fans are found. */
const bool has_clnors = true;
LinkNode *loops_of_vert = NULL;
LinkNode *loops_of_vert = nullptr;
int loops_of_vert_count = 0;
/* When false the caller must have already tagged the edges. */
const bool do_edge_tag = (split_angle_cos != EDGE_TAG_FROM_SPLIT_ANGLE_BYPASS);
@ -899,7 +899,7 @@ static void bm_mesh_loops_calc_normals_for_vert_with_clnors(BMesh *bm,
BMEdge *e_curr_iter = v->e;
do { /* Edges of vertex. */
BMLoop *l_curr = e_curr_iter->l;
if (l_curr == NULL) {
if (l_curr == nullptr) {
continue;
}
@ -927,7 +927,7 @@ static void bm_mesh_loops_calc_normals_for_vert_with_clnors(BMesh *bm,
} while ((l_curr = l_curr->radial_next) != e_curr_iter->l);
} while ((e_curr_iter = BM_DISK_EDGE_NEXT(e_curr_iter, v)) != v->e);
if (UNLIKELY(loops_of_vert == NULL)) {
if (UNLIKELY(loops_of_vert == nullptr)) {
return;
}
@ -944,8 +944,8 @@ static void bm_mesh_loops_calc_normals_for_vert_with_clnors(BMesh *bm,
/* Keep track of the number of loops that have been assigned. */
int loops_of_vert_handled = 0;
while (loops_of_vert != NULL) {
BMLoop *l_best = loops_of_vert->link;
while (loops_of_vert != nullptr) {
BMLoop *l_best = static_cast<BMLoop *>(loops_of_vert->link);
loops_of_vert = loops_of_vert->next;
BLI_assert(l_best->v == v);
@ -997,7 +997,7 @@ static void bm_mesh_loops_calc_normals_for_vert_without_clnors(
BMVert *v)
{
const bool has_clnors = false;
const short(*clnors_data)[2] = NULL;
const short(*clnors_data)[2] = nullptr;
/* When false the caller must have already tagged the edges. */
const bool do_edge_tag = (split_angle_cos != EDGE_TAG_FROM_SPLIT_ANGLE_BYPASS);
const int cd_loop_clnors_offset = -1;
@ -1008,7 +1008,7 @@ static void bm_mesh_loops_calc_normals_for_vert_without_clnors(
e_curr_iter = v->e;
do { /* Edges of vertex. */
BMLoop *l_curr = e_curr_iter->l;
if (l_curr == NULL) {
if (l_curr == nullptr) {
continue;
}
@ -1027,7 +1027,7 @@ static void bm_mesh_loops_calc_normals_for_vert_without_clnors(
e_curr_iter = v->e;
do { /* Edges of vertex. */
BMLoop *l_curr = e_curr_iter->l;
if (l_curr == NULL) {
if (l_curr == nullptr) {
continue;
}
do { /* Radial loops. */
@ -1055,7 +1055,7 @@ static void bm_mesh_loops_calc_normals_for_vert_without_clnors(
/**
* BMesh version of BKE_mesh_normals_loop_split() in `mesh_evaluate.cc`
* Will use first clnors_data array, and fallback to cd_loop_clnors_offset
* (use NULL and -1 to not use clnors).
* (use nullptr and -1 to not use clnors).
*
* \note This sets #BM_ELEM_TAG which is used in tool code (e.g. T84426).
* we could add a low-level API flag for this, see #BM_ELEM_API_FLAG_ENABLE and friends.
@ -1076,9 +1076,9 @@ static void bm_mesh_loops_calc_normals__single_threaded(BMesh *bm,
/* When false the caller must have already tagged the edges. */
const bool do_edge_tag = (split_angle_cos != EDGE_TAG_FROM_SPLIT_ANGLE_BYPASS);
MLoopNorSpaceArray _lnors_spacearr = {NULL};
MLoopNorSpaceArray _lnors_spacearr = {nullptr};
BLI_Stack *edge_vectors = NULL;
BLI_Stack *edge_vectors = nullptr;
{
char htype = 0;
@ -1159,9 +1159,9 @@ typedef struct BMLoopsCalcNormalsWithCoordsData {
const float (*vcos)[3];
BMesh *bm;
const short (*clnors_data)[2];
const int cd_loop_clnors_offset;
const bool do_rebuild;
const float split_angle_cos;
int cd_loop_clnors_offset;
bool do_rebuild;
float split_angle_cos;
/* Output. */
float (*r_lnos)[3];
@ -1171,7 +1171,7 @@ typedef struct BMLoopsCalcNormalsWithCoordsData {
typedef struct BMLoopsCalcNormalsWithCoords_TLS {
BLI_Stack *edge_vectors;
/** Copied from #BMLoopsCalcNormalsWithCoordsData.r_lnors_spacearr when it's not NULL. */
/** Copied from #BMLoopsCalcNormalsWithCoordsData.r_lnors_spacearr when it's not nullptr. */
MLoopNorSpaceArray *lnors_spacearr;
MLoopNorSpaceArray lnors_spacearr_buf;
} BMLoopsCalcNormalsWithCoords_TLS;
@ -1179,15 +1179,15 @@ typedef struct BMLoopsCalcNormalsWithCoords_TLS {
static void bm_mesh_loops_calc_normals_for_vert_init_fn(const void *__restrict userdata,
void *__restrict chunk)
{
const BMLoopsCalcNormalsWithCoordsData *data = userdata;
BMLoopsCalcNormalsWithCoords_TLS *tls_data = chunk;
auto *data = static_cast<const BMLoopsCalcNormalsWithCoordsData *>(userdata);
auto *tls_data = static_cast<BMLoopsCalcNormalsWithCoords_TLS *>(chunk);
if (data->r_lnors_spacearr) {
tls_data->edge_vectors = BLI_stack_new(sizeof(float[3]), __func__);
BKE_lnor_spacearr_tls_init(data->r_lnors_spacearr, &tls_data->lnors_spacearr_buf);
tls_data->lnors_spacearr = &tls_data->lnors_spacearr_buf;
}
else {
tls_data->lnors_spacearr = NULL;
tls_data->lnors_spacearr = nullptr;
}
}
@ -1195,8 +1195,8 @@ static void bm_mesh_loops_calc_normals_for_vert_reduce_fn(const void *__restrict
void *__restrict UNUSED(chunk_join),
void *__restrict chunk)
{
const BMLoopsCalcNormalsWithCoordsData *data = userdata;
BMLoopsCalcNormalsWithCoords_TLS *tls_data = chunk;
auto *data = static_cast<const BMLoopsCalcNormalsWithCoordsData *>(userdata);
auto *tls_data = static_cast<BMLoopsCalcNormalsWithCoords_TLS *>(chunk);
if (data->r_lnors_spacearr) {
BKE_lnor_spacearr_tls_join(data->r_lnors_spacearr, tls_data->lnors_spacearr);
@ -1206,8 +1206,8 @@ static void bm_mesh_loops_calc_normals_for_vert_reduce_fn(const void *__restrict
static void bm_mesh_loops_calc_normals_for_vert_free_fn(const void *__restrict userdata,
void *__restrict chunk)
{
const BMLoopsCalcNormalsWithCoordsData *data = userdata;
BMLoopsCalcNormalsWithCoords_TLS *tls_data = chunk;
auto *data = static_cast<const BMLoopsCalcNormalsWithCoordsData *>(userdata);
auto *tls_data = static_cast<BMLoopsCalcNormalsWithCoords_TLS *>(chunk);
if (data->r_lnors_spacearr) {
BLI_stack_free(tls_data->edge_vectors);
@ -1218,11 +1218,11 @@ static void bm_mesh_loops_calc_normals_for_vert_with_clnors_fn(
void *userdata, MempoolIterData *mp_v, const TaskParallelTLS *__restrict tls)
{
BMVert *v = (BMVert *)mp_v;
if (v->e == NULL) {
if (v->e == nullptr) {
return;
}
BMLoopsCalcNormalsWithCoordsData *data = userdata;
BMLoopsCalcNormalsWithCoords_TLS *tls_data = tls->userdata_chunk;
auto *data = static_cast<BMLoopsCalcNormalsWithCoordsData *>(userdata);
auto *tls_data = static_cast<BMLoopsCalcNormalsWithCoords_TLS *>(tls->userdata_chunk);
bm_mesh_loops_calc_normals_for_vert_with_clnors(data->bm,
data->vcos,
data->fnos,
@ -1243,11 +1243,11 @@ static void bm_mesh_loops_calc_normals_for_vert_without_clnors_fn(
void *userdata, MempoolIterData *mp_v, const TaskParallelTLS *__restrict tls)
{
BMVert *v = (BMVert *)mp_v;
if (v->e == NULL) {
if (v->e == nullptr) {
return;
}
BMLoopsCalcNormalsWithCoordsData *data = userdata;
BMLoopsCalcNormalsWithCoords_TLS *tls_data = tls->userdata_chunk;
auto *data = static_cast<BMLoopsCalcNormalsWithCoordsData *>(userdata);
auto *tls_data = static_cast<BMLoopsCalcNormalsWithCoords_TLS *>(tls->userdata_chunk);
bm_mesh_loops_calc_normals_for_vert_without_clnors(data->bm,
data->vcos,
data->fnos,
@ -1273,7 +1273,7 @@ static void bm_mesh_loops_calc_normals__multi_threaded(BMesh *bm,
const float split_angle_cos)
{
const bool has_clnors = clnors_data || (cd_loop_clnors_offset != -1);
MLoopNorSpaceArray _lnors_spacearr = {NULL};
MLoopNorSpaceArray _lnors_spacearr = {nullptr};
{
char htype = BM_LOOP;
@ -1303,7 +1303,7 @@ static void bm_mesh_loops_calc_normals__multi_threaded(BMesh *bm,
TaskParallelSettings settings;
BLI_parallel_mempool_settings_defaults(&settings);
BMLoopsCalcNormalsWithCoords_TLS tls = {NULL};
BMLoopsCalcNormalsWithCoords_TLS tls = {nullptr};
settings.userdata_chunk = &tls;
settings.userdata_chunk_size = sizeof(tls);
@ -1312,17 +1312,16 @@ static void bm_mesh_loops_calc_normals__multi_threaded(BMesh *bm,
settings.func_reduce = bm_mesh_loops_calc_normals_for_vert_reduce_fn;
settings.func_free = bm_mesh_loops_calc_normals_for_vert_free_fn;
BMLoopsCalcNormalsWithCoordsData data = {
.bm = bm,
.vcos = vcos,
.fnos = fnos,
.r_lnos = r_lnos,
.r_lnors_spacearr = r_lnors_spacearr,
.clnors_data = clnors_data,
.cd_loop_clnors_offset = cd_loop_clnors_offset,
.do_rebuild = do_rebuild,
.split_angle_cos = split_angle_cos,
};
BMLoopsCalcNormalsWithCoordsData data{};
data.bm = bm;
data.vcos = vcos;
data.fnos = fnos;
data.r_lnos = r_lnos;
data.r_lnors_spacearr = r_lnors_spacearr;
data.clnors_data = clnors_data;
data.cd_loop_clnors_offset = cd_loop_clnors_offset;
data.do_rebuild = do_rebuild;
data.split_angle_cos = split_angle_cos;
BM_iter_parallel(bm,
BM_VERTS_OF_MESH,
@ -1391,12 +1390,12 @@ static bool bm_mesh_loops_split_lnor_fans(BMesh *bm,
for (int i = 0; i < bm->totloop; i++) {
if (!lnors_spacearr->lspacearr[i]) {
/* This should not happen in theory, but in some rare case (probably ugly geometry)
* we can get some NULL loopspacearr at this point. :/
* we can get some nullptr loopspacearr at this point. :/
* Maybe we should set those loops' edges as sharp?
*/
BLI_BITMAP_ENABLE(done_loops, i);
if (G.debug & G_DEBUG) {
printf("WARNING! Getting invalid NULL loop space for loop %d!\n", i);
printf("WARNING! Getting invalid nullptr loop space for loop %d!\n", i);
}
continue;
}
@ -1417,11 +1416,11 @@ static bool bm_mesh_loops_split_lnor_fans(BMesh *bm,
}
LinkNode *loops = lnors_spacearr->lspacearr[i]->loops;
BMLoop *prev_ml = NULL;
const float *org_nor = NULL;
BMLoop *prev_ml = nullptr;
const float *org_nor = nullptr;
while (loops) {
BMLoop *ml = loops->link;
BMLoop *ml = static_cast<BMLoop *>(loops->link);
const int lidx = BM_elem_index_get(ml);
const float *nor = new_lnors[lidx];
@ -1453,7 +1452,7 @@ static bool bm_mesh_loops_split_lnor_fans(BMesh *bm,
* See T45984. */
loops = lnors_spacearr->lspacearr[i]->loops;
if (loops && org_nor) {
BMLoop *ml = loops->link;
BMLoop *ml = static_cast<BMLoop *>(loops->link);
const int lidx = BM_elem_index_get(ml);
const float *nor = new_lnors[lidx];
@ -1491,7 +1490,8 @@ static void bm_mesh_loops_assign_normal_data(BMesh *bm,
if (!lnors_spacearr->lspacearr[i]) {
BLI_BITMAP_ENABLE(done_loops, i);
if (G.debug & G_DEBUG) {
printf("WARNING! Still getting invalid NULL loop space in second loop for loop %d!\n", i);
printf("WARNING! Still getting invalid nullptr loop space in second loop for loop %d!\n",
i);
}
continue;
}
@ -1510,8 +1510,9 @@ static void bm_mesh_loops_assign_normal_data(BMesh *bm,
BLI_assert(lidx == i);
const float *nor = new_lnors[lidx];
short *clnor = r_clnors_data ? &r_clnors_data[lidx] :
BM_ELEM_CD_GET_VOID_P(ml, cd_loop_clnors_offset);
short *clnor = static_cast<short *>(r_clnors_data ?
&r_clnors_data[lidx] :
BM_ELEM_CD_GET_VOID_P(ml, cd_loop_clnors_offset));
BKE_lnor_space_custom_normal_to_data(lnors_spacearr->lspacearr[i], nor, clnor);
BLI_BITMAP_ENABLE(done_loops, i);
@ -1524,11 +1525,12 @@ static void bm_mesh_loops_assign_normal_data(BMesh *bm,
zero_v3(avg_nor);
while (loops) {
BMLoop *ml = loops->link;
BMLoop *ml = static_cast<BMLoop *>(loops->link);
const int lidx = BM_elem_index_get(ml);
const float *nor = new_lnors[lidx];
short *clnor = r_clnors_data ? &r_clnors_data[lidx] :
BM_ELEM_CD_GET_VOID_P(ml, cd_loop_clnors_offset);
short *clnor = static_cast<short *>(
r_clnors_data ? &r_clnors_data[lidx] :
BM_ELEM_CD_GET_VOID_P(ml, cd_loop_clnors_offset));
avg_nor_count++;
add_v3_v3(avg_nor, nor);
@ -1542,7 +1544,7 @@ static void bm_mesh_loops_assign_normal_data(BMesh *bm,
BKE_lnor_space_custom_normal_to_data(
lnors_spacearr->lspacearr[i], avg_nor, clnor_data_tmp);
while ((clnor_data = BLI_SMALLSTACK_POP(clnors_data))) {
while ((clnor_data = static_cast<short *>(BLI_SMALLSTACK_POP(clnors_data)))) {
clnor_data[0] = clnor_data_tmp[0];
clnor_data[1] = clnor_data_tmp[1];
}
@ -1573,7 +1575,8 @@ static void bm_mesh_loops_custom_normals_set(BMesh *bm,
BMFace *f;
BMLoop *l;
BMIter liter, fiter;
float(*cur_lnors)[3] = MEM_mallocN(sizeof(*cur_lnors) * bm->totloop, __func__);
float(*cur_lnors)[3] = static_cast<float(*)[3]>(
MEM_mallocN(sizeof(*cur_lnors) * bm->totloop, __func__));
BKE_lnor_spacearr_clear(r_lnors_spacearr);
@ -1596,12 +1599,13 @@ static void bm_mesh_loops_custom_normals_set(BMesh *bm,
/* Extract new normals from the data layer if necessary. */
float(*custom_lnors)[3] = new_lnors;
if (new_lnors == NULL) {
custom_lnors = MEM_mallocN(sizeof(*new_lnors) * bm->totloop, __func__);
if (new_lnors == nullptr) {
custom_lnors = static_cast<float(*)[3]>(
MEM_mallocN(sizeof(*new_lnors) * bm->totloop, __func__));
BM_ITER_MESH (f, &fiter, bm, BM_FACES_OF_MESH) {
BM_ITER_ELEM (l, &liter, f, BM_LOOPS_OF_FACE) {
const float *normal = BM_ELEM_CD_GET_VOID_P(l, cd_new_lnors_offset);
const float *normal = static_cast<float *>(BM_ELEM_CD_GET_VOID_P(l, cd_new_lnors_offset));
copy_v3_v3(custom_lnors[BM_elem_index_get(l)], normal);
}
}
@ -1719,7 +1723,7 @@ void BM_loops_calc_normal_vcos(BMesh *bm,
void BM_lnorspacearr_store(BMesh *bm, float (*r_lnors)[3])
{
BLI_assert(bm->lnor_spacearr != NULL);
BLI_assert(bm->lnor_spacearr != nullptr);
if (!CustomData_has_layer(&bm->ldata, CD_CUSTOMLOOPNORMAL)) {
BM_data_layer_add(bm, &bm->ldata, CD_CUSTOMLOOPNORMAL);
@ -1728,14 +1732,14 @@ void BM_lnorspacearr_store(BMesh *bm, float (*r_lnors)[3])
int cd_loop_clnors_offset = CustomData_get_offset(&bm->ldata, CD_CUSTOMLOOPNORMAL);
BM_loops_calc_normal_vcos(bm,
NULL,
NULL,
NULL,
nullptr,
nullptr,
nullptr,
true,
M_PI,
r_lnors,
bm->lnor_spacearr,
NULL,
nullptr,
cd_loop_clnors_offset,
false);
bm->spacearr_dirty &= ~(BM_SPACEARR_DIRTY | BM_SPACEARR_DIRTY_ALL);
@ -1752,7 +1756,7 @@ void BM_lnorspace_invalidate(BMesh *bm, const bool do_invalidate_all)
bm->spacearr_dirty |= BM_SPACEARR_DIRTY_ALL;
return;
}
if (bm->lnor_spacearr == NULL) {
if (bm->lnor_spacearr == nullptr) {
bm->spacearr_dirty |= BM_SPACEARR_DIRTY_ALL;
return;
}
@ -1813,7 +1817,7 @@ void BM_lnorspace_invalidate(BMesh *bm, const bool do_invalidate_all)
void BM_lnorspace_rebuild(BMesh *bm, bool preserve_clnor)
{
BLI_assert(bm->lnor_spacearr != NULL);
BLI_assert(bm->lnor_spacearr != nullptr);
if (!(bm->spacearr_dirty & (BM_SPACEARR_DIRTY | BM_SPACEARR_DIRTY_ALL))) {
return;
@ -1822,22 +1826,24 @@ void BM_lnorspace_rebuild(BMesh *bm, bool preserve_clnor)
BMLoop *l;
BMIter fiter, liter;
float(*r_lnors)[3] = MEM_callocN(sizeof(*r_lnors) * bm->totloop, __func__);
float(*oldnors)[3] = preserve_clnor ? MEM_mallocN(sizeof(*oldnors) * bm->totloop, __func__) :
NULL;
float(*r_lnors)[3] = static_cast<float(*)[3]>(
MEM_callocN(sizeof(*r_lnors) * bm->totloop, __func__));
float(*oldnors)[3] = static_cast<float(*)[3]>(
preserve_clnor ? MEM_mallocN(sizeof(*oldnors) * bm->totloop, __func__) : nullptr);
int cd_loop_clnors_offset = CustomData_get_offset(&bm->ldata, CD_CUSTOMLOOPNORMAL);
BM_mesh_elem_index_ensure(bm, BM_LOOP);
if (preserve_clnor) {
BLI_assert(bm->lnor_spacearr->lspacearr != NULL);
BLI_assert(bm->lnor_spacearr->lspacearr != nullptr);
BM_ITER_MESH (f, &fiter, bm, BM_FACES_OF_MESH) {
BM_ITER_ELEM (l, &liter, f, BM_LOOPS_OF_FACE) {
if (BM_ELEM_API_FLAG_TEST(l, BM_LNORSPACE_UPDATE) ||
bm->spacearr_dirty & BM_SPACEARR_DIRTY_ALL) {
short(*clnor)[2] = BM_ELEM_CD_GET_VOID_P(l, cd_loop_clnors_offset);
short(*clnor)[2] = static_cast<short(*)[2]>(
BM_ELEM_CD_GET_VOID_P(l, cd_loop_clnors_offset));
int l_index = BM_elem_index_get(l);
BKE_lnor_space_custom_data_to_normal(
@ -1851,14 +1857,14 @@ void BM_lnorspace_rebuild(BMesh *bm, bool preserve_clnor)
BKE_lnor_spacearr_clear(bm->lnor_spacearr);
}
BM_loops_calc_normal_vcos(bm,
NULL,
NULL,
NULL,
nullptr,
nullptr,
nullptr,
true,
M_PI,
r_lnors,
bm->lnor_spacearr,
NULL,
nullptr,
cd_loop_clnors_offset,
true);
MEM_freeN(r_lnors);
@ -1868,7 +1874,8 @@ void BM_lnorspace_rebuild(BMesh *bm, bool preserve_clnor)
if (BM_ELEM_API_FLAG_TEST(l, BM_LNORSPACE_UPDATE) ||
bm->spacearr_dirty & BM_SPACEARR_DIRTY_ALL) {
if (preserve_clnor) {
short(*clnor)[2] = BM_ELEM_CD_GET_VOID_P(l, cd_loop_clnors_offset);
short(*clnor)[2] = static_cast<short(*)[2]>(
BM_ELEM_CD_GET_VOID_P(l, cd_loop_clnors_offset));
int l_index = BM_elem_index_get(l);
BKE_lnor_space_custom_normal_to_data(
bm->lnor_spacearr->lspacearr[l_index], oldnors[l_index], *clnor);
@ -1888,11 +1895,12 @@ void BM_lnorspace_rebuild(BMesh *bm, bool preserve_clnor)
void BM_lnorspace_update(BMesh *bm)
{
if (bm->lnor_spacearr == NULL) {
bm->lnor_spacearr = MEM_callocN(sizeof(*bm->lnor_spacearr), __func__);
if (bm->lnor_spacearr == nullptr) {
bm->lnor_spacearr = MEM_cnew<MLoopNorSpaceArray>(__func__);
}
if (bm->lnor_spacearr->lspacearr == NULL) {
float(*lnors)[3] = MEM_callocN(sizeof(*lnors) * bm->totloop, __func__);
if (bm->lnor_spacearr->lspacearr == nullptr) {
float(*lnors)[3] = static_cast<float(*)[3]>(
MEM_callocN(sizeof(*lnors) * bm->totloop, __func__));
BM_lnorspacearr_store(bm, lnors);
@ -1922,15 +1930,24 @@ void BM_lnorspace_err(BMesh *bm)
bm->spacearr_dirty |= BM_SPACEARR_DIRTY_ALL;
bool clear = true;
MLoopNorSpaceArray *temp = MEM_callocN(sizeof(*temp), __func__);
temp->lspacearr = NULL;
MLoopNorSpaceArray *temp = MEM_cnew<MLoopNorSpaceArray>(__func__);
temp->lspacearr = nullptr;
BKE_lnor_spacearr_init(temp, bm->totloop, MLNOR_SPACEARR_BMLOOP_PTR);
int cd_loop_clnors_offset = CustomData_get_offset(&bm->ldata, CD_CUSTOMLOOPNORMAL);
float(*lnors)[3] = MEM_callocN(sizeof(*lnors) * bm->totloop, __func__);
BM_loops_calc_normal_vcos(
bm, NULL, NULL, NULL, true, M_PI, lnors, temp, NULL, cd_loop_clnors_offset, true);
float(*lnors)[3] = static_cast<float(*)[3]>(MEM_callocN(sizeof(*lnors) * bm->totloop, __func__));
BM_loops_calc_normal_vcos(bm,
nullptr,
nullptr,
nullptr,
true,
M_PI,
lnors,
temp,
nullptr,
cd_loop_clnors_offset,
true);
for (int i = 0; i < bm->totloop; i++) {
int j = 0;
@ -1965,7 +1982,7 @@ static void bm_loop_normal_mark_indiv_do_loop(BMLoop *l,
int *totloopsel,
const bool do_all_loops_of_vert)
{
if (l != NULL) {
if (l != nullptr) {
const int l_idx = BM_elem_index_get(l);
if (!BLI_BITMAP_TEST(loops, l_idx)) {
@ -2016,7 +2033,7 @@ static int bm_loop_normal_mark_indiv(BMesh *bm, BLI_bitmap *loops, const bool do
BM_mesh_elem_index_ensure(bm, BM_LOOP);
BLI_assert(bm->lnor_spacearr != NULL);
BLI_assert(bm->lnor_spacearr != nullptr);
BLI_assert(bm->lnor_spacearr->data_type == MLNOR_SPACEARR_BMLOOP_PTR);
if (use_sel_face_history) {
@ -2025,7 +2042,7 @@ static int bm_loop_normal_mark_indiv(BMesh *bm, BLI_bitmap *loops, const bool do
* but it is not designed to be used with huge selection sets,
* rather with only a few items selected at most. */
/* Goes from last selected to the first selected element. */
for (ese = bm->selected.last; ese; ese = ese->prev) {
for (ese = static_cast<BMEditSelection *>(bm->selected.last); ese; ese = ese->prev) {
if (ese->htype == BM_FACE) {
/* If current face is selected,
* then any verts to be edited must have been selected before it. */
@ -2118,11 +2135,11 @@ static int bm_loop_normal_mark_indiv(BMesh *bm, BLI_bitmap *loops, const bool do
static void loop_normal_editdata_init(
BMesh *bm, BMLoopNorEditData *lnor_ed, BMVert *v, BMLoop *l, const int offset)
{
BLI_assert(bm->lnor_spacearr != NULL);
BLI_assert(bm->lnor_spacearr->lspacearr != NULL);
BLI_assert(bm->lnor_spacearr != nullptr);
BLI_assert(bm->lnor_spacearr->lspacearr != nullptr);
const int l_index = BM_elem_index_get(l);
short *clnors_data = BM_ELEM_CD_GET_VOID_P(l, offset);
short *clnors_data = static_cast<short *>(BM_ELEM_CD_GET_VOID_P(l, offset));
lnor_ed->loop_index = l_index;
lnor_ed->loop = l;
@ -2149,9 +2166,8 @@ BMLoopNorEditDataArray *BM_loop_normal_editdata_array_init(BMesh *bm,
BLI_assert(bm->spacearr_dirty == 0);
BMLoopNorEditDataArray *lnors_ed_arr = MEM_callocN(sizeof(*lnors_ed_arr), __func__);
lnors_ed_arr->lidx_to_lnor_editdata = MEM_callocN(
sizeof(*lnors_ed_arr->lidx_to_lnor_editdata) * bm->totloop, __func__);
BMLoopNorEditDataArray *lnors_ed_arr = MEM_cnew<BMLoopNorEditDataArray>(__func__);
lnors_ed_arr->lidx_to_lnor_editdata = MEM_cnew_array<BMLoopNorEditData *>(bm->totloop, __func__);
if (!CustomData_has_layer(&bm->ldata, CD_CUSTOMLOOPNORMAL)) {
BM_data_layer_add(bm, &bm->ldata, CD_CUSTOMLOOPNORMAL);
@ -2166,8 +2182,8 @@ BMLoopNorEditDataArray *BM_loop_normal_editdata_array_init(BMesh *bm,
totloopsel = bm_loop_normal_mark_indiv(bm, loops, do_all_loops_of_vert);
if (totloopsel) {
BMLoopNorEditData *lnor_ed = lnors_ed_arr->lnor_editdata = MEM_mallocN(
sizeof(*lnor_ed) * totloopsel, __func__);
BMLoopNorEditData *lnor_ed = lnors_ed_arr->lnor_editdata = static_cast<BMLoopNorEditData *>(
MEM_mallocN(sizeof(*lnor_ed) * totloopsel, __func__));
BM_ITER_MESH (v, &viter, bm, BM_VERTS_OF_MESH) {
BM_ITER_ELEM (l, &liter, v, BM_LOOPS_OF_VERT) {
@ -2224,8 +2240,9 @@ bool BM_custom_loop_normals_to_vector_layer(BMesh *bm)
int l_index = 0;
BM_ITER_MESH (f, &fiter, bm, BM_FACES_OF_MESH) {
BM_ITER_ELEM (l, &liter, f, BM_LOOPS_OF_FACE) {
const short *clnors_data = BM_ELEM_CD_GET_VOID_P(l, cd_custom_normal_offset);
float *normal = BM_ELEM_CD_GET_VOID_P(l, cd_normal_offset);
const short *clnors_data = static_cast<const short *>(
BM_ELEM_CD_GET_VOID_P(l, cd_custom_normal_offset));
float *normal = static_cast<float *>(BM_ELEM_CD_GET_VOID_P(l, cd_normal_offset));
BKE_lnor_space_custom_data_to_normal(
bm->lnor_spacearr->lspacearr[l_index], clnors_data, normal);
@ -2246,17 +2263,17 @@ void BM_custom_loop_normals_from_vector_layer(BMesh *bm, bool add_sharp_edges)
const int cd_custom_normal_offset = CustomData_get_offset(&bm->ldata, CD_CUSTOMLOOPNORMAL);
const int cd_normal_offset = CustomData_get_offset(&bm->ldata, CD_NORMAL);
if (bm->lnor_spacearr == NULL) {
bm->lnor_spacearr = MEM_callocN(sizeof(*bm->lnor_spacearr), __func__);
if (bm->lnor_spacearr == nullptr) {
bm->lnor_spacearr = MEM_cnew<MLoopNorSpaceArray>(__func__);
}
bm_mesh_loops_custom_normals_set(bm,
NULL,
NULL,
nullptr,
nullptr,
bm->lnor_spacearr,
NULL,
nullptr,
cd_custom_normal_offset,
NULL,
nullptr,
cd_normal_offset,
add_sharp_edges);
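Two C-to-C++ porting patterns recur throughout this file's conversion: `void *` user data now needs an explicit `static_cast`, and C99 designated initializers are rewritten as value-initialization followed by member assignments, since that initializer form only arrives with C++20. A minimal self-contained sketch of both, using a placeholder `TaskData` struct rather than the BMesh types above:

#include <cstdio>

struct TaskData {
  int count;
  float scale;
};

/* Callback receiving type-erased user data, as in the TLS callbacks above. */
static void task_fn(void *userdata)
{
  /* C allowed the implicit void* conversion; C++ requires the cast. */
  TaskData *data = static_cast<TaskData *>(userdata);
  data->count += 1;
}

int main()
{
  /* C version: TaskData data = {.count = 0, .scale = 2.0f}; */
  TaskData data{}; /* Value-initialize so every member starts zeroed. */
  data.count = 0;
  data.scale = 2.0f;

  task_fn(&data);
  std::printf("%d %.1f\n", data.count, data.scale);
  return 0;
}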

View File

@ -357,6 +357,7 @@ static void init_indexer_entry_from_value(FileIndexerEntry &indexer_entry,
AssetMetaData *asset_data = BKE_asset_metadata_create();
indexer_entry.datablock_info.asset_data = asset_data;
indexer_entry.datablock_info.free_asset_data = true;
if (entry.has_description()) {
const StringRefNull description = entry.get_description();

View File

@ -9052,7 +9052,7 @@ static bool ui_handle_button_activate_by_type(bContext *C, ARegion *region, uiBu
}
else {
#ifdef DEBUG
printf("%s: error, unhandled type: %u\n", __func__, but->type);
printf("%s: error, unhandled type: %d\n", __func__, but->type);
#endif
return false;
}

View File

@ -70,6 +70,7 @@ void ED_file_indexer_entries_extend_from_datablock_infos(
static void ED_file_indexer_entry_free(void *indexer_entry_ptr)
{
FileIndexerEntry *indexer_entry = static_cast<FileIndexerEntry *>(indexer_entry_ptr);
BLO_datablock_info_free(&indexer_entry->datablock_info);
MEM_freeN(indexer_entry);
}

View File

@ -3105,10 +3105,14 @@ static void filelist_readjob_list_lib_add_datablock(FileListReadJob *job_params,
entry->typeflag |= FILE_TYPE_ASSET;
if (filelist->asset_library) {
/** XXX Moving out the asset metadata like this isn't great. */
std::unique_ptr metadata = BKE_asset_metadata_move_to_unique_ptr(
datablock_info->asset_data);
BKE_asset_metadata_free(&datablock_info->asset_data);
/* Take ownership of the asset data (shallow-copy it into unique_ptr-managed memory) to
* pass it on to the asset system. */
std::unique_ptr metadata = std::make_unique<AssetMetaData>(*datablock_info->asset_data);
MEM_freeN(datablock_info->asset_data);
/* Give back a non-owning pointer, because the data-block info is still needed (e.g. to
* update the asset index). */
datablock_info->asset_data = metadata.get();
datablock_info->free_asset_data = false;
entry->asset = &filelist->asset_library->add_external_asset(
entry->relpath, datablock_info->name, std::move(metadata));
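The hand-off above can be sketched in isolation: copy the raw allocation into `unique_ptr`-managed memory, keep a borrowed pointer for code that still needs to read it (here the asset index), clear the owning flag so nothing double-frees, and move ownership into the long-lived container. `Payload` and `library` below are illustrative stand-ins, not Blender API:

#include <cstdio>
#include <memory>
#include <utility>
#include <vector>

struct Payload {
  int value;
};

/* Stand-in for the asset library: takes ownership and keeps objects alive. */
static std::vector<std::unique_ptr<Payload>> library;

int main()
{
  Payload *raw = new Payload{42};               /* Allocated by an earlier step. */
  auto owned = std::make_unique<Payload>(*raw); /* Copy into managed memory. */
  delete raw;                                   /* The old allocation is gone. */

  Payload *borrowed = owned.get();     /* Non-owning pointer, e.g. for indexing. */
  library.push_back(std::move(owned)); /* Ownership moves to the container. */

  /* `borrowed` stays valid only because the container keeps the object alive. */
  std::printf("%d\n", borrowed->value);
  return 0;
}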
@ -3267,7 +3271,7 @@ static std::optional<int> filelist_readjob_list_lib(FileListReadJob *job_params,
libfiledata, idcode, options & LIST_LIB_ASSETS_ONLY, &datablock_len);
filelist_readjob_list_lib_add_datablocks(
job_params, entries, datablock_infos, false, idcode, group);
BLI_linklist_freeN(datablock_infos);
BLO_datablock_info_linklist_free(datablock_infos);
}
else {
LinkNode *groups = BLO_blendhandle_get_linkable_groups(libfiledata);
@ -3290,7 +3294,7 @@ static std::optional<int> filelist_readjob_list_lib(FileListReadJob *job_params,
ED_file_indexer_entries_extend_from_datablock_infos(
&indexer_entries, group_datablock_infos, idcode);
}
BLI_linklist_freeN(group_datablock_infos);
BLO_datablock_info_linklist_free(group_datablock_infos);
datablock_len += group_datablock_len;
}
}

View File

@ -196,7 +196,8 @@ void ED_node_set_active_viewer_key(SpaceNode *snode)
if (snode->nodetree && path) {
/* A change in the active viewer may result in a change of the output node used by the
* compositor, so we need to get notified about such changes. */
if (snode->nodetree->active_viewer_key.value != path->parent_key.value) {
if (snode->nodetree->active_viewer_key.value != path->parent_key.value &&
snode->nodetree->type == NTREE_COMPOSIT) {
DEG_id_tag_update(&snode->nodetree->id, ID_RECALC_NTREE_OUTPUT);
WM_main_add_notifier(NC_NODE, nullptr);
}

View File

@ -191,8 +191,10 @@ set(VULKAN_SRC
vulkan/vk_batch.cc
vulkan/vk_context.cc
vulkan/vk_drawlist.cc
vulkan/vk_fence.cc
vulkan/vk_framebuffer.cc
vulkan/vk_index_buffer.cc
vulkan/vk_pixel_buffer.cc
vulkan/vk_query.cc
vulkan/vk_shader.cc
vulkan/vk_storage_buffer.cc
@ -204,8 +206,10 @@ set(VULKAN_SRC
vulkan/vk_batch.hh
vulkan/vk_context.hh
vulkan/vk_drawlist.hh
vulkan/vk_fence.hh
vulkan/vk_framebuffer.hh
vulkan/vk_index_buffer.hh
vulkan/vk_pixel_buffer.hh
vulkan/vk_query.hh
vulkan/vk_shader.hh
vulkan/vk_storage_buffer.hh

View File

@ -321,6 +321,12 @@ eGPUBackendType GPU_backend_get_type()
}
#endif
#ifdef WITH_VULKAN_BACKEND
if (g_backend && dynamic_cast<VKBackend *>(g_backend) != nullptr) {
return GPU_BACKEND_VULKAN;
}
#endif
return GPU_BACKEND_NONE;
}

View File

@ -10,8 +10,10 @@
#include "vk_batch.hh"
#include "vk_context.hh"
#include "vk_drawlist.hh"
#include "vk_fence.hh"
#include "vk_framebuffer.hh"
#include "vk_index_buffer.hh"
#include "vk_pixel_buffer.hh"
#include "vk_query.hh"
#include "vk_shader.hh"
#include "vk_storage_buffer.hh"
@ -80,6 +82,11 @@ DrawList *VKBackend::drawlist_alloc(int /*list_length*/)
return new VKDrawList();
}
Fence *VKBackend::fence_alloc()
{
return new VKFence();
}
FrameBuffer *VKBackend::framebuffer_alloc(const char *name)
{
return new VKFrameBuffer(name);
@ -90,6 +97,11 @@ IndexBuf *VKBackend::indexbuf_alloc()
return new VKIndexBuffer();
}
PixelBuffer *VKBackend::pixelbuf_alloc(uint size)
{
return new VKPixelBuffer(size);
}
QueryPool *VKBackend::querypool_alloc()
{
return new VKQueryPool();
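The new `fence_alloc()` and `pixelbuf_alloc()` overrides follow the existing backend factory pattern: the abstract backend exposes one virtual allocator per GPU resource type and each concrete backend returns its own subclass. A self-contained sketch of that shape; the `*Sketch` names are stand-ins, not Blender classes, and the Vulkan bodies are left empty just like the stubs in this commit:

#include <memory>

struct FenceSketch {
  virtual ~FenceSketch() = default;
  virtual void signal() = 0;
  virtual void wait() = 0;
};

struct VKFenceSketch final : FenceSketch {
  void signal() override { /* Real Vulkan signaling would go here. */ }
  void wait() override { /* Real Vulkan waiting would go here. */ }
};

struct BackendSketch {
  virtual ~BackendSketch() = default;
  virtual FenceSketch *fence_alloc() = 0;
};

struct VKBackendSketch final : BackendSketch {
  FenceSketch *fence_alloc() override { return new VKFenceSketch(); }
};

int main()
{
  std::unique_ptr<BackendSketch> backend = std::make_unique<VKBackendSketch>();
  std::unique_ptr<FenceSketch> fence(backend->fence_alloc());
  fence->signal();
  fence->wait();
  return 0;
}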

View File

@ -33,8 +33,10 @@ class VKBackend : public GPUBackend {
Batch *batch_alloc() override;
DrawList *drawlist_alloc(int list_length) override;
Fence *fence_alloc() override;
FrameBuffer *framebuffer_alloc(const char *name) override;
IndexBuf *indexbuf_alloc() override;
PixelBuffer *pixelbuf_alloc(uint size) override;
QueryPool *querypool_alloc() override;
Shader *shader_alloc(const char *name) override;
Texture *texture_alloc(const char *name) override;

View File

@ -0,0 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2022 Blender Foundation. All rights reserved. */
/** \file
* \ingroup gpu
*/
#include "vk_fence.hh"
namespace blender::gpu {
void VKFence::signal()
{
}
void VKFence::wait()
{
}
} // namespace blender::gpu

View File

@ -0,0 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2022 Blender Foundation. All rights reserved. */
/** \file
* \ingroup gpu
*/
#pragma once
#include "gpu_state_private.hh"
namespace blender::gpu {
class VKFence : public Fence {
public:
void signal() override;
void wait() override;
};
} // namespace blender::gpu

View File

@ -0,0 +1,35 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2022 Blender Foundation. All rights reserved. */
/** \file
* \ingroup gpu
*/
#include "vk_pixel_buffer.hh"
namespace blender::gpu {
VKPixelBuffer::VKPixelBuffer(int64_t size) : PixelBuffer(size)
{
}
void *VKPixelBuffer::map()
{
return nullptr;
}
void VKPixelBuffer::unmap()
{
}
int64_t VKPixelBuffer::get_native_handle()
{
return -1;
}
uint VKPixelBuffer::get_size()
{
return size_;
}
} // namespace blender::gpu

View File

@ -0,0 +1,23 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2022 Blender Foundation. All rights reserved. */
/** \file
* \ingroup gpu
*/
#pragma once
#include "gpu_texture_private.hh"
namespace blender::gpu {
class VKPixelBuffer : public PixelBuffer {
public:
VKPixelBuffer(int64_t size);
void *map() override;
void unmap() override;
int64_t get_native_handle() override;
uint get_size() override;
};
} // namespace blender::gpu

View File

@ -46,6 +46,13 @@ void VKTexture::update_sub(int /*mip*/,
{
}
void VKTexture::update_sub(int /*offset*/[3],
int /*extent*/[3],
eGPUDataFormat /*format*/,
GPUPixelBuffer * /*pixbuf*/)
{
}
/* TODO(fclem): Legacy. Should be removed at some point. */
uint VKTexture::gl_bindcode_get() const
{

View File

@ -26,6 +26,10 @@ class VKTexture : public Texture {
void *read(int mip, eGPUDataFormat format) override;
void update_sub(
int mip, int offset[3], int extent[3], eGPUDataFormat format, const void *data) override;
void update_sub(int offset[3],
int extent[3],
eGPUDataFormat format,
GPUPixelBuffer *pixbuf) override;
/* TODO(fclem): Legacy. Should be removed at some point. */
uint gl_bindcode_get() const override;

View File

@ -5,10 +5,17 @@
* \ingroup gpu
*/
#include "MEM_guardedalloc.h"
#include "vk_vertex_buffer.hh"
namespace blender::gpu {
VKVertexBuffer::~VKVertexBuffer()
{
release_data();
}
void VKVertexBuffer::bind_as_ssbo(uint /*binding*/)
{
}
@ -37,6 +44,13 @@ void *VKVertexBuffer::unmap(const void * /*mapped_data*/) const
void VKVertexBuffer::acquire_data()
{
if (usage_ == GPU_USAGE_DEVICE_ONLY) {
return;
}
/* Discard previous data if any. */
MEM_SAFE_FREE(data);
data = (uchar *)MEM_mallocN(sizeof(uchar) * this->size_alloc_get(), __func__);
}
void VKVertexBuffer::resize_data()
@ -45,6 +59,7 @@ void VKVertexBuffer::resize_data()
void VKVertexBuffer::release_data()
{
MEM_SAFE_FREE(data);
}
void VKVertexBuffer::upload_data()

View File

@ -13,6 +13,8 @@ namespace blender::gpu {
class VKVertexBuffer : public VertBuf {
public:
~VKVertexBuffer();
void bind_as_ssbo(uint binding) override;
void bind_as_texture(uint binding) override;
void wrap_handle(uint64_t handle) override;

View File

@ -43,6 +43,7 @@ typedef struct AssetFilterSettings {
*/
typedef struct AssetMetaData {
#ifdef __cplusplus
/** Enables use with `std::unique_ptr<AssetMetaData>`. */
~AssetMetaData();
#endif

View File

@ -15,6 +15,9 @@
/** Workaround to forward-declare C++ type in C header. */
#ifdef __cplusplus
# include "BLI_math_vec_types.hh"
namespace blender {
template<typename T> class Span;
template<typename T> class MutableSpan;
@ -267,6 +270,17 @@ typedef struct Mesh {
* cache dirty. If the mesh was changed first, the relevant dirty tags should be called first.
*/
void loose_edges_tag_none() const;
/**
* Normal direction of every polygon, which is defined by the winding direction of its corners.
*/
blender::Span<blender::float3> poly_normals() const;
/**
* Normal direction for each vertex, which is defined as the weighted average of the normals
* from a vertices surrounding faces, or the normalized position of vertices connected to no
* faces.
*/
blender::Span<blender::float3> vertex_normals() const;
#endif
} Mesh;
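A short usage fragment for the accessors declared above, matching how they are used later in this diff (e.g. the planar-face node); it assumes Blender's `BLI_span.hh`/`BLI_math_vec_types.hh` and an evaluated `const Mesh &mesh`, so it is a fragment rather than a standalone program:

const blender::Span<blender::float3> poly_normals = mesh.poly_normals();
const blender::Span<blender::float3> vert_normals = mesh.vertex_normals();

for (const int poly_i : poly_normals.index_range()) {
  /* Read-only, cached per-face normal; no manual BKE_mesh_poly_normals_ensure()
   * call or reinterpret_cast is needed any more. */
  const blender::float3 &normal = poly_normals[poly_i];
  (void)normal;
}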

View File

@ -640,6 +640,7 @@ typedef struct UserDef_Experimental {
char show_asset_debug_info;
char no_asset_indexing;
char use_viewport_debug;
char use_all_linked_data_direct;
char SANITIZE_AFTER_HERE;
/* The following options are automatically sanitized (set to 0)
* when the release cycle is not alpha. */
@ -652,6 +653,7 @@ typedef struct UserDef_Experimental {
char enable_eevee_next;
char use_sculpt_texture_paint;
char use_realtime_compositor;
char _pad0[7];
/** `makesdna` does not allow empty structs. */
} UserDef_Experimental;

View File

@ -6391,6 +6391,13 @@ static void rna_def_userdef_experimental(BlenderRNA *brna)
"Enable viewport debugging options for developers in the overlays "
"pop-over");
RNA_def_property_update(prop, 0, "rna_userdef_ui_update");
prop = RNA_def_property(srna, "use_all_linked_data_direct", PROP_BOOLEAN, PROP_NONE);
RNA_def_property_ui_text(
prop,
"All Linked Data Direct",
"Forces all linked data to be considered as directly linked. Workaround for current "
"issues/limitations in BAT (Blender studio pipeline tool)");
}
static void rna_def_userdef_addon_collection(BlenderRNA *brna, PropertyRNA *cprop)

View File

@ -45,7 +45,7 @@ set(SRC
intern/MOD_curve.c
intern/MOD_datatransfer.cc
intern/MOD_decimate.c
intern/MOD_displace.c
intern/MOD_displace.cc
intern/MOD_dynamicpaint.c
intern/MOD_edgesplit.c
intern/MOD_explode.c
@ -84,7 +84,7 @@ set(SRC
intern/MOD_subsurf.cc
intern/MOD_surface.c
intern/MOD_surfacedeform.c
intern/MOD_triangulate.c
intern/MOD_triangulate.cc
intern/MOD_ui_common.c
intern/MOD_util.cc
intern/MOD_uvproject.cc

View File

@ -126,10 +126,10 @@ static void updateDepsgraph(ModifierData *md, const ModifierUpdateDepsgraphConte
need_transform_relation = true;
}
if (dmd->texture != NULL) {
if (dmd->texture != nullptr) {
DEG_add_generic_id_relation(ctx->node, &dmd->texture->id, "Displace Modifier");
if (dmd->map_object != NULL && dmd->texmapping == MOD_DISP_MAP_OBJECT) {
if (dmd->map_object != nullptr && dmd->texmapping == MOD_DISP_MAP_OBJECT) {
MOD_depsgraph_update_object_bone_relation(
ctx->node, dmd->map_object, dmd->map_bone, "Displace Modifier");
need_transform_relation = true;
@ -273,11 +273,11 @@ static void displaceModifier_do(DisplaceModifierData *dmd,
int defgrp_index;
float(*tex_co)[3];
float weight = 1.0f; /* init value unused but some compilers may complain */
float(*vert_clnors)[3] = NULL;
float(*vert_clnors)[3] = nullptr;
float local_mat[4][4] = {{0}};
const bool use_global_direction = dmd->space == MOD_DISP_SPACE_GLOBAL;
if (dmd->texture == NULL && dmd->direction == MOD_DISP_DIR_RGB_XYZ) {
if (dmd->texture == nullptr && dmd->direction == MOD_DISP_DIR_RGB_XYZ) {
return;
}
if (dmd->strength == 0.0f) {
@ -287,20 +287,21 @@ static void displaceModifier_do(DisplaceModifierData *dmd,
mvert = BKE_mesh_verts_for_write(mesh);
MOD_get_vgroup(ob, mesh, dmd->defgrp_name, &dvert, &defgrp_index);
if (defgrp_index >= 0 && dvert == NULL) {
if (defgrp_index >= 0 && dvert == nullptr) {
/* There is a vertex group, but it has no vertices. */
return;
}
Tex *tex_target = dmd->texture;
if (tex_target != NULL) {
tex_co = MEM_calloc_arrayN((size_t)verts_num, sizeof(*tex_co), "displaceModifier_do tex_co");
if (tex_target != nullptr) {
tex_co = static_cast<float(*)[3]>(
MEM_calloc_arrayN((size_t)verts_num, sizeof(*tex_co), "displaceModifier_do tex_co"));
MOD_get_texture_coords((MappingInfoModifierData *)dmd, ctx, ob, mesh, vertexCos, tex_co);
MOD_init_texture((MappingInfoModifierData *)dmd, ctx);
}
else {
tex_co = NULL;
tex_co = nullptr;
}
if (direction == MOD_DISP_DIR_CLNOR) {
@ -311,8 +312,9 @@ static void displaceModifier_do(DisplaceModifierData *dmd,
BKE_mesh_calc_normals_split(mesh);
}
float(*clnors)[3] = CustomData_get_layer(ldata, CD_NORMAL);
vert_clnors = MEM_malloc_arrayN(verts_num, sizeof(*vert_clnors), __func__);
float(*clnors)[3] = static_cast<float(*)[3]>(CustomData_get_layer(ldata, CD_NORMAL));
vert_clnors = static_cast<float(*)[3]>(
MEM_malloc_arrayN(verts_num, sizeof(*vert_clnors), __func__));
BKE_mesh_normals_loop_to_vertex(
verts_num, BKE_mesh_loops(mesh), mesh->totloop, (const float(*)[3])clnors, vert_clnors);
}
@ -325,7 +327,7 @@ static void displaceModifier_do(DisplaceModifierData *dmd,
copy_m4_m4(local_mat, ob->object_to_world);
}
DisplaceUserdata data = {NULL};
DisplaceUserdata data = {nullptr};
data.scene = DEG_get_evaluated_scene(ctx->depsgraph);
data.dmd = dmd;
data.dvert = dvert;
@ -342,7 +344,7 @@ static void displaceModifier_do(DisplaceModifierData *dmd,
data.vert_normals = BKE_mesh_vertex_normals_ensure(mesh);
}
data.vert_clnors = vert_clnors;
if (tex_target != NULL) {
if (tex_target != nullptr) {
data.pool = BKE_image_pool_new();
BKE_texture_fetch_images_for_pool(tex_target, data.pool);
}
@ -351,7 +353,7 @@ static void displaceModifier_do(DisplaceModifierData *dmd,
settings.use_threading = (verts_num > 512);
BLI_task_parallel_range(0, verts_num, &data, displaceModifier_do_task, &settings);
if (data.pool != NULL) {
if (data.pool != nullptr) {
BKE_image_pool_free(data.pool);
}
@ -370,12 +372,12 @@ static void deformVerts(ModifierData *md,
float (*vertexCos)[3],
int verts_num)
{
Mesh *mesh_src = MOD_deform_mesh_eval_get(ctx->object, NULL, mesh, NULL, verts_num, false);
Mesh *mesh_src = MOD_deform_mesh_eval_get(ctx->object, nullptr, mesh, nullptr, verts_num, false);
displaceModifier_do((DisplaceModifierData *)md, ctx, mesh_src, vertexCos, verts_num);
if (!ELEM(mesh_src, NULL, mesh)) {
BKE_id_free(NULL, mesh_src);
if (!ELEM(mesh_src, nullptr, mesh)) {
BKE_id_free(nullptr, mesh_src);
}
}
@ -386,17 +388,18 @@ static void deformVertsEM(ModifierData *md,
float (*vertexCos)[3],
int verts_num)
{
Mesh *mesh_src = MOD_deform_mesh_eval_get(ctx->object, editData, mesh, NULL, verts_num, false);
Mesh *mesh_src = MOD_deform_mesh_eval_get(
ctx->object, editData, mesh, nullptr, verts_num, false);
/* TODO(@campbellbarton): use edit-mode data only (remove this line). */
if (mesh_src != NULL) {
if (mesh_src != nullptr) {
BKE_mesh_wrapper_ensure_mdata(mesh_src);
}
displaceModifier_do((DisplaceModifierData *)md, ctx, mesh_src, vertexCos, verts_num);
if (!ELEM(mesh_src, NULL, mesh)) {
BKE_id_free(NULL, mesh_src);
if (!ELEM(mesh_src, nullptr, mesh)) {
BKE_id_free(nullptr, mesh_src);
}
}
@ -416,7 +419,7 @@ static void panel_draw(const bContext *C, Panel *panel)
uiLayoutSetPropSep(layout, true);
uiTemplateID(layout, C, ptr, "texture", "texture.new", NULL, NULL, 0, ICON_NONE, NULL);
uiTemplateID(layout, C, ptr, "texture", "texture.new", nullptr, nullptr, 0, ICON_NONE, nullptr);
col = uiLayoutColumn(layout, false);
uiLayoutSetActive(col, has_texture);
@ -437,7 +440,7 @@ static void panel_draw(const bContext *C, Panel *panel)
}
}
else if (texture_coords == MOD_DISP_MAP_UV && RNA_enum_get(&ob_ptr, "type") == OB_MESH) {
uiItemPointerR(col, ptr, "uv_layer", &obj_data_ptr, "uv_layers", NULL, ICON_NONE);
uiItemPointerR(col, ptr, "uv_layer", &obj_data_ptr, "uv_layers", nullptr, ICON_NONE);
}
uiItemS(layout);
@ -449,16 +452,16 @@ static void panel_draw(const bContext *C, Panel *panel)
MOD_DISP_DIR_Y,
MOD_DISP_DIR_Z,
MOD_DISP_DIR_RGB_XYZ)) {
uiItemR(col, ptr, "space", 0, NULL, ICON_NONE);
uiItemR(col, ptr, "space", 0, nullptr, ICON_NONE);
}
uiItemS(layout);
col = uiLayoutColumn(layout, false);
uiItemR(col, ptr, "strength", 0, NULL, ICON_NONE);
uiItemR(col, ptr, "mid_level", 0, NULL, ICON_NONE);
uiItemR(col, ptr, "strength", 0, nullptr, ICON_NONE);
uiItemR(col, ptr, "mid_level", 0, nullptr, ICON_NONE);
modifier_vgroup_ui(col, ptr, &ob_ptr, "vertex_group", "invert_vertex_group", NULL);
modifier_vgroup_ui(col, ptr, &ob_ptr, "vertex_group", "invert_vertex_group", nullptr);
modifier_panel_end(layout, ptr);
}
@ -480,23 +483,23 @@ ModifierTypeInfo modifierType_Displace = {
/* copyData */ BKE_modifier_copydata_generic,
/* deformVerts */ deformVerts,
/* deformMatrices */ NULL,
/* deformMatrices */ nullptr,
/* deformVertsEM */ deformVertsEM,
/* deformMatricesEM */ NULL,
/* modifyMesh */ NULL,
/* modifyGeometrySet */ NULL,
/* deformMatricesEM */ nullptr,
/* modifyMesh */ nullptr,
/* modifyGeometrySet */ nullptr,
/* initData */ initData,
/* requiredDataMask */ requiredDataMask,
/* freeData */ NULL,
/* freeData */ nullptr,
/* isDisabled */ isDisabled,
/* updateDepsgraph */ updateDepsgraph,
/* dependsOnTime */ dependsOnTime,
/* dependsOnNormals */ dependsOnNormals,
/* foreachIDLink */ foreachIDLink,
/* foreachTexLink */ foreachTexLink,
/* freeRuntimeData */ NULL,
/* freeRuntimeData */ nullptr,
/* panelRegister */ panelRegister,
/* blendWrite */ NULL,
/* blendRead */ NULL,
/* blendWrite */ nullptr,
/* blendRead */ nullptr,
};

View File

@ -45,8 +45,10 @@ static Mesh *triangulate_mesh(Mesh *mesh,
BMesh *bm;
int edges_num, i;
MEdge *me;
CustomData_MeshMasks cd_mask_extra = {
.vmask = CD_MASK_ORIGINDEX, .emask = CD_MASK_ORIGINDEX, .pmask = CD_MASK_ORIGINDEX};
CustomData_MeshMasks cd_mask_extra{};
cd_mask_extra.vmask = CD_MASK_ORIGINDEX;
cd_mask_extra.emask = CD_MASK_ORIGINDEX;
cd_mask_extra.pmask = CD_MASK_ORIGINDEX;
bool keep_clnors = (flag & MOD_TRIANGULATE_KEEP_CUSTOMLOOP_NORMALS) != 0;
@ -57,22 +59,23 @@ static Mesh *triangulate_mesh(Mesh *mesh,
cd_mask_extra.lmask |= CD_MASK_NORMAL;
}
bm = BKE_mesh_to_bmesh_ex(mesh,
&((struct BMeshCreateParams){0}),
&((struct BMeshFromMeshParams){
.calc_face_normal = true,
.calc_vert_normal = false,
.cd_mask_extra = cd_mask_extra,
}));
BMeshCreateParams bmesh_create_params{};
BMeshFromMeshParams bmesh_from_mesh_params{};
bmesh_from_mesh_params.calc_face_normal = true;
bmesh_from_mesh_params.calc_vert_normal = false;
bmesh_from_mesh_params.cd_mask_extra = cd_mask_extra;
BM_mesh_triangulate(bm, quad_method, ngon_method, min_vertices, false, NULL, NULL, NULL);
bm = BKE_mesh_to_bmesh_ex(mesh, &bmesh_create_params, &bmesh_from_mesh_params);
BM_mesh_triangulate(
bm, quad_method, ngon_method, min_vertices, false, nullptr, nullptr, nullptr);
result = BKE_mesh_from_bmesh_for_eval_nomain(bm, &cd_mask_extra, mesh);
BM_mesh_free(bm);
if (keep_clnors) {
float(*lnors)[3] = CustomData_get_layer(&result->ldata, CD_NORMAL);
BLI_assert(lnors != NULL);
float(*lnors)[3] = static_cast<float(*)[3]>(CustomData_get_layer(&result->ldata, CD_NORMAL));
BLI_assert(lnors != nullptr);
BKE_mesh_set_custom_normals(result, lnors);
@ -125,10 +128,10 @@ static void panel_draw(const bContext *UNUSED(C), Panel *panel)
uiLayoutSetPropSep(layout, true);
uiItemR(layout, ptr, "quad_method", 0, NULL, ICON_NONE);
uiItemR(layout, ptr, "ngon_method", 0, NULL, ICON_NONE);
uiItemR(layout, ptr, "min_vertices", 0, NULL, ICON_NONE);
uiItemR(layout, ptr, "keep_custom_normals", 0, NULL, ICON_NONE);
uiItemR(layout, ptr, "quad_method", 0, nullptr, ICON_NONE);
uiItemR(layout, ptr, "ngon_method", 0, nullptr, ICON_NONE);
uiItemR(layout, ptr, "min_vertices", 0, nullptr, ICON_NONE);
uiItemR(layout, ptr, "keep_custom_normals", 0, nullptr, ICON_NONE);
modifier_panel_end(layout, ptr);
}
@ -151,24 +154,24 @@ ModifierTypeInfo modifierType_Triangulate = {
/* copyData */ BKE_modifier_copydata_generic,
/* deformVerts */ NULL,
/* deformMatrices */ NULL,
/* deformVertsEM */ NULL,
/* deformMatricesEM */ NULL,
/* deformVerts */ nullptr,
/* deformMatrices */ nullptr,
/* deformVertsEM */ nullptr,
/* deformMatricesEM */ nullptr,
/* modifyMesh */ modifyMesh,
/* modifyGeometrySet */ NULL,
/* modifyGeometrySet */ nullptr,
/* initData */ initData,
/* requiredDataMask */ NULL, // requiredDataMask,
/* freeData */ NULL,
/* isDisabled */ NULL,
/* updateDepsgraph */ NULL,
/* dependsOnTime */ NULL,
/* dependsOnNormals */ NULL,
/* foreachIDLink */ NULL,
/* foreachTexLink */ NULL,
/* freeRuntimeData */ NULL,
/* requiredDataMask */ nullptr, // requiredDataMask,
/* freeData */ nullptr,
/* isDisabled */ nullptr,
/* updateDepsgraph */ nullptr,
/* dependsOnTime */ nullptr,
/* dependsOnNormals */ nullptr,
/* foreachIDLink */ nullptr,
/* foreachTexLink */ nullptr,
/* freeRuntimeData */ nullptr,
/* panelRegister */ panelRegister,
/* blendWrite */ NULL,
/* blendRead */ NULL,
/* blendWrite */ nullptr,
/* blendRead */ nullptr,
};

View File

@ -40,8 +40,7 @@ class PlanarFieldInput final : public bke::MeshFieldInput {
const Span<MVert> verts = mesh.verts();
const Span<MPoly> polys = mesh.polys();
const Span<MLoop> loops = mesh.loops();
const Span<float3> poly_normals{
reinterpret_cast<const float3 *>(BKE_mesh_poly_normals_ensure(&mesh)), mesh.totpoly};
const Span<float3> poly_normals = mesh.poly_normals();
bke::MeshFieldContext context{mesh, ATTR_DOMAIN_FACE};
fn::FieldEvaluator evaluator{context, polys.size()};

View File

@ -5,7 +5,8 @@
#include "BKE_mesh.h"
#include "BLI_disjoint_set.hh"
#include "BLI_atomic_disjoint_set.hh"
#include "BLI_task.hh"
#include "node_geometry_util.hh"
@ -35,17 +36,15 @@ class IslandFieldInput final : public bke::MeshFieldInput {
{
const Span<MEdge> edges = mesh.edges();
DisjointSet<int> islands(mesh.totvert);
for (const int i : edges.index_range()) {
islands.join(edges[i].v1, edges[i].v2);
}
AtomicDisjointSet islands(mesh.totvert);
threading::parallel_for(edges.index_range(), 1024, [&](const IndexRange range) {
for (const MEdge &edge : edges.slice(range)) {
islands.join(edge.v1, edge.v2);
}
});
Array<int> output(mesh.totvert);
VectorSet<int> ordered_roots;
for (const int i : IndexRange(mesh.totvert)) {
const int root = islands.find_root(i);
output[i] = ordered_roots.index_of_or_add(root);
}
islands.calc_reduced_ids(output);
return mesh.attributes().adapt_domain<int>(
VArray<int>::ForContainer(std::move(output)), ATTR_DOMAIN_POINT, domain);
@ -81,18 +80,15 @@ class IslandCountFieldInput final : public bke::MeshFieldInput {
{
const Span<MEdge> edges = mesh.edges();
DisjointSet<int> islands(mesh.totvert);
for (const int i : edges.index_range()) {
islands.join(edges[i].v1, edges[i].v2);
}
AtomicDisjointSet islands(mesh.totvert);
threading::parallel_for(edges.index_range(), 1024, [&](const IndexRange range) {
for (const MEdge &edge : edges.slice(range)) {
islands.join(edge.v1, edge.v2);
}
});
Set<int> island_list;
for (const int i_vert : IndexRange(mesh.totvert)) {
const int root = islands.find_root(i_vert);
island_list.add(root);
}
return VArray<int>::ForSingle(island_list.size(), mesh.attributes().domain_size(domain));
const int islands_num = islands.count_sets();
return VArray<int>::ForSingle(islands_num, mesh.attributes().domain_size(domain));
}
uint64_t hash() const override
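The shape of the new island computation, as a fragment that assumes Blender's `BLI_atomic_disjoint_set.hh` and `BLI_task.hh` from the hunks above: `AtomicDisjointSet::join()` may be called concurrently, so the edge loop can run under `threading::parallel_for`, and the two follow-up queries replace the manual root lookups and `VectorSet`/`Set` bookkeeping of the old code:

blender::AtomicDisjointSet islands(mesh.totvert);
blender::threading::parallel_for(edges.index_range(), 1024, [&](const blender::IndexRange range) {
  for (const MEdge &edge : edges.slice(range)) {
    islands.join(edge.v1, edge.v2); /* Atomic union of the two vertex sets. */
  }
});

blender::Array<int> island_of_vert(mesh.totvert);
islands.calc_reduced_ids(island_of_vert);     /* Contiguous 0..n-1 island index per vertex. */
const int islands_num = islands.count_sets(); /* Used by the island-count node. */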

View File

@ -26,10 +26,10 @@ set(INC
set(SRC
intern/bake.c
intern/bake.cc
intern/engine.cc
intern/initrender.cc
intern/multires_bake.c
intern/multires_bake.cc
intern/pipeline.cc
intern/render_result.cc
intern/texture_image.c

View File

@ -336,7 +336,8 @@ static bool cast_ray_highpoly(BVHTreeFromMesh *treeData,
}
BVHTreeRayHit *hits;
hits = MEM_mallocN(sizeof(BVHTreeRayHit) * tot_highpoly, "Bake Highpoly to Lowpoly: BVH Rays");
hits = static_cast<BVHTreeRayHit *>(
MEM_mallocN(sizeof(BVHTreeRayHit) * tot_highpoly, "Bake Highpoly to Lowpoly: BVH Rays"));
for (i = 0; i < tot_highpoly; i++) {
float co_high[3], dir_high[3];
@ -463,15 +464,15 @@ static TriTessFace *mesh_calc_tri_tessface(Mesh *me, bool tangent, Mesh *me_eval
const MPoly *polys = BKE_mesh_polys(me);
const MLoop *loops = BKE_mesh_loops(me);
looptri = MEM_mallocN(sizeof(*looptri) * tottri, __func__);
triangles = MEM_callocN(sizeof(TriTessFace) * tottri, __func__);
looptri = static_cast<MLoopTri *>(MEM_mallocN(sizeof(*looptri) * tottri, __func__));
triangles = static_cast<TriTessFace *>(MEM_callocN(sizeof(TriTessFace) * tottri, __func__));
const float(*precomputed_normals)[3] = BKE_mesh_poly_normals_are_dirty(me) ?
NULL :
nullptr :
BKE_mesh_poly_normals_ensure(me);
const bool calculate_normal = precomputed_normals ? false : true;
if (precomputed_normals != NULL) {
if (precomputed_normals != nullptr) {
BKE_mesh_recalc_looptri_with_normals(
loops, polys, verts, me->totloop, me->totpoly, looptri, precomputed_normals);
}
@ -479,17 +480,18 @@ static TriTessFace *mesh_calc_tri_tessface(Mesh *me, bool tangent, Mesh *me_eval
BKE_mesh_recalc_looptri(loops, polys, verts, me->totloop, me->totpoly, looptri);
}
const TSpace *tspace = NULL;
const float(*loop_normals)[3] = NULL;
const TSpace *tspace = nullptr;
const float(*loop_normals)[3] = nullptr;
if (tangent) {
BKE_mesh_ensure_normals_for_display(me_eval);
BKE_mesh_calc_normals_split(me_eval);
BKE_mesh_calc_loop_tangents(me_eval, true, NULL, 0);
BKE_mesh_calc_loop_tangents(me_eval, true, nullptr, 0);
tspace = CustomData_get_layer(&me_eval->ldata, CD_TANGENT);
tspace = static_cast<const TSpace *>(CustomData_get_layer(&me_eval->ldata, CD_TANGENT));
BLI_assert(tspace);
loop_normals = CustomData_get_layer(&me_eval->ldata, CD_NORMAL);
loop_normals = static_cast<const float(*)[3]>(
CustomData_get_layer(&me_eval->ldata, CD_NORMAL));
}
const float(*vert_normals)[3] = BKE_mesh_vertex_normals_ensure(me);
@ -551,41 +553,42 @@ bool RE_bake_pixels_populate_from_objects(struct Mesh *me_low,
int primitive_id;
float u, v;
float imat_low[4][4];
bool is_cage = me_cage != NULL;
bool is_cage = me_cage != nullptr;
bool result = true;
Mesh *me_eval_low = NULL;
Mesh *me_eval_low = nullptr;
Mesh **me_highpoly;
BVHTreeFromMesh *treeData;
/* NOTE: all coordinates are in local space. */
TriTessFace *tris_low = NULL;
TriTessFace *tris_cage = NULL;
TriTessFace *tris_low = nullptr;
TriTessFace *tris_cage = nullptr;
TriTessFace **tris_high;
/* Assume all low-poly tessfaces can be quads. */
tris_high = MEM_callocN(sizeof(TriTessFace *) * tot_highpoly, "MVerts Highpoly Mesh Array");
tris_high = MEM_cnew_array<TriTessFace *>(tot_highpoly, "MVerts Highpoly Mesh Array");
/* Assume all high-poly tessfaces are triangles. */
me_highpoly = MEM_mallocN(sizeof(Mesh *) * tot_highpoly, "Highpoly Derived Meshes");
treeData = MEM_callocN(sizeof(BVHTreeFromMesh) * tot_highpoly, "Highpoly BVH Trees");
me_highpoly = static_cast<Mesh **>(
MEM_mallocN(sizeof(Mesh *) * tot_highpoly, "Highpoly Derived Meshes"));
treeData = MEM_cnew_array<BVHTreeFromMesh>(tot_highpoly, "Highpoly BVH Trees");
if (!is_cage) {
me_eval_low = BKE_mesh_copy_for_eval(me_low, false);
tris_low = mesh_calc_tri_tessface(me_low, true, me_eval_low);
}
else if (is_custom_cage) {
tris_low = mesh_calc_tri_tessface(me_low, false, NULL);
tris_cage = mesh_calc_tri_tessface(me_cage, false, NULL);
tris_low = mesh_calc_tri_tessface(me_low, false, nullptr);
tris_cage = mesh_calc_tri_tessface(me_cage, false, nullptr);
}
else {
tris_cage = mesh_calc_tri_tessface(me_cage, false, NULL);
tris_cage = mesh_calc_tri_tessface(me_cage, false, nullptr);
}
invert_m4_m4(imat_low, mat_low);
for (i = 0; i < tot_highpoly; i++) {
tris_high[i] = mesh_calc_tri_tessface(highpoly[i].me, false, NULL);
tris_high[i] = mesh_calc_tri_tessface(highpoly[i].me, false, nullptr);
me_highpoly[i] = highpoly[i].me;
BKE_mesh_runtime_looptri_ensure(me_highpoly[i]);
@ -594,7 +597,7 @@ bool RE_bake_pixels_populate_from_objects(struct Mesh *me_low,
/* Create a BVH-tree for each `highpoly` object. */
BKE_bvhtree_from_mesh_get(&treeData[i], me_highpoly[i], BVHTREE_FROM_LOOPTRI, 2);
if (treeData[i].tree == NULL) {
if (treeData[i].tree == nullptr) {
printf("Baking: out of memory while creating BHVTree for object \"%s\"\n",
highpoly[i].ob->id.name + 2);
result = false;
@ -668,7 +671,7 @@ cleanup:
MEM_freeN(me_highpoly);
if (me_eval_low) {
BKE_id_free(NULL, me_eval_low);
BKE_id_free(nullptr, me_eval_low);
}
if (tris_low) {
MEM_freeN(tris_low);
@ -712,21 +715,21 @@ void RE_bake_pixels_populate(Mesh *me,
const char *uv_layer)
{
const MLoopUV *mloopuv;
if ((uv_layer == NULL) || (uv_layer[0] == '\0')) {
mloopuv = CustomData_get_layer(&me->ldata, CD_MLOOPUV);
if ((uv_layer == nullptr) || (uv_layer[0] == '\0')) {
mloopuv = static_cast<const MLoopUV *>(CustomData_get_layer(&me->ldata, CD_MLOOPUV));
}
else {
int uv_id = CustomData_get_named_layer(&me->ldata, CD_MLOOPUV, uv_layer);
mloopuv = CustomData_get_layer_n(&me->ldata, CD_MLOOPUV, uv_id);
mloopuv = static_cast<const MLoopUV *>(CustomData_get_layer_n(&me->ldata, CD_MLOOPUV, uv_id));
}
if (mloopuv == NULL) {
if (mloopuv == nullptr) {
return;
}
BakeDataZSpan bd;
bd.pixel_array = pixel_array;
bd.zspan = MEM_callocN(sizeof(ZSpan) * targets->images_num, "bake zspan");
bd.zspan = MEM_cnew_array<ZSpan>(targets->images_num, "bake zspan");
/* initialize all pixel arrays so we know which ones are 'blank' */
for (int i = 0; i < pixels_num; i++) {
@ -739,7 +742,7 @@ void RE_bake_pixels_populate(Mesh *me,
}
const int tottri = poly_to_tri_count(me->totpoly, me->totloop);
MLoopTri *looptri = MEM_mallocN(sizeof(*looptri) * tottri, __func__);
MLoopTri *looptri = static_cast<MLoopTri *>(MEM_mallocN(sizeof(*looptri) * tottri, __func__));
const MVert *verts = BKE_mesh_verts(me);
const MPoly *polys = BKE_mesh_polys(me);
@ -957,7 +960,7 @@ void RE_bake_normal_world_to_tangent(const BakePixel pixel_array[],
MEM_freeN(triangles);
if (me_eval) {
BKE_id_free(NULL, me_eval);
BKE_id_free(nullptr, me_eval);
}
}
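The allocations in this file show the two idioms the port settles on: keep the untyped `MEM_mallocN`/`MEM_callocN` call and add the `static_cast` that C++ requires, or switch to the typed `MEM_cnew<T>()`/`MEM_cnew_array<T>()` helpers, which zero-initialize and already return `T *`. A fragment assuming Blender's `MEM_guardedalloc.h` and the BVH types used above (the count is a placeholder):

const int tot_highpoly = 4; /* Placeholder count. */

/* Untyped allocator plus the explicit cast C++ requires ... */
BVHTreeRayHit *hits = static_cast<BVHTreeRayHit *>(
    MEM_mallocN(sizeof(BVHTreeRayHit) * tot_highpoly, __func__));

/* ... or the typed helpers, which zero-initialize and need no cast: */
BVHTreeFromMesh *tree_data = MEM_cnew_array<BVHTreeFromMesh>(tot_highpoly, __func__);

MEM_freeN(hits);
MEM_freeN(tree_data);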
@ -1027,7 +1030,7 @@ void RE_bake_ibuf_clear(Image *image, const bool is_tangent)
const float nor_alpha[4] = {0.5f, 0.5f, 1.0f, 0.0f};
const float nor_solid[4] = {0.5f, 0.5f, 1.0f, 1.0f};
ibuf = BKE_image_acquire_ibuf(image, NULL, &lock);
ibuf = BKE_image_acquire_ibuf(image, nullptr, &lock);
BLI_assert(ibuf);
if (is_tangent) {

View File

@ -383,14 +383,14 @@ static void *do_multires_bake_thread(void *data_v)
while ((tri_index = multires_bake_queue_next_tri(handle->queue)) >= 0) {
const MLoopTri *lt = &data->mlooptri[tri_index];
const short mat_nr = data->material_indices == NULL ? 0 : data->material_indices[lt->poly];
const short mat_nr = data->material_indices == nullptr ? 0 : data->material_indices[lt->poly];
const MLoopUV *mloopuv = data->mloopuv;
if (multiresbake_test_break(bkr)) {
break;
}
Image *tri_image = mat_nr < bkr->ob_image.len ? bkr->ob_image.array[mat_nr] : NULL;
Image *tri_image = mat_nr < bkr->ob_image.len ? bkr->ob_image.array[mat_nr] : nullptr;
if (tri_image != handle->image) {
continue;
}
@ -427,7 +427,7 @@ static void *do_multires_bake_thread(void *data_v)
BLI_spin_unlock(&handle->queue->spin);
}
return NULL;
return nullptr;
}
/* some of the arrays inside ccgdm are lazy-initialized, which will generally
@ -475,13 +475,13 @@ static void do_multires_bake(MultiresBakeRender *bkr,
MVert *mvert = dm->getVertArray(dm);
MPoly *mpoly = dm->getPolyArray(dm);
MLoop *mloop = dm->getLoopArray(dm);
MLoopUV *mloopuv = dm->getLoopDataArray(dm, CD_MLOOPUV);
float *pvtangent = NULL;
MLoopUV *mloopuv = static_cast<MLoopUV *>(dm->getLoopDataArray(dm, CD_MLOOPUV));
float *pvtangent = nullptr;
ListBase threads;
int i, tot_thread = bkr->threads > 0 ? bkr->threads : BLI_system_thread_count();
void *bake_data = NULL;
void *bake_data = nullptr;
Mesh *temp_mesh = BKE_mesh_new_nomain(
dm->getNumVerts(dm), dm->getNumEdges(dm), 0, dm->getNumLoops(dm), dm->getNumPolys(dm));
@ -511,19 +511,19 @@ static void do_multires_bake(MultiresBakeRender *bkr,
dm->getNumLoopTri(dm),
&dm->loopData,
true,
NULL,
nullptr,
0,
vert_normals,
poly_normals,
(const float(*)[3])dm->getLoopDataArray(dm, CD_NORMAL),
(const float(*)[3])dm->getVertDataArray(dm, CD_ORCO), /* may be nullptr */
(const float(*)[3])dm->getVertDataArray(dm, CD_ORCO), /* may be nullptr */
/* result */
&dm->loopData,
dm->getNumLoops(dm),
&dm->tangent_mask);
}
pvtangent = DM_get_loop_data_layer(dm, CD_TANGENT);
pvtangent = static_cast<float *>(DM_get_loop_data_layer(dm, CD_TANGENT));
}
/* all threads shares the same custom bake data */
@ -535,7 +535,7 @@ static void do_multires_bake(MultiresBakeRender *bkr,
BLI_threadpool_init(&threads, do_multires_bake_thread, tot_thread);
}
handles = MEM_callocN(tot_thread * sizeof(MultiresBakeThread), "do_multires_bake handles");
handles = MEM_cnew_array<MultiresBakeThread>(tot_thread, "do_multires_bake handles");
init_ccgdm_arrays(bkr->hires_dm);
@ -553,8 +553,8 @@ static void do_multires_bake(MultiresBakeRender *bkr,
handle->queue = &queue;
handle->data.mpoly = mpoly;
handle->data.material_indices = CustomData_get_layer_named(
&dm->polyData, CD_PROP_INT32, "material_index");
handle->data.material_indices = static_cast<const int *>(
CustomData_get_layer_named(&dm->polyData, CD_PROP_INT32, "material_index"));
handle->data.mvert = mvert;
handle->data.vert_normals = vert_normals;
handle->data.mloopuv = mloopuv;
@ -609,7 +609,7 @@ static void do_multires_bake(MultiresBakeRender *bkr,
MEM_freeN(handles);
BKE_id_free(NULL, temp_mesh);
BKE_id_free(nullptr, temp_mesh);
}
/* mode = 0: interpolate normals,
@ -704,11 +704,11 @@ static void get_ccgdm_data(DerivedMesh *lodm,
CLAMP(crn_x, 0.0f, grid_size);
CLAMP(crn_y, 0.0f, grid_size);
if (n != NULL) {
if (n != nullptr) {
interp_bilinear_grid(&key, grid_data[g_index + S], crn_x, crn_y, 0, n);
}
if (co != NULL) {
if (co != nullptr) {
interp_bilinear_grid(&key, grid_data[g_index + S], crn_x, crn_y, 1, co);
}
}
@ -772,19 +772,19 @@ static void *init_heights_data(MultiresBakeRender *bkr, ImBuf *ibuf)
{
MHeightBakeData *height_data;
DerivedMesh *lodm = bkr->lores_dm;
BakeImBufuserData *userdata = ibuf->userdata;
BakeImBufuserData *userdata = static_cast<BakeImBufuserData *>(ibuf->userdata);
if (userdata->displacement_buffer == NULL) {
userdata->displacement_buffer = MEM_callocN(sizeof(float) * ibuf->x * ibuf->y,
"MultiresBake heights");
if (userdata->displacement_buffer == nullptr) {
userdata->displacement_buffer = MEM_cnew_array<float>(ibuf->x * ibuf->y,
"MultiresBake heights");
}
height_data = MEM_callocN(sizeof(MHeightBakeData), "MultiresBake heightData");
height_data = MEM_cnew<MHeightBakeData>("MultiresBake heightData");
height_data->heights = userdata->displacement_buffer;
if (!bkr->use_lores_mesh) {
SubsurfModifierData smd = {{NULL}};
SubsurfModifierData smd = {{nullptr}};
int ss_lvl = bkr->tot_lvl - bkr->lvl;
CLAMP(ss_lvl, 0, 6);
@ -795,12 +795,13 @@ static void *init_heights_data(MultiresBakeRender *bkr, ImBuf *ibuf)
smd.quality = 3;
height_data->ssdm = subsurf_make_derived_from_derived(
bkr->lores_dm, &smd, bkr->scene, NULL, 0);
bkr->lores_dm, &smd, bkr->scene, nullptr, SubsurfFlags(0));
init_ccgdm_arrays(height_data->ssdm);
}
}
height_data->orig_index_mp_to_orig = lodm->getPolyDataArray(lodm, CD_ORIGINDEX);
height_data->orig_index_mp_to_orig = static_cast<const int *>(
lodm->getPolyDataArray(lodm, CD_ORIGINDEX));
return (void *)height_data;
}
@ -837,7 +838,7 @@ static void apply_heights_callback(DerivedMesh *lores_dm,
const MLoopTri *lt = lores_dm->getLoopTriArray(lores_dm) + tri_index;
MLoop *mloop = lores_dm->getLoopArray(lores_dm);
MPoly *mpoly = lores_dm->getPolyArray(lores_dm) + lt->poly;
MLoopUV *mloopuv = lores_dm->getLoopDataArray(lores_dm, CD_MLOOPUV);
MLoopUV *mloopuv = static_cast<MLoopUV *>(lores_dm->getLoopDataArray(lores_dm, CD_MLOOPUV));
MHeightBakeData *height_data = (MHeightBakeData *)bake_data;
MultiresBakeThread *thread_data = (MultiresBakeThread *)thread_data_v;
float uv[2], *st0, *st1, *st2, *st3;
@ -863,7 +864,7 @@ static void apply_heights_callback(DerivedMesh *lores_dm,
clamp_v2(uv, 0.0f, 1.0f);
get_ccgdm_data(
lores_dm, hires_dm, height_data->orig_index_mp_to_orig, lvl, lt, uv[0], uv[1], p1, NULL);
lores_dm, hires_dm, height_data->orig_index_mp_to_orig, lvl, lt, uv[0], uv[1], p1, nullptr);
if (height_data->ssdm) {
get_ccgdm_data(lores_dm,
@ -914,9 +915,10 @@ static void *init_normal_data(MultiresBakeRender *bkr, ImBuf *UNUSED(ibuf))
MNormalBakeData *normal_data;
DerivedMesh *lodm = bkr->lores_dm;
normal_data = MEM_callocN(sizeof(MNormalBakeData), "MultiresBake normalData");
normal_data = MEM_cnew<MNormalBakeData>("MultiresBake normalData");
normal_data->orig_index_mp_to_orig = lodm->getPolyDataArray(lodm, CD_ORIGINDEX);
normal_data->orig_index_mp_to_orig = static_cast<const int *>(
lodm->getPolyDataArray(lodm, CD_ORIGINDEX));
return (void *)normal_data;
}
@ -950,7 +952,7 @@ static void apply_tangmat_callback(DerivedMesh *lores_dm,
{
const MLoopTri *lt = lores_dm->getLoopTriArray(lores_dm) + tri_index;
MPoly *mpoly = lores_dm->getPolyArray(lores_dm) + lt->poly;
MLoopUV *mloopuv = lores_dm->getLoopDataArray(lores_dm, CD_MLOOPUV);
MLoopUV *mloopuv = static_cast<MLoopUV *>(lores_dm->getLoopDataArray(lores_dm, CD_MLOOPUV));
MNormalBakeData *normal_data = (MNormalBakeData *)bake_data;
float uv[2], *st0, *st1, *st2, *st3;
int pixel = ibuf->x * y + x;
@ -975,7 +977,7 @@ static void apply_tangmat_callback(DerivedMesh *lores_dm,
clamp_v2(uv, 0.0f, 1.0f);
get_ccgdm_data(
lores_dm, hires_dm, normal_data->orig_index_mp_to_orig, lvl, lt, uv[0], uv[1], NULL, n);
lores_dm, hires_dm, normal_data->orig_index_mp_to_orig, lvl, lt, uv[0], uv[1], nullptr, n);
mul_v3_m3v3(vec, tangmat, n);
normalize_v3_length(vec, 0.5);
@ -1435,7 +1437,7 @@ static void bake_images(MultiresBakeRender *bkr, MultiresBakeResult *result)
{
LinkData *link;
for (link = bkr->image.first; link; link = link->next) {
for (link = static_cast<LinkData *>(bkr->image.first); link; link = link->next) {
Image *ima = (Image *)link->data;
LISTBASE_FOREACH (ImageTile *, tile, &ima->tiles) {
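The image loop above now begins with `static_cast<LinkData *>(bkr->image.first)`: Blender's `ListBase` stores its endpoints as `void *`, so starting the walk from `first` needs an explicit cast in C++, while the rest of the iteration (following typed `next` pointers) is unchanged. A self-contained sketch with simplified stand-ins for `ListBase`/`LinkData` (illustrative only; the real structs live in `DNA_listBase.h`):

#include <cstdlib>

/* Simplified stand-ins for Blender's LinkData / ListBase (sketch only). */
struct DemoLink {
  DemoLink *next, *prev;
  void *data;
};

struct DemoListBase {
  void *first, *last; /* Untyped on purpose: one list type serves all link types. */
};

int main()
{
  DemoLink a = {nullptr, nullptr, nullptr};
  DemoLink b = {nullptr, &a, nullptr};
  a.next = &b;
  DemoListBase images = {&a, &b};

  /* `first` is a void *, so C++ needs the cast to begin the walk; after that
   * the typed `next` pointers carry the iteration, as in bake_images(). */
  int count = 0;
  for (DemoLink *link = static_cast<DemoLink *>(images.first); link; link = link->next) {
    count++;
  }
  return (count == 2) ? EXIT_SUCCESS : EXIT_FAILURE;
}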
@ -1443,12 +1445,11 @@ static void bake_images(MultiresBakeRender *bkr, MultiresBakeResult *result)
BKE_imageuser_default(&iuser);
iuser.tile = tile->tile_number;
ImBuf *ibuf = BKE_image_acquire_ibuf(ima, &iuser, NULL);
ImBuf *ibuf = BKE_image_acquire_ibuf(ima, &iuser, nullptr);
if (ibuf->x > 0 && ibuf->y > 0) {
BakeImBufuserData *userdata = MEM_callocN(sizeof(BakeImBufuserData),
"MultiresBake userdata");
userdata->mask_buffer = MEM_callocN(ibuf->y * ibuf->x, "MultiresBake imbuf mask");
BakeImBufuserData *userdata = MEM_cnew<BakeImBufuserData>("MultiresBake userdata");
userdata->mask_buffer = MEM_cnew_array<char>(ibuf->y * ibuf->x, "MultiresBake imbuf mask");
ibuf->userdata = userdata;
switch (bkr->mode) {
@ -1483,7 +1484,7 @@ static void bake_images(MultiresBakeRender *bkr, MultiresBakeResult *result)
}
}
BKE_image_release_ibuf(ima, ibuf, NULL);
BKE_image_release_ibuf(ima, ibuf, nullptr);
}
ima->id.tag |= LIB_TAG_DOIT;
@ -1495,7 +1496,7 @@ static void finish_images(MultiresBakeRender *bkr, MultiresBakeResult *result)
LinkData *link;
bool use_displacement_buffer = bkr->mode == RE_BAKE_DISPLACEMENT;
for (link = bkr->image.first; link; link = link->next) {
for (link = static_cast<LinkData *>(bkr->image.first); link; link = link->next) {
Image *ima = (Image *)link->data;
LISTBASE_FOREACH (ImageTile *, tile, &ima->tiles) {
@ -1503,7 +1504,7 @@ static void finish_images(MultiresBakeRender *bkr, MultiresBakeResult *result)
BKE_imageuser_default(&iuser);
iuser.tile = tile->tile_number;
ImBuf *ibuf = BKE_image_acquire_ibuf(ima, &iuser, NULL);
ImBuf *ibuf = BKE_image_acquire_ibuf(ima, &iuser, nullptr);
BakeImBufuserData *userdata = (BakeImBufuserData *)ibuf->userdata;
if (ibuf->x <= 0 || ibuf->y <= 0) {
@ -1547,10 +1548,10 @@ static void finish_images(MultiresBakeRender *bkr, MultiresBakeResult *result)
MEM_freeN(userdata->mask_buffer);
MEM_freeN(userdata);
ibuf->userdata = NULL;
ibuf->userdata = nullptr;
}
BKE_image_release_ibuf(ima, ibuf, NULL);
BKE_image_release_ibuf(ima, ibuf, nullptr);
DEG_id_tag_update(&ima->id, 0);
}
}