Merge branch 'master' into geometry-nodes-simulation

This commit is contained in:
Jacques Lucke 2022-12-13 11:15:20 +01:00
commit 02a264f5ab
134 changed files with 2905 additions and 1615 deletions

View File

@ -12,7 +12,6 @@ if(UNIX)
automake
bison
${_libtoolize_name}
meson
ninja
pkg-config
tclsh

View File

@ -26,5 +26,6 @@ endif()
add_dependencies(
external_epoxy
# Needed for `MESON`.
external_python_site_packages
)

View File

@ -18,6 +18,7 @@ ExternalProject_Add(external_fribidi
add_dependencies(
external_fribidi
external_python
# Needed for `MESON`.
external_python_site_packages
)

View File

@ -5,7 +5,7 @@ if(WIN32)
set(HARFBUZZ_PKG_ENV FREETYPE_DIR=${LIBDIR}/freetype)
else()
set(HARFBUZZ_CONFIGURE_ENV ${CONFIGURE_ENV})
set(HARFBUZZ_PKG_ENV PKG_CONFIG_PATH=${LIBDIR}/freetype/lib/pkgconfig:${LIBDIR}/brotli/lib/pkgconfig:$PKG_CONFIG_PATH)
set(HARFBUZZ_PKG_ENV PKG_CONFIG_PATH=${LIBDIR}/freetype/lib/pkgconfig:${LIBDIR}/brotli/lib/pkgconfig:${LIBDIR}/lib/python3.10/pkgconfig:$PKG_CONFIG_PATH)
endif()
set(HARFBUZZ_EXTRA_OPTIONS
@ -13,6 +13,9 @@ set(HARFBUZZ_EXTRA_OPTIONS
-Dfreetype=enabled
-Dglib=disabled
-Dgobject=disabled
# Only used for command line utilities,
# disable as this would add an addition & unnecessary build-dependency.
-Dcairo=disabled
)
ExternalProject_Add(external_harfbuzz
@ -30,6 +33,7 @@ ExternalProject_Add(external_harfbuzz
add_dependencies(
external_harfbuzz
external_python
# Needed for `MESON`.
external_python_site_packages
)

View File

@ -218,7 +218,7 @@ harvest_rpath_lib(openvdb/lib openvdb/lib "*${SHAREDLIBEXT}*")
harvest_rpath_python(openvdb/lib/python${PYTHON_SHORT_VERSION} python/lib/python${PYTHON_SHORT_VERSION} "*pyopenvdb*")
harvest(xr_openxr_sdk/include/openxr xr_openxr_sdk/include/openxr "*.h")
harvest(xr_openxr_sdk/lib xr_openxr_sdk/lib "*.a")
harvest(osl/bin osl/bin "oslc")
harvest_rpath_bin(osl/bin osl/bin "oslc")
harvest(osl/include osl/include "*.h")
harvest(osl/lib osl/lib "*.a")
harvest(osl/share/OSL/shaders osl/share/OSL/shaders "*.h")
@ -268,6 +268,10 @@ harvest(haru/include haru/include "*.h")
harvest(haru/lib haru/lib "*.a")
harvest(zstd/include zstd/include "*.h")
harvest(zstd/lib zstd/lib "*.a")
harvest(shaderc shaderc "*")
harvest(vulkan_headers vulkan "*")
harvest_rpath_lib(vulkan_loader/lib vulkan/lib "*${SHAREDLIBEXT}*")
harvest(vulkan_loader/loader vulkan/loader "*")
if(UNIX AND NOT APPLE)
harvest(libglu/lib mesa/lib "*${SHAREDLIBEXT}*")

View File

@ -33,6 +33,8 @@ set(MESA_EXTRA_FLAGS
# At some point we will likely want to support Wayland.
# Disable for now since it's not officially supported.
-Dplatforms=x11
# Needed to find the local expat.
--pkg-config-path=${LIBDIR}/expat/lib/pkgconfig
--native-file ${BUILD_DIR}/mesa/tmp/native-file.ini
)
@ -53,4 +55,8 @@ add_dependencies(
external_mesa
ll
external_zlib
# Run-time dependency.
external_expat
# Needed for `MESON`.
external_python_site_packages
)

View File

@ -117,7 +117,7 @@ else()
set(LIBEXT ".a")
set(LIBPREFIX "lib")
set(MESON ${LIBDIR}/python/bin/meson)
if(APPLE)
if(APPLE)
set(SHAREDLIBEXT ".dylib")
# Use same Xcode detection as Blender itself.

View File

@ -5,7 +5,11 @@ if(WIN32 AND BUILD_MODE STREQUAL Debug)
# zstandard is determined to build and link release mode libs in a debug
# configuration, the only way to make it happy is to bend to its will
# and give it a library to link with.
set(PIP_CONFIGURE_COMMAND ${CMAKE_COMMAND} -E copy ${LIBDIR}/python/libs/python${PYTHON_SHORT_VERSION_NO_DOTS}_d.lib ${LIBDIR}/python/libs/python${PYTHON_SHORT_VERSION_NO_DOTS}.lib)
set(
PIP_CONFIGURE_COMMAND ${CMAKE_COMMAND} -E copy
${LIBDIR}/python/libs/python${PYTHON_SHORT_VERSION_NO_DOTS}_d.lib
${LIBDIR}/python/libs/python${PYTHON_SHORT_VERSION_NO_DOTS}.lib
)
else()
set(PIP_CONFIGURE_COMMAND echo ".")
endif()
@ -15,9 +19,23 @@ ExternalProject_Add(external_python_site_packages
CONFIGURE_COMMAND ${PIP_CONFIGURE_COMMAND}
BUILD_COMMAND ""
PREFIX ${BUILD_DIR}/site_packages
# setuptools is downgraded to 63.2.0 (same as python 3.10.8) since numpy 1.23.x seemingly has
# setuptools is downgraded to 63.2.0 (same as python 3.10.8) since numpy 1.23.x seemingly has
# issues building on windows with the newer versions that ships with python 3.10.9+
INSTALL_COMMAND ${PYTHON_BINARY} -m pip install --no-cache-dir ${SITE_PACKAGES_EXTRA} setuptools==63.2.0 cython==${CYTHON_VERSION} idna==${IDNA_VERSION} charset-normalizer==${CHARSET_NORMALIZER_VERSION} urllib3==${URLLIB3_VERSION} certifi==${CERTIFI_VERSION} requests==${REQUESTS_VERSION} zstandard==${ZSTANDARD_VERSION} autopep8==${AUTOPEP8_VERSION} pycodestyle==${PYCODESTYLE_VERSION} toml==${TOML_VERSION} meson==${MESON_VERSION} --no-binary :all:
INSTALL_COMMAND ${PYTHON_BINARY} -m pip install --no-cache-dir ${SITE_PACKAGES_EXTRA}
setuptools==63.2.0
cython==${CYTHON_VERSION}
idna==${IDNA_VERSION}
charset-normalizer==${CHARSET_NORMALIZER_VERSION}
urllib3==${URLLIB3_VERSION}
certifi==${CERTIFI_VERSION}
requests==${REQUESTS_VERSION}
zstandard==${ZSTANDARD_VERSION}
autopep8==${AUTOPEP8_VERSION}
pycodestyle==${PYCODESTYLE_VERSION}
toml==${TOML_VERSION}
meson==${MESON_VERSION}
--no-binary :all:
)
if(USE_PIP_NUMPY)

View File

@ -1,9 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-or-later
set(SNDFILE_EXTRA_ARGS)
set(SNDFILE_ENV PKG_CONFIG_PATH=${mingw_LIBDIR}/ogg/lib/pkgconfig:${mingw_LIBDIR}/vorbis/lib/pkgconfig:${mingw_LIBDIR}/flac/lib/pkgconfig:${mingw_LIBDIR}/opus/lib/pkgconfig:${mingw_LIBDIR})
set(SNDFILE_ENV)
if(WIN32)
set(SNDFILE_ENV PKG_CONFIG_PATH=${mingw_LIBDIR}/ogg/lib/pkgconfig:${mingw_LIBDIR}/vorbis/lib/pkgconfig:${mingw_LIBDIR}/flac/lib/pkgconfig:${mingw_LIBDIR}/opus/lib/pkgconfig:${mingw_LIBDIR})
set(SNDFILE_ENV set ${SNDFILE_ENV} &&)
# Shared for windows because static libs will drag in a libgcc dependency.
set(SNDFILE_OPTIONS --disable-static --enable-shared )
@ -11,6 +12,16 @@ else()
set(SNDFILE_OPTIONS --enable-static --disable-shared )
endif()
if(UNIX AND NOT APPLE)
# NOTE(@campbellbarton): For some reason OPUS is alone in referencing the sub-directory,
# manipulate the package-config file to prevent this from happening.
# There is no problem with applying this change multiple times.
#
# Replace: Cflags: -I${includedir}/opus
# With: Cflags: -I${includedir}
set(SNDFILE_ENV sed -i s/{includedir}\\/opus/{includedir}/g ${LIBDIR}/opus/lib/pkgconfig/opus.pc && ${SNDFILE_ENV})
endif()
ExternalProject_Add(external_sndfile
URL file://${PACKAGE_DIR}/${SNDFILE_FILE}
DOWNLOAD_DIR ${DOWNLOAD_DIR}

View File

@ -203,7 +203,7 @@ set(OSL_FILE OpenShadingLanguage-${OSL_VERSION}.tar.gz)
# NOTE: When updating the python version, it's required to check the versions of
# it wants to use in PCbuild/get_externals.bat for the following dependencies:
# BZIP2, FFI, SQLITE and change the versions in this file as well. For compliance
# BZIP2, FFI, SQLITE and change the versions in this file as well. For compliance
# reasons there can be no exceptions to this.
set(PYTHON_VERSION 3.10.9)
@ -229,20 +229,34 @@ set(OPENVDB_HASH 64301c737e16b26c8f3085a31e6397e9)
set(OPENVDB_HASH_TYPE MD5)
set(OPENVDB_FILE openvdb-${OPENVDB_VERSION}.tar.gz)
# ------------------------------------------------------------------------------
# Python Modules
# Needed by: TODO.
set(IDNA_VERSION 3.3)
# Needed by: TODO.
set(CHARSET_NORMALIZER_VERSION 2.0.10)
# Needed by: TODO.
set(URLLIB3_VERSION 1.26.8)
set(URLLIB3_CPE "cpe:2.3:a:urllib3:urllib3:${URLLIB3_VERSION}:*:*:*:*:*:*:*")
# Needed by: Python's `requests` module (so add-ons can authenticate against trusted certificates).
set(CERTIFI_VERSION 2021.10.8)
# Needed by: Some of Blender's add-ons (to support convenient interaction with online services).
set(REQUESTS_VERSION 2.27.1)
# Needed by: Python's `numpy` module (used by some add-ons).
set(CYTHON_VERSION 0.29.30)
# The version of the zstd library used to build the Python package should match ZSTD_VERSION
# Needed by: Python scripts that read `.blend` files, as files may use Z-standard compression.
# The version of the ZSTD library used to build the Python package should match ZSTD_VERSION
# defined below. At this time of writing, 0.17.0 was already released,
# but built against zstd 1.5.1, while we use 1.5.0.
# but built against ZSTD 1.5.1, while we use 1.5.0.
set(ZSTANDARD_VERSION 0.16.0)
# Auto-format Python source (developer tool, not used by Blender at run-time).
set(AUTOPEP8_VERSION 1.6.0)
# Needed by: `autopep8` (so the version doesn't change on rebuild).
set(PYCODESTYLE_VERSION 2.8.0)
# Needed by: `autopep8` (so the version doesn't change on rebuild).
set(TOML_VERSION 0.10.2)
# Build system for other packages (not used by Blender at run-time).
set(MESON_VERSION 0.63.0)
set(NUMPY_VERSION 1.23.5)

View File

@ -30,6 +30,17 @@ set(VULKAN_LOADER_EXTRA_ARGS
-DVULKAN_HEADERS_INSTALL_DIR=${LIBDIR}/vulkan_headers
)
if(UNIX AND NOT APPLE)
# These are used in `cmake/FindWayland.cmake` from `external_vulkan_loader`.
# NOTE: When upgrading to CMAKE 3.22 we it would be cleaner to use: `PKG_CONFIG_ARGN`,
# so `pkgconfig` would find wayland.
set(VULKAN_LOADER_EXTRA_ARGS
${VULKAN_LOADER_EXTRA_ARGS}
-DPKG_WAYLAND_INCLUDE_DIRS=${LIBDIR}/wayland/include
-DPKG_WAYLAND_LIBRARY_DIRS=${LIBDIR}/wayland/lib64
)
endif()
ExternalProject_Add(external_vulkan_loader
URL file://${PACKAGE_DIR}/${VULKAN_LOADER_FILE}
URL_HASH ${VULKAN_LOADER_HASH_TYPE}=${VULKAN_LOADER_HASH}
@ -43,7 +54,12 @@ add_dependencies(
external_vulkan_headers
)
if(WIN32)
if(UNIX AND NOT APPLE)
add_dependencies(
external_vulkan_loader
external_wayland
)
elseif(WIN32)
if(BUILD_MODE STREQUAL Release)
ExternalProject_Add_Step(external_vulkan_loader after_install
COMMAND ${CMAKE_COMMAND} -E copy_directory ${LIBDIR}/vulkan_loader/ ${HARVEST_TARGET}/vulkan

View File

@ -5,7 +5,6 @@ ExternalProject_Add(external_wayland
DOWNLOAD_DIR ${DOWNLOAD_DIR}
URL_HASH ${WAYLAND_HASH_TYPE}=${WAYLAND_HASH}
PREFIX ${BUILD_DIR}/wayland
PATCH_COMMAND ${PATCH_CMD} -d ${BUILD_DIR}/wayland/src/external_wayland < ${PATCH_DIR}/wayland.diff
# Use `-E` so the `PKG_CONFIG_PATH` can be defined to link against our own LIBEXPAT & LIBXML2.
#
# NOTE: passing link args "ffi/lib" should not be needed, but
@ -24,4 +23,7 @@ add_dependencies(
external_expat
external_xml2
external_ffi
# Needed for `MESON`.
external_python_site_packages
)

View File

@ -15,4 +15,6 @@ ExternalProject_Add(external_wayland_protocols
add_dependencies(
external_wayland_protocols
external_wayland
# Needed for `MESON`.
external_python_site_packages
)

View File

@ -1,11 +0,0 @@
--- meson.build.orig 2022-06-30 22:59:11.000000000 +0100
+++ meson.build 2022-09-27 13:21:26.428517668 +0100
@@ -2,7 +2,7 @@
'wayland', 'c',
version: '1.21.0',
license: 'MIT',
- meson_version: '>= 0.56.0',
+ meson_version: '>= 0.55.1',
default_options: [
'warning_level=2',
'buildtype=debugoptimized',

View File

@ -105,9 +105,10 @@ if(WITH_VULKAN_BACKEND)
set(VULKAN_ROOT_DIR ${LIBDIR}/vulkan/macOS)
set(VULKAN_INCLUDE_DIR ${VULKAN_ROOT_DIR}/include)
set(VULKAN_LIBRARY ${VULKAN_ROOT_DIR}/lib/libvulkan.1.dylib)
set(SHADERC_LIBRARY ${VULKAN_ROOT_DIR}/lib/libshaderc_combined.a)
set(VULKAN_INCLUDE_DIRS ${VULKAN_INCLUDE_DIR} ${MOLTENVK_INCLUDE_DIRS})
set(VULKAN_LIBRARIES ${VULKAN_LIBRARY} ${MOLTENVK_LIBRARIES})
set(VULKAN_LIBRARIES ${VULKAN_LIBRARY} ${SHADERC_LIBRARY} ${MOLTENVK_LIBRARIES})
else()
message(WARNING "Vulkan SDK was not found, disabling WITH_VULKAN_BACKEND")
set(WITH_VULKAN_BACKEND OFF)

View File

@ -163,10 +163,10 @@ void device_hip_info(vector<DeviceInfo> &devices)
/* If device has a kernel timeout and no compute preemption, we assume
* it is connected to a display and will freeze the display while doing
* computations. */
int timeout_attr = 0, preempt_attr = 0;
int timeout_attr = 0;
hipDeviceGetAttribute(&timeout_attr, hipDeviceAttributeKernelExecTimeout, num);
if (timeout_attr && !preempt_attr) {
if (timeout_attr) {
VLOG_INFO << "Device is recognized as display.";
info.description += " (Display)";
info.display_device = true;

View File

@ -26,9 +26,12 @@ class HdCyclesVolumeLoader : public VDBImageLoader {
HdCyclesVolumeLoader(const std::string &filePath, const std::string &gridName)
: VDBImageLoader(gridName)
{
/* Disably delay loading and file copying, this has poor performance
* on network drivers. */
const bool delay_load = false;
openvdb::io::File file(filePath);
file.setCopyMaxBytes(0);
if (file.open()) {
if (file.open(delay_load)) {
grid = file.readGrid(gridName);
}
}

View File

@ -255,8 +255,9 @@ ccl_device_inline bool area_light_sample(const ccl_global KernelLight *klight,
float3 inplane;
if (in_volume_segment) {
/* FIXME: handle rectangular light. */
inplane = ellipse_sample(axis_u * len_u * 0.5f, axis_v * len_v * 0.5f, randu, randv);
inplane = sample_rectangle ?
rectangle_sample(axis_u * len_u * 0.5f, axis_v * len_v * 0.5f, randu, randv) :
ellipse_sample(axis_u * len_u * 0.5f, axis_v * len_v * 0.5f, randu, randv);
ls->P += inplane;
ls->pdf = invarea;
}

View File

@ -30,8 +30,13 @@ typedef struct LightSample {
ccl_device_inline float3 ellipse_sample(float3 ru, float3 rv, float randu, float randv)
{
to_unit_disk(&randu, &randv);
return ru * randu + rv * randv;
const float2 rand = concentric_sample_disk(randu, randv);
return ru * rand.x + rv * rand.y;
}
ccl_device_inline float3 rectangle_sample(float3 ru, float3 rv, float randu, float randv)
{
return ru * (2.0f * randu - 1.0f) + rv * (2.0f * randv - 1.0f);
}
ccl_device float3 disk_light_sample(float3 v, float randu, float randv)

View File

@ -59,41 +59,9 @@ ccl_device_noinline bool light_distribution_sample(KernelGlobals kg,
{
/* Sample light index from distribution. */
const int index = light_distribution_sample(kg, &randu);
ccl_global const KernelLightDistribution *kdistribution = &kernel_data_fetch(light_distribution,
index);
const int prim = kdistribution->prim;
if (prim >= 0) {
/* Mesh light. */
const int object = kdistribution->mesh_light.object_id;
/* Exclude synthetic meshes from shadow catcher pass. */
if ((path_flag & PATH_RAY_SHADOW_CATCHER_PASS) &&
!(kernel_data_fetch(object_flag, object) & SD_OBJECT_SHADOW_CATCHER)) {
return false;
}
const int shader_flag = kdistribution->mesh_light.shader_flag;
if (!triangle_light_sample<in_volume_segment>(kg, prim, object, randu, randv, time, ls, P)) {
return false;
}
ls->shader |= shader_flag;
}
else {
const int lamp = -prim - 1;
if (UNLIKELY(light_select_reached_max_bounces(kg, lamp, bounce))) {
return false;
}
if (!light_sample<in_volume_segment>(kg, lamp, randu, randv, P, path_flag, ls)) {
return false;
}
ls->pdf_selection = kernel_data.integrator.distribution_pdf_lights;
}
ls->pdf *= ls->pdf_selection;
return (ls->pdf > 0.0f);
const float pdf_selection = kernel_data.integrator.distribution_pdf_lights;
return light_sample<in_volume_segment>(
kg, randu, randv, time, P, bounce, path_flag, index, pdf_selection, ls);
}
ccl_device_inline float light_distribution_pdf_lamp(KernelGlobals kg)

View File

@ -14,6 +14,13 @@
CCL_NAMESPACE_BEGIN
/* Light info. */
ccl_device_inline bool light_select_reached_max_bounces(KernelGlobals kg, int index, int bounce)
{
return (bounce > kernel_data_fetch(lights, index).max_bounces);
}
/* Sample point on an individual light. */
template<bool in_volume_segment>
@ -90,6 +97,68 @@ ccl_device_inline bool light_sample(KernelGlobals kg,
return in_volume_segment || (ls->pdf > 0.0f);
}
/* Sample a point on the chosen emitter. */
template<bool in_volume_segment>
ccl_device_noinline bool light_sample(KernelGlobals kg,
const float randu,
const float randv,
const float time,
const float3 P,
const int bounce,
const uint32_t path_flag,
const int emitter_index,
const float pdf_selection,
ccl_private LightSample *ls)
{
int prim;
MeshLight mesh_light;
if (kernel_data.integrator.use_light_tree) {
ccl_global const KernelLightTreeEmitter *kemitter = &kernel_data_fetch(light_tree_emitters,
emitter_index);
prim = kemitter->prim;
mesh_light = kemitter->mesh_light;
}
else {
ccl_global const KernelLightDistribution *kdistribution = &kernel_data_fetch(
light_distribution, emitter_index);
prim = kdistribution->prim;
mesh_light = kdistribution->mesh_light;
}
/* A different value would be assigned in `triangle_light_sample()` if `!use_light_tree`. */
ls->pdf_selection = pdf_selection;
if (prim >= 0) {
/* Mesh light. */
const int object = mesh_light.object_id;
/* Exclude synthetic meshes from shadow catcher pass. */
if ((path_flag & PATH_RAY_SHADOW_CATCHER_PASS) &&
!(kernel_data_fetch(object_flag, object) & SD_OBJECT_SHADOW_CATCHER)) {
return false;
}
const int shader_flag = mesh_light.shader_flag;
if (!triangle_light_sample<in_volume_segment>(kg, prim, object, randu, randv, time, ls, P)) {
return false;
}
ls->shader |= shader_flag;
}
else {
if (UNLIKELY(light_select_reached_max_bounces(kg, ~prim, bounce))) {
return false;
}
if (!light_sample<in_volume_segment>(kg, ~prim, randu, randv, P, path_flag, ls)) {
return false;
}
}
ls->pdf *= ls->pdf_selection;
return in_volume_segment || (ls->pdf > 0.0f);
}
/* Intersect ray with individual light. */
ccl_device bool lights_intersect(KernelGlobals kg,
@ -230,11 +299,4 @@ ccl_device_forceinline void light_update_position(KernelGlobals kg,
}
}
/* Light info. */
ccl_device_inline bool light_select_reached_max_bounces(KernelGlobals kg, int index, int bounce)
{
return (bounce > kernel_data_fetch(lights, index).max_bounces);
}
CCL_NAMESPACE_END

View File

@ -22,16 +22,15 @@
CCL_NAMESPACE_BEGIN
/* TODO: this seems like a relative expensive computation, and we can make it a lot cheaper
* by using a bounding sphere instead of a bounding box. This will be more inaccurate, but it
* might be fine when used along with the adaptive splitting. */
/* TODO: this seems like a relative expensive computation. We can make it a lot cheaper by using a
* bounding sphere instead of a bounding box, but this will reduce the accuracy sometimes. */
ccl_device float light_tree_cos_bounding_box_angle(const BoundingBox bbox,
const float3 P,
const float3 point_to_centroid)
{
if (P.x > bbox.min.x && P.y > bbox.min.y && P.z > bbox.min.z && P.x < bbox.max.x &&
P.y < bbox.max.y && P.z < bbox.max.z) {
/* If P is inside the bbox, `theta_u` covers the whole sphere */
/* If P is inside the bbox, `theta_u` covers the whole sphere. */
return -1.0f;
}
float cos_theta_u = 1.0f;
@ -53,7 +52,7 @@ ccl_device_forceinline float sin_from_cos(const float c)
return safe_sqrtf(1.0f - sqr(c));
}
/* Compute vector v as in Fig .8. P_v is the corresponding point along the ray ccl_device float3 */
/* Compute vector v as in Fig .8. P_v is the corresponding point along the ray. */
ccl_device float3 compute_v(
const float3 centroid, const float3 P, const float3 D, const float3 bcone_axis, const float t)
{
@ -95,12 +94,12 @@ ccl_device void light_tree_importance(const float3 N_or_D,
const float sin_theta_u = sin_from_cos(cos_theta_u);
/* cos(theta_i') in the paper, omitted for volume */
/* cos(theta_i') in the paper, omitted for volume. */
float cos_min_incidence_angle = 1.0f;
float cos_max_incidence_angle = 1.0f;
/* when sampling the light tree for the second time in `shade_volume.h` and when query the pdf in
* `sample.h` */
/* When sampling the light tree for the second time in `shade_volume.h` and when query the pdf in
* `sample.h`. */
const bool in_volume = is_zero(N_or_D);
if (!in_volume_segment && !in_volume) {
const float3 N = N_or_D;
@ -116,7 +115,7 @@ ccl_device void light_tree_importance(const float3 N_or_D,
/* If the node is guaranteed to be behind the surface we're sampling, and the surface is
* opaque, then we can give the node an importance of 0 as it contributes nothing to the
* surface. This is more accurate than the bbox test if we are calculating the importance of
* an emitter with radius */
* an emitter with radius. */
if (!has_transmission && cos_min_incidence_angle < 0) {
return;
}
@ -133,8 +132,8 @@ ccl_device void light_tree_importance(const float3 N_or_D,
float cos_theta_o, sin_theta_o;
fast_sincosf(bcone.theta_o, &sin_theta_o, &cos_theta_o);
/* minimum angle an emitters axis would form with the direction to the shading point,
* cos(theta') in the paper */
/* Minimum angle an emitters axis would form with the direction to the shading point,
* cos(theta') in the paper. */
float cos_min_outgoing_angle;
if ((cos_theta >= cos_theta_u) || (cos_theta_minus_theta_u >= cos_theta_o)) {
/* theta - theta_o - theta_u <= 0 */
@ -151,7 +150,7 @@ ccl_device void light_tree_importance(const float3 N_or_D,
sin_theta_minus_theta_u * sin_theta_o;
}
else {
/* cluster invisible */
/* Cluster is invisible. */
return;
}
@ -190,7 +189,7 @@ ccl_device bool compute_emitter_centroid_and_dir(KernelGlobals kg,
ccl_private float3 &centroid,
ccl_private packed_float3 &dir)
{
const int prim_id = kemitter->prim_id;
const int prim_id = kemitter->prim;
if (prim_id < 0) {
const ccl_global KernelLight *klight = &kernel_data_fetch(lights, ~prim_id);
centroid = klight->co;
@ -200,14 +199,14 @@ ccl_device bool compute_emitter_centroid_and_dir(KernelGlobals kg,
dir = klight->spot.dir;
break;
case LIGHT_POINT:
/* Disk-oriented normal */
/* Disk-oriented normal. */
dir = safe_normalize(P - centroid);
break;
case LIGHT_AREA:
dir = klight->area.dir;
break;
case LIGHT_BACKGROUND:
/* Aarbitrary centroid and direction */
/* Arbitrary centroid and direction. */
centroid = make_float3(0.0f, 0.0f, 1.0f);
dir = make_float3(0.0f, 0.0f, -1.0f);
return !in_volume_segment;
@ -224,14 +223,14 @@ ccl_device bool compute_emitter_centroid_and_dir(KernelGlobals kg,
triangle_world_space_vertices(kg, object, prim_id, -1.0f, vertices);
centroid = (vertices[0] + vertices[1] + vertices[2]) / 3.0f;
if (kemitter->mesh_light.emission_sampling == EMISSION_SAMPLING_FRONT) {
if (kemitter->emission_sampling == EMISSION_SAMPLING_FRONT) {
dir = safe_normalize(cross(vertices[1] - vertices[0], vertices[2] - vertices[0]));
}
else if (kemitter->mesh_light.emission_sampling == EMISSION_SAMPLING_BACK) {
else if (kemitter->emission_sampling == EMISSION_SAMPLING_BACK) {
dir = -safe_normalize(cross(vertices[1] - vertices[0], vertices[2] - vertices[0]));
}
else {
/* Double sided: any vector in the plane. */
/* Double-sided: any vector in the plane. */
dir = safe_normalize(vertices[0] - vertices[1]);
}
}
@ -265,13 +264,13 @@ ccl_device void light_tree_emitter_importance(KernelGlobals kg,
return;
}
const int prim_id = kemitter->prim_id;
const int prim_id = kemitter->prim;
if (in_volume_segment) {
const float3 D = N_or_D;
/* Closest point */
/* Closest point. */
P_c = P + dot(centroid - P, D) * D;
/* minimal distance of the ray to the cluster */
/* Minimal distance of the ray to the cluster. */
distance.x = len(centroid - P_c);
distance.y = distance.x;
point_to_centroid = -compute_v(centroid, P, D, bcone.axis, t);
@ -284,7 +283,7 @@ ccl_device void light_tree_emitter_importance(KernelGlobals kg,
if (prim_id < 0) {
const ccl_global KernelLight *klight = &kernel_data_fetch(lights, ~prim_id);
switch (klight->type) {
/* Function templates only modifies cos_theta_u when in_volume_segment = true */
/* Function templates only modifies cos_theta_u when in_volume_segment = true. */
case LIGHT_SPOT:
is_visible = spot_light_tree_parameters<in_volume_segment>(
klight, centroid, P_c, cos_theta_u, distance, point_to_centroid);
@ -310,7 +309,7 @@ ccl_device void light_tree_emitter_importance(KernelGlobals kg,
return;
}
}
else { /* mesh light */
else { /* Mesh light. */
is_visible = triangle_light_tree_parameters<in_volume_segment>(
kg, kemitter, centroid, P_c, N_or_D, bcone, cos_theta_u, distance, point_to_centroid);
}
@ -346,7 +345,7 @@ ccl_device void light_tree_node_importance(KernelGlobals kg,
max_importance = 0.0f;
min_importance = 0.0f;
if (knode->num_prims == 1) {
/* At a leaf node with only one emitter */
/* At a leaf node with only one emitter. */
light_tree_emitter_importance<in_volume_segment>(
kg, P, N_or_D, t, has_transmission, -knode->child_index, max_importance, min_importance);
}
@ -358,7 +357,7 @@ ccl_device void light_tree_node_importance(KernelGlobals kg,
float cos_theta_u;
float distance;
if (knode->bit_trail == 1) {
/* distant light node */
/* Distant light node. */
if (in_volume_segment) {
return;
}
@ -372,7 +371,7 @@ ccl_device void light_tree_node_importance(KernelGlobals kg,
if (in_volume_segment) {
const float3 D = N_or_D;
const float3 closest_point = P + dot(centroid - P, D) * D;
/* minimal distance of the ray to the cluster */
/* Minimal distance of the ray to the cluster. */
distance = len(centroid - closest_point);
point_to_centroid = -compute_v(centroid, P, D, bcone.axis, t);
cos_theta_u = light_tree_cos_bounding_box_angle(bbox, closest_point, point_to_centroid);
@ -393,7 +392,7 @@ ccl_device void light_tree_node_importance(KernelGlobals kg,
point_to_centroid = normalize_len(centroid - P, &distance);
cos_theta_u = light_tree_cos_bounding_box_angle(bbox, P, point_to_centroid);
}
/* clamp distance to half the radius of the cluster when splitting is disabled */
/* Clamp distance to half the radius of the cluster when splitting is disabled. */
distance = fmaxf(0.5f * len(centroid - bbox.max), distance);
}
/* TODO: currently max_distance = min_distance, max_importance = min_importance for the
@ -436,8 +435,8 @@ ccl_device void sample_resevoir(const int current_index,
return;
}
/* pick an emitter from a leaf node using resevoir sampling, keep two reservoirs for upper and
* lower bounds */
/* Pick an emitter from a leaf node using resevoir sampling, keep two reservoirs for upper and
* lower bounds. */
template<bool in_volume_segment>
ccl_device int light_tree_cluster_select_emitter(KernelGlobals kg,
ccl_private float &rand,
@ -452,11 +451,11 @@ ccl_device int light_tree_cluster_select_emitter(KernelGlobals kg,
float total_importance[2] = {0.0f, 0.0f};
int selected_index = -1;
/* Mark emitters with zero importance. Used for resevoir when total minimum importance = 0 */
/* Mark emitters with zero importance. Used for resevoir when total minimum importance = 0. */
kernel_assert(knode->num_prims <= sizeof(uint) * 8);
uint has_importance = 0;
const bool sample_max = (rand > 0.5f); /* sampling using the maximum importance */
const bool sample_max = (rand > 0.5f); /* Sampling using the maximum importance. */
rand = rand * 2.0f - float(sample_max);
for (int i = 0; i < knode->num_prims; i++) {
@ -485,7 +484,7 @@ ccl_device int light_tree_cluster_select_emitter(KernelGlobals kg,
}
if (total_importance[1] == 0.0f) {
/* uniformly sample emitters with positive maximum importance */
/* Uniformly sample emitters with positive maximum importance. */
if (sample_max) {
selected_importance[1] = 1.0f;
total_importance[1] = float(popcount(has_importance));
@ -540,7 +539,7 @@ ccl_device bool get_left_probability(KernelGlobals kg,
}
const float total_min_importance = min_left_importance + min_right_importance;
/* average two probabilities of picking the left child node using lower and upper bounds */
/* Average two probabilities of picking the left child node using lower and upper bounds. */
const float probability_max = max_left_importance / total_max_importance;
const float probability_min = total_min_importance > 0 ?
min_left_importance / total_min_importance :
@ -569,31 +568,31 @@ ccl_device_noinline bool light_tree_sample(KernelGlobals kg,
const bool has_transmission = (shader_flags & SD_BSDF_HAS_TRANSMISSION);
float pdf_leaf = 1.0f;
float pdf_emitter_from_leaf = 1.0f;
int selected_light = -1;
float pdf_selection = 1.0f;
int selected_emitter = -1;
int node_index = 0; /* root node */
int node_index = 0; /* Root node. */
/* Traverse the light tree until a leaf node is reached. */
while (true) {
const ccl_global KernelLightTreeNode *knode = &kernel_data_fetch(light_tree_nodes, node_index);
if (knode->child_index <= 0) {
/* At a leaf node, we pick an emitter */
selected_light = light_tree_cluster_select_emitter<in_volume_segment>(
kg, randv, P, N_or_D, t, has_transmission, knode, &pdf_emitter_from_leaf);
/* At a leaf node, we pick an emitter. */
selected_emitter = light_tree_cluster_select_emitter<in_volume_segment>(
kg, randv, P, N_or_D, t, has_transmission, knode, &pdf_selection);
break;
}
/* At an interior node, the left child is directly after the parent,
* while the right child is stored as the child index. */
/* At an interior node, the left child is directly after the parent, while the right child is
* stored as the child index. */
const int left_index = node_index + 1;
const int right_index = knode->child_index;
float left_prob;
if (!get_left_probability<in_volume_segment>(
kg, P, N_or_D, t, has_transmission, left_index, right_index, left_prob)) {
return false; /* both child nodes have zero importance */
return false; /* Both child nodes have zero importance. */
}
float discard;
@ -603,46 +602,14 @@ ccl_device_noinline bool light_tree_sample(KernelGlobals kg,
pdf_leaf *= (node_index == left_index) ? left_prob : (1.0f - left_prob);
}
if (selected_light < 0) {
if (selected_emitter < 0) {
return false;
}
/* Sample a point on the chosen emitter */
ccl_global const KernelLightTreeEmitter *kemitter = &kernel_data_fetch(light_tree_emitters,
selected_light);
pdf_selection *= pdf_leaf;
/* TODO: this is the same code as light_distribution_sample, except the index is determined
* differently. Would it be better to refactor this into a separate function? */
const int prim = kemitter->prim_id;
if (prim >= 0) {
/* Mesh light. */
const int object = kemitter->mesh_light.object_id;
/* Exclude synthetic meshes from shadow catcher pass. */
if ((path_flag & PATH_RAY_SHADOW_CATCHER_PASS) &&
!(kernel_data_fetch(object_flag, object) & SD_OBJECT_SHADOW_CATCHER)) {
return false;
}
const int mesh_shader_flag = kemitter->mesh_light.shader_flag;
if (!triangle_light_sample<in_volume_segment>(kg, prim, object, randu, randv, time, ls, P)) {
return false;
}
ls->shader |= mesh_shader_flag;
}
else {
if (UNLIKELY(light_select_reached_max_bounces(kg, ~prim, bounce))) {
return false;
}
if (!light_sample<in_volume_segment>(kg, ~prim, randu, randv, P, path_flag, ls)) {
return false;
}
}
ls->pdf_selection = pdf_leaf * pdf_emitter_from_leaf;
ls->pdf *= ls->pdf_selection;
return (ls->pdf > 0);
return light_sample<in_volume_segment>(
kg, randu, randv, time, P, bounce, path_flag, selected_emitter, pdf_selection, ls);
}
/* We need to be able to find the probability of selecting a given light for MIS. */
@ -650,7 +617,7 @@ ccl_device float light_tree_pdf(
KernelGlobals kg, const float3 P, const float3 N, const int path_flag, const int prim)
{
const bool has_transmission = (path_flag & PATH_RAY_MIS_HAD_TRANSMISSION);
/* Target emitter info */
/* Target emitter info. */
const int target_emitter = (prim >= 0) ? kernel_data_fetch(triangle_to_tree, prim) :
kernel_data_fetch(light_to_tree, ~prim);
ccl_global const KernelLightTreeEmitter *kemitter = &kernel_data_fetch(light_tree_emitters,
@ -659,11 +626,11 @@ ccl_device float light_tree_pdf(
ccl_global const KernelLightTreeNode *kleaf = &kernel_data_fetch(light_tree_nodes, target_leaf);
uint bit_trail = kleaf->bit_trail;
int node_index = 0; /* root node */
int node_index = 0; /* Root node. */
float pdf = 1.0f;
/* Traverse the light tree until we reach the target leaf node */
/* Traverse the light tree until we reach the target leaf node. */
while (true) {
const ccl_global KernelLightTreeNode *knode = &kernel_data_fetch(light_tree_nodes, node_index);
@ -671,7 +638,7 @@ ccl_device float light_tree_pdf(
break;
}
/* Interior node */
/* Interior node. */
const int left_index = node_index + 1;
const int right_index = knode->child_index;

View File

@ -306,7 +306,7 @@ ccl_device_forceinline bool triangle_light_tree_parameters(
const int object = kemitter->mesh_light.object_id;
float3 vertices[3];
triangle_world_space_vertices(kg, object, kemitter->prim_id, -1.0f, vertices);
triangle_world_space_vertices(kg, object, kemitter->prim, -1.0f, vertices);
bool shape_above_surface = false;
for (int i = 0; i < 3; i++) {

View File

@ -1338,13 +1338,15 @@ typedef struct KernelLight {
} KernelLight;
static_assert_align(KernelLight, 16);
using MeshLight = struct MeshLight {
int shader_flag;
int object_id;
};
typedef struct KernelLightDistribution {
float totarea;
int prim;
struct {
int shader_flag;
int object_id;
} mesh_light;
MeshLight mesh_light;
} KernelLightDistribution;
static_assert_align(KernelLightDistribution, 16);
@ -1393,12 +1395,9 @@ typedef struct KernelLightTreeEmitter {
float energy;
/* prim_id denotes the location in the lights or triangles array. */
int prim_id;
struct {
int shader_flag;
int object_id;
EmissionSampling emission_sampling;
} mesh_light;
int prim;
MeshLight mesh_light;
EmissionSampling emission_sampling;
/* Parent. */
int parent_index;

View File

@ -386,6 +386,46 @@ void ConstantFolder::fold_mix_color(NodeMix type, bool clamp_factor, bool clamp)
}
}
void ConstantFolder::fold_mix_float(bool clamp_factor, bool clamp) const
{
ShaderInput *fac_in = node->input("Factor");
ShaderInput *float1_in = node->input("A");
ShaderInput *float2_in = node->input("B");
float fac = clamp_factor ? saturatef(node->get_float(fac_in->socket_type)) :
node->get_float(fac_in->socket_type);
bool fac_is_zero = !fac_in->link && fac == 0.0f;
bool fac_is_one = !fac_in->link && fac == 1.0f;
/* remove no-op node when factor is 0.0 */
if (fac_is_zero) {
if (try_bypass_or_make_constant(float1_in, clamp)) {
return;
}
}
/* remove useless mix floats nodes */
if (float1_in->link && float2_in->link) {
if (float1_in->link == float2_in->link) {
try_bypass_or_make_constant(float1_in, clamp);
return;
}
}
else if (!float1_in->link && !float2_in->link) {
float value1 = node->get_float(float1_in->socket_type);
float value2 = node->get_float(float2_in->socket_type);
if (value1 == value2) {
try_bypass_or_make_constant(float1_in, clamp);
return;
}
}
/* remove no-op mix float node when factor is 1.0 */
if (fac_is_one) {
try_bypass_or_make_constant(float2_in, clamp);
return;
}
}
void ConstantFolder::fold_math(NodeMathType type) const
{
ShaderInput *value1_in = node->input("Value1");

View File

@ -52,6 +52,7 @@ class ConstantFolder {
/* Specific nodes. */
void fold_mix(NodeMix type, bool clamp) const;
void fold_mix_color(NodeMix type, bool clamp_factor, bool clamp) const;
void fold_mix_float(bool clamp_factor, bool clamp) const;
void fold_math(NodeMathType type) const;
void fold_vector_math(NodeVectorMathType type) const;
void fold_mapping(NodeMappingType type) const;

View File

@ -88,7 +88,7 @@ NODE_DEFINE(Film)
{
NodeType *type = NodeType::add("film", create);
SOCKET_FLOAT(exposure, "Exposure", 0.8f);
SOCKET_FLOAT(exposure, "Exposure", 1.0f);
SOCKET_FLOAT(pass_alpha_threshold, "Pass Alpha Threshold", 0.0f);
static NodeEnum filter_enum;

View File

@ -253,7 +253,7 @@ void Integrator::device_update(Device *device, DeviceScene *dscene, Scene *scene
kintegrator->use_light_tree = scene->integrator->use_light_tree;
if (light_sampling_threshold > 0.0f) {
kintegrator->light_inv_rr_threshold = 1.0f / light_sampling_threshold;
kintegrator->light_inv_rr_threshold = scene->film->get_exposure() / light_sampling_threshold;
}
else {
kintegrator->light_inv_rr_threshold = 0.0f;

View File

@ -616,18 +616,16 @@ void LightManager::device_update_tree(Device *,
shader_flag |= SHADER_EXCLUDE_SHADOW_CATCHER;
}
light_tree_emitters[emitter_index].prim_id = prim.prim_id + mesh->prim_offset;
light_tree_emitters[emitter_index].prim = prim.prim_id + mesh->prim_offset;
light_tree_emitters[emitter_index].mesh_light.shader_flag = shader_flag;
light_tree_emitters[emitter_index].mesh_light.emission_sampling =
shader->emission_sampling;
light_tree_emitters[emitter_index].emission_sampling = shader->emission_sampling;
triangle_array[prim.prim_id + object_lookup_offsets[prim.object_id]] = emitter_index;
}
else {
light_tree_emitters[emitter_index].prim_id = prim.prim_id;
light_tree_emitters[emitter_index].prim = prim.prim_id;
light_tree_emitters[emitter_index].mesh_light.shader_flag = 0;
light_tree_emitters[emitter_index].mesh_light.object_id = OBJECT_NONE;
light_tree_emitters[emitter_index].mesh_light.emission_sampling =
EMISSION_SAMPLING_FRONT_BACK;
light_tree_emitters[emitter_index].emission_sampling = EMISSION_SAMPLING_FRONT_BACK;
light_array[~prim.prim_id] = emitter_index;
}
@ -726,7 +724,6 @@ void LightManager::device_update_background(Device *device,
foreach (ShaderNode *node, shader->graph->nodes) {
if (node->type == EnvironmentTextureNode::get_node_type()) {
EnvironmentTextureNode *env = (EnvironmentTextureNode *)node;
ImageMetaData metadata;
if (!env->handle.empty()) {
ImageMetaData metadata = env->handle.metadata();
environment_res.x = max(environment_res.x, (int)metadata.width);

View File

@ -181,7 +181,9 @@ LightTreePrimitive::LightTreePrimitive(Scene *scene, int prim_id, int object_id)
strength *= lamp->get_shader()->emission_estimate;
}
energy = average(strength);
/* Use absolute value of energy so lights with negative strength are properly
* supported in the light tree. */
energy = fabsf(average(strength));
}
}

View File

@ -349,7 +349,7 @@ void Shader::estimate_emission()
}
ShaderInput *surf = graph->output()->input("Surface");
emission_estimate = output_estimate_emission(surf->link, emission_is_constant);
emission_estimate = fabs(output_estimate_emission(surf->link, emission_is_constant));
if (is_zero(emission_estimate)) {
emission_sampling = EMISSION_SAMPLING_NONE;

View File

@ -5132,6 +5132,9 @@ void MixFloatNode::constant_fold(const ConstantFolder &folder)
}
folder.make_constant(a * (1 - fac) + b * fac);
}
else {
folder.fold_mix_float(use_clamp, false);
}
}
/* Mix Vector */
@ -5185,6 +5188,9 @@ void MixVectorNode::constant_fold(const ConstantFolder &folder)
}
folder.make_constant(a * (one_float3() - fac) + b * fac);
}
else {
folder.fold_mix_color(NODE_MIX_BLEND, use_clamp, false);
}
}
/* Mix Vector Non Uniform */

View File

@ -32,8 +32,10 @@ class MotionPathButtonsPanel:
col.prop(mps, "frame_step", text="Step")
elif mps.type == 'RANGE':
col = layout.column(align=True)
col.prop(mps, "frame_start", text="Frame Range Start")
col.prop(mps, "frame_end", text="End")
start_end_group = col.column(align=True)
start_end_group.active = mps.range == 'MANUAL'
start_end_group.prop(mps, "frame_start", text="Frame Range Start")
start_end_group.prop(mps, "frame_end", text="End")
col.prop(mps, "frame_step", text="Step")
# Calculation Range

View File

@ -46,10 +46,13 @@ blender_add_lib(bf_asset_system "${SRC}" "${INC}" "${INC_SYS}" "${LIB}")
if(WITH_GTESTS)
set(TEST_SRC
tests/asset_catalog_test.cc
tests/asset_catalog_path_test.cc
tests/asset_catalog_test.cc
tests/asset_catalog_tree_test.cc
tests/asset_library_service_test.cc
tests/asset_library_test.cc
tests/asset_library_test_common.hh
)
set(TEST_LIB
bf_asset_system

View File

@ -17,6 +17,8 @@
#include "testing/testing.h"
#include "asset_library_test_common.hh"
namespace blender::asset_system::tests {
/* UUIDs from lib/tests/asset_library/blender_assets.cats.txt */
@ -76,130 +78,8 @@ class TestableAssetCatalogService : public AssetCatalogService {
}
};
class AssetCatalogTest : public testing::Test {
class AssetCatalogTest : public AssetLibraryTestBase {
protected:
CatalogFilePath asset_library_root_;
CatalogFilePath temp_library_path_;
static void SetUpTestSuite()
{
testing::Test::SetUpTestSuite();
CLG_init();
}
static void TearDownTestSuite()
{
CLG_exit();
testing::Test::TearDownTestSuite();
}
void SetUp() override
{
const std::string test_files_dir = blender::tests::flags_test_asset_dir();
if (test_files_dir.empty()) {
FAIL();
}
asset_library_root_ = test_files_dir + SEP_STR + "asset_library";
temp_library_path_ = "";
}
void TearDown() override
{
if (!temp_library_path_.empty()) {
BLI_delete(temp_library_path_.c_str(), true, true);
temp_library_path_ = "";
}
}
/* Register a temporary path, which will be removed at the end of the test.
* The returned path ends in a slash. */
CatalogFilePath use_temp_path()
{
BKE_tempdir_init("");
const CatalogFilePath tempdir = BKE_tempdir_session();
temp_library_path_ = tempdir + "test-temporary-path" + SEP_STR;
return temp_library_path_;
}
CatalogFilePath create_temp_path()
{
CatalogFilePath path = use_temp_path();
BLI_dir_create_recursive(path.c_str());
return path;
}
void assert_expected_item(const AssetCatalogPath &expected_path,
const AssetCatalogTreeItem &actual_item)
{
if (expected_path != actual_item.catalog_path().str()) {
/* This will fail, but with a nicer error message than just calling FAIL(). */
EXPECT_EQ(expected_path, actual_item.catalog_path());
return;
}
/* Is the catalog name as expected? "character", "Ellie", ... */
EXPECT_EQ(expected_path.name(), actual_item.get_name());
/* Does the computed number of parents match? */
const std::string expected_path_str = expected_path.str();
const size_t expected_parent_count = std::count(
expected_path_str.begin(), expected_path_str.end(), AssetCatalogPath::SEPARATOR);
EXPECT_EQ(expected_parent_count, actual_item.count_parents());
}
/**
* Recursively iterate over all tree items using #AssetCatalogTree::foreach_item() and check if
* the items map exactly to \a expected_paths.
*/
void assert_expected_tree_items(AssetCatalogTree *tree,
const std::vector<AssetCatalogPath> &expected_paths)
{
int i = 0;
tree->foreach_item([&](const AssetCatalogTreeItem &actual_item) {
ASSERT_LT(i, expected_paths.size())
<< "More catalogs in tree than expected; did not expect " << actual_item.catalog_path();
assert_expected_item(expected_paths[i], actual_item);
i++;
});
}
/**
* Iterate over the root items of \a tree and check if the items map exactly to \a
* expected_paths. Similar to #assert_expected_tree_items() but calls
* #AssetCatalogTree::foreach_root_item() instead of #AssetCatalogTree::foreach_item().
*/
void assert_expected_tree_root_items(AssetCatalogTree *tree,
const std::vector<AssetCatalogPath> &expected_paths)
{
int i = 0;
tree->foreach_root_item([&](const AssetCatalogTreeItem &actual_item) {
ASSERT_LT(i, expected_paths.size())
<< "More catalogs in tree root than expected; did not expect "
<< actual_item.catalog_path();
assert_expected_item(expected_paths[i], actual_item);
i++;
});
}
/**
* Iterate over the child items of \a parent_item and check if the items map exactly to \a
* expected_paths. Similar to #assert_expected_tree_items() but calls
* #AssetCatalogTreeItem::foreach_child() instead of #AssetCatalogTree::foreach_item().
*/
void assert_expected_tree_item_child_items(AssetCatalogTreeItem *parent_item,
const std::vector<AssetCatalogPath> &expected_paths)
{
int i = 0;
parent_item->foreach_child([&](const AssetCatalogTreeItem &actual_item) {
ASSERT_LT(i, expected_paths.size())
<< "More catalogs in tree item than expected; did not expect "
<< actual_item.catalog_path();
assert_expected_item(expected_paths[i], actual_item);
i++;
});
}
/* Used by on_blendfile_save__from_memory_into_existing_asset_lib* test functions. */
void save_from_memory_into_existing_asset_lib(const bool should_top_level_cdf_exist)
{
@ -356,149 +236,6 @@ TEST_F(AssetCatalogTest, is_first_loaded_flag)
<< "The first-seen definition of a catalog should be returned";
}
TEST_F(AssetCatalogTest, insert_item_into_tree)
{
{
AssetCatalogTree tree;
std::unique_ptr<AssetCatalog> catalog_empty_path = AssetCatalog::from_path("");
tree.insert_item(*catalog_empty_path);
assert_expected_tree_items(&tree, {});
}
{
AssetCatalogTree tree;
std::unique_ptr<AssetCatalog> catalog = AssetCatalog::from_path("item");
tree.insert_item(*catalog);
assert_expected_tree_items(&tree, {"item"});
/* Insert child after parent already exists. */
std::unique_ptr<AssetCatalog> child_catalog = AssetCatalog::from_path("item/child");
tree.insert_item(*catalog);
assert_expected_tree_items(&tree, {"item", "item/child"});
std::vector<AssetCatalogPath> expected_paths;
/* Test inserting multi-component sub-path. */
std::unique_ptr<AssetCatalog> grandgrandchild_catalog = AssetCatalog::from_path(
"item/child/grandchild/grandgrandchild");
tree.insert_item(*catalog);
expected_paths = {
"item", "item/child", "item/child/grandchild", "item/child/grandchild/grandgrandchild"};
assert_expected_tree_items(&tree, expected_paths);
std::unique_ptr<AssetCatalog> root_level_catalog = AssetCatalog::from_path("root level");
tree.insert_item(*catalog);
expected_paths = {"item",
"item/child",
"item/child/grandchild",
"item/child/grandchild/grandgrandchild",
"root level"};
assert_expected_tree_items(&tree, expected_paths);
}
{
AssetCatalogTree tree;
std::unique_ptr<AssetCatalog> catalog = AssetCatalog::from_path("item/child");
tree.insert_item(*catalog);
assert_expected_tree_items(&tree, {"item", "item/child"});
}
{
AssetCatalogTree tree;
std::unique_ptr<AssetCatalog> catalog = AssetCatalog::from_path("white space");
tree.insert_item(*catalog);
assert_expected_tree_items(&tree, {"white space"});
}
{
AssetCatalogTree tree;
std::unique_ptr<AssetCatalog> catalog = AssetCatalog::from_path("/item/white space");
tree.insert_item(*catalog);
assert_expected_tree_items(&tree, {"item", "item/white space"});
}
{
AssetCatalogTree tree;
std::unique_ptr<AssetCatalog> catalog_unicode_path = AssetCatalog::from_path("Ružena");
tree.insert_item(*catalog_unicode_path);
assert_expected_tree_items(&tree, {"Ružena"});
catalog_unicode_path = AssetCatalog::from_path("Ružena/Ružena");
tree.insert_item(*catalog_unicode_path);
assert_expected_tree_items(&tree, {"Ružena", "Ružena/Ružena"});
}
}
TEST_F(AssetCatalogTest, load_single_file_into_tree)
{
AssetCatalogService service(asset_library_root_);
service.load_from_disk(asset_library_root_ + SEP_STR + "blender_assets.cats.txt");
/* Contains not only paths from the CDF but also the missing parents (implicitly defined
* catalogs). */
std::vector<AssetCatalogPath> expected_paths{
"character",
"character/Ellie",
"character/Ellie/backslashes",
"character/Ellie/poselib",
"character/Ellie/poselib/tailslash",
"character/Ellie/poselib/white space",
"character/Ružena",
"character/Ružena/poselib",
"character/Ružena/poselib/face",
"character/Ružena/poselib/hand",
"path", /* Implicit. */
"path/without", /* Implicit. */
"path/without/simplename", /* From CDF. */
};
AssetCatalogTree *tree = service.get_catalog_tree();
assert_expected_tree_items(tree, expected_paths);
}
TEST_F(AssetCatalogTest, foreach_in_tree)
{
{
AssetCatalogTree tree{};
const std::vector<AssetCatalogPath> no_catalogs{};
assert_expected_tree_items(&tree, no_catalogs);
assert_expected_tree_root_items(&tree, no_catalogs);
/* Need a root item to check child items. */
std::unique_ptr<AssetCatalog> catalog = AssetCatalog::from_path("something");
tree.insert_item(*catalog);
tree.foreach_root_item([&no_catalogs, this](AssetCatalogTreeItem &item) {
assert_expected_tree_item_child_items(&item, no_catalogs);
});
}
AssetCatalogService service(asset_library_root_);
service.load_from_disk(asset_library_root_ + SEP_STR + "blender_assets.cats.txt");
std::vector<AssetCatalogPath> expected_root_items{{"character", "path"}};
AssetCatalogTree *tree = service.get_catalog_tree();
assert_expected_tree_root_items(tree, expected_root_items);
/* Test if the direct children of the root item are what's expected. */
std::vector<std::vector<AssetCatalogPath>> expected_root_child_items = {
/* Children of the "character" root item. */
{"character/Ellie", "character/Ružena"},
/* Children of the "path" root item. */
{"path/without"},
};
int i = 0;
tree->foreach_root_item([&expected_root_child_items, &i, this](AssetCatalogTreeItem &item) {
assert_expected_tree_item_child_items(&item, expected_root_child_items[i]);
i++;
});
}
TEST_F(AssetCatalogTest, find_catalog_by_path)
{
TestableAssetCatalogService service(asset_library_root_);
@ -832,7 +569,7 @@ TEST_F(AssetCatalogTest, delete_catalog_leaf)
};
AssetCatalogTree *tree = service.get_catalog_tree();
assert_expected_tree_items(tree, expected_paths);
AssetCatalogTreeTestFunctions::expect_tree_items(tree, expected_paths);
}
TEST_F(AssetCatalogTest, delete_catalog_parent_by_id)
@ -886,7 +623,7 @@ TEST_F(AssetCatalogTest, delete_catalog_parent_by_path)
};
AssetCatalogTree *tree = service.get_catalog_tree();
assert_expected_tree_items(tree, expected_paths);
AssetCatalogTreeTestFunctions::expect_tree_items(tree, expected_paths);
}
TEST_F(AssetCatalogTest, delete_catalog_write_to_disk)

View File

@ -0,0 +1,241 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2020 Blender Foundation. All rights reserved. */
#include "AS_asset_catalog.hh"
#include "AS_asset_catalog_tree.hh"
#include "BKE_appdir.h"
#include "BKE_preferences.h"
#include "BLI_fileops.h"
#include "BLI_path_util.h"
#include "DNA_asset_types.h"
#include "DNA_userdef_types.h"
#include "CLG_log.h"
#include "testing/testing.h"
#include "asset_library_test_common.hh"
namespace blender::asset_system::tests {
static void compare_item_with_path(const AssetCatalogPath &expected_path,
const AssetCatalogTreeItem &actual_item)
{
if (expected_path != actual_item.catalog_path().str()) {
/* This will fail, but with a nicer error message than just calling FAIL(). */
EXPECT_EQ(expected_path, actual_item.catalog_path());
return;
}
/* Is the catalog name as expected? "character", "Ellie", ... */
EXPECT_EQ(expected_path.name(), actual_item.get_name());
/* Does the computed number of parents match? */
const std::string expected_path_str = expected_path.str();
const size_t expected_parent_count = std::count(
expected_path_str.begin(), expected_path_str.end(), AssetCatalogPath::SEPARATOR);
EXPECT_EQ(expected_parent_count, actual_item.count_parents());
}
/**
* Recursively iterate over all tree items using #AssetCatalogTree::foreach_item() and check if
* the items map exactly to \a expected_paths.
*/
void AssetCatalogTreeTestFunctions::expect_tree_items(
AssetCatalogTree *tree, const std::vector<AssetCatalogPath> &expected_paths)
{
int i = 0;
tree->foreach_item([&](const AssetCatalogTreeItem &actual_item) {
ASSERT_LT(i, expected_paths.size())
<< "More catalogs in tree than expected; did not expect " << actual_item.catalog_path();
compare_item_with_path(expected_paths[i], actual_item);
i++;
});
}
/**
* Iterate over the root items of \a tree and check if the items map exactly to \a
* expected_paths. Similar to #assert_expected_tree_items() but calls
* #AssetCatalogTree::foreach_root_item() instead of #AssetCatalogTree::foreach_item().
*/
void AssetCatalogTreeTestFunctions::expect_tree_root_items(
AssetCatalogTree *tree, const std::vector<AssetCatalogPath> &expected_paths)
{
int i = 0;
tree->foreach_root_item([&](const AssetCatalogTreeItem &actual_item) {
ASSERT_LT(i, expected_paths.size())
<< "More catalogs in tree root than expected; did not expect "
<< actual_item.catalog_path();
compare_item_with_path(expected_paths[i], actual_item);
i++;
});
}
/**
* Iterate over the child items of \a parent_item and check if the items map exactly to \a
* expected_paths. Similar to #assert_expected_tree_items() but calls
* #AssetCatalogTreeItem::foreach_child() instead of #AssetCatalogTree::foreach_item().
*/
void AssetCatalogTreeTestFunctions::expect_tree_item_child_items(
AssetCatalogTreeItem *parent_item, const std::vector<AssetCatalogPath> &expected_paths)
{
int i = 0;
parent_item->foreach_child([&](const AssetCatalogTreeItem &actual_item) {
ASSERT_LT(i, expected_paths.size())
<< "More catalogs in tree item than expected; did not expect "
<< actual_item.catalog_path();
compare_item_with_path(expected_paths[i], actual_item);
i++;
});
}
class AssetCatalogTreeTest : public AssetLibraryTestBase, public AssetCatalogTreeTestFunctions {
};
TEST_F(AssetCatalogTreeTest, insert_item_into_tree)
{
{
AssetCatalogTree tree;
std::unique_ptr<AssetCatalog> catalog_empty_path = AssetCatalog::from_path("");
tree.insert_item(*catalog_empty_path);
expect_tree_items(&tree, {});
}
{
AssetCatalogTree tree;
std::unique_ptr<AssetCatalog> catalog = AssetCatalog::from_path("item");
tree.insert_item(*catalog);
expect_tree_items(&tree, {"item"});
/* Insert child after parent already exists. */
std::unique_ptr<AssetCatalog> child_catalog = AssetCatalog::from_path("item/child");
tree.insert_item(*catalog);
expect_tree_items(&tree, {"item", "item/child"});
std::vector<AssetCatalogPath> expected_paths;
/* Test inserting multi-component sub-path. */
std::unique_ptr<AssetCatalog> grandgrandchild_catalog = AssetCatalog::from_path(
"item/child/grandchild/grandgrandchild");
tree.insert_item(*catalog);
expected_paths = {
"item", "item/child", "item/child/grandchild", "item/child/grandchild/grandgrandchild"};
expect_tree_items(&tree, expected_paths);
std::unique_ptr<AssetCatalog> root_level_catalog = AssetCatalog::from_path("root level");
tree.insert_item(*catalog);
expected_paths = {"item",
"item/child",
"item/child/grandchild",
"item/child/grandchild/grandgrandchild",
"root level"};
expect_tree_items(&tree, expected_paths);
}
{
AssetCatalogTree tree;
std::unique_ptr<AssetCatalog> catalog = AssetCatalog::from_path("item/child");
tree.insert_item(*catalog);
expect_tree_items(&tree, {"item", "item/child"});
}
{
AssetCatalogTree tree;
std::unique_ptr<AssetCatalog> catalog = AssetCatalog::from_path("white space");
tree.insert_item(*catalog);
expect_tree_items(&tree, {"white space"});
}
{
AssetCatalogTree tree;
std::unique_ptr<AssetCatalog> catalog = AssetCatalog::from_path("/item/white space");
tree.insert_item(*catalog);
expect_tree_items(&tree, {"item", "item/white space"});
}
{
AssetCatalogTree tree;
std::unique_ptr<AssetCatalog> catalog_unicode_path = AssetCatalog::from_path("Ružena");
tree.insert_item(*catalog_unicode_path);
expect_tree_items(&tree, {"Ružena"});
catalog_unicode_path = AssetCatalog::from_path("Ružena/Ružena");
tree.insert_item(*catalog_unicode_path);
expect_tree_items(&tree, {"Ružena", "Ružena/Ružena"});
}
}
TEST_F(AssetCatalogTreeTest, load_single_file_into_tree)
{
AssetCatalogService service(asset_library_root_);
service.load_from_disk(asset_library_root_ + SEP_STR + "blender_assets.cats.txt");
/* Contains not only paths from the CDF but also the missing parents (implicitly defined
* catalogs). */
std::vector<AssetCatalogPath> expected_paths{
"character",
"character/Ellie",
"character/Ellie/backslashes",
"character/Ellie/poselib",
"character/Ellie/poselib/tailslash",
"character/Ellie/poselib/white space",
"character/Ružena",
"character/Ružena/poselib",
"character/Ružena/poselib/face",
"character/Ružena/poselib/hand",
"path", /* Implicit. */
"path/without", /* Implicit. */
"path/without/simplename", /* From CDF. */
};
AssetCatalogTree *tree = service.get_catalog_tree();
expect_tree_items(tree, expected_paths);
}
TEST_F(AssetCatalogTreeTest, foreach_in_tree)
{
{
AssetCatalogTree tree{};
const std::vector<AssetCatalogPath> no_catalogs{};
expect_tree_items(&tree, no_catalogs);
expect_tree_root_items(&tree, no_catalogs);
/* Need a root item to check child items. */
std::unique_ptr<AssetCatalog> catalog = AssetCatalog::from_path("something");
tree.insert_item(*catalog);
tree.foreach_root_item([&no_catalogs](AssetCatalogTreeItem &item) {
expect_tree_item_child_items(&item, no_catalogs);
});
}
AssetCatalogService service(asset_library_root_);
service.load_from_disk(asset_library_root_ + SEP_STR + "blender_assets.cats.txt");
std::vector<AssetCatalogPath> expected_root_items{{"character", "path"}};
AssetCatalogTree *tree = service.get_catalog_tree();
expect_tree_root_items(tree, expected_root_items);
/* Test if the direct children of the root item are what's expected. */
std::vector<std::vector<AssetCatalogPath>> expected_root_child_items = {
/* Children of the "character" root item. */
{"character/Ellie", "character/Ružena"},
/* Children of the "path" root item. */
{"path/without"},
};
int i = 0;
tree->foreach_root_item([&expected_root_child_items, &i](AssetCatalogTreeItem &item) {
expect_tree_item_child_items(&item, expected_root_child_items[i]);
i++;
});
}
} // namespace blender::asset_system::tests

View File

@ -0,0 +1,109 @@
/* SPDX-License-Identifier: Apache-2.0 */
#pragma once
#include <string>
#include <vector>
#include "BKE_appdir.h"
#include "BLI_fileops.h"
#include "BLI_path_util.h"
#include "CLG_log.h"
#include "testing/testing.h"
namespace blender::asset_system {
class AssetCatalogTree;
class AssetCatalogTreeItem;
class AssetCatalogPath;
} // namespace blender::asset_system
namespace blender::asset_system::tests {
/**
* Functionality to setup and access directories on disk within which asset library related testing
* can be done.
*/
class AssetLibraryTestBase : public testing::Test {
protected:
std::string asset_library_root_;
std::string temp_library_path_;
static void SetUpTestSuite()
{
testing::Test::SetUpTestSuite();
CLG_init();
}
static void TearDownTestSuite()
{
CLG_exit();
testing::Test::TearDownTestSuite();
}
void SetUp() override
{
const std::string test_files_dir = blender::tests::flags_test_asset_dir();
if (test_files_dir.empty()) {
FAIL();
}
asset_library_root_ = test_files_dir + SEP_STR + "asset_library";
temp_library_path_ = "";
}
void TearDown() override
{
if (!temp_library_path_.empty()) {
BLI_delete(temp_library_path_.c_str(), true, true);
temp_library_path_ = "";
}
}
/* Register a temporary path, which will be removed at the end of the test.
* The returned path ends in a slash. */
std::string use_temp_path()
{
BKE_tempdir_init("");
const std::string tempdir = BKE_tempdir_session();
temp_library_path_ = tempdir + "test-temporary-path" + SEP_STR;
return temp_library_path_;
}
std::string create_temp_path()
{
std::string path = use_temp_path();
BLI_dir_create_recursive(path.c_str());
return path;
}
};
class AssetCatalogTreeTestFunctions {
public:
/**
* Recursively iterate over all tree items using #AssetCatalogTree::foreach_item() and check if
* the items map exactly to \a expected_paths.
*/
static void expect_tree_items(AssetCatalogTree *tree,
const std::vector<AssetCatalogPath> &expected_paths);
/**
* Iterate over the root items of \a tree and check if the items map exactly to \a
* expected_paths. Similar to #assert_expected_tree_items() but calls
* #AssetCatalogTree::foreach_root_item() instead of #AssetCatalogTree::foreach_item().
*/
static void expect_tree_root_items(AssetCatalogTree *tree,
const std::vector<AssetCatalogPath> &expected_paths);
/**
* Iterate over the child items of \a parent_item and check if the items map exactly to \a
* expected_paths. Similar to #assert_expected_tree_items() but calls
* #AssetCatalogTreeItem::foreach_child() instead of #AssetCatalogTree::foreach_item().
*/
static void expect_tree_item_child_items(AssetCatalogTreeItem *parent_item,
const std::vector<AssetCatalogPath> &expected_paths);
};
} // namespace blender::asset_system::tests

View File

@ -356,12 +356,12 @@ Array<Vector<int>> build_vert_to_loop_map(Span<MLoop> loops, int verts_num);
Array<Vector<int>> build_edge_to_loop_map(Span<MLoop> loops, int edges_num);
Vector<Vector<int>> build_edge_to_loop_map_resizable(Span<MLoop> loops, int edges_num);
inline int previous_poly_loop(const MPoly &poly, int loop_i)
inline int poly_loop_prev(const MPoly &poly, int loop_i)
{
return loop_i - 1 + (loop_i == poly.loopstart) * poly.totloop;
}
inline int next_poly_loop(const MPoly &poly, int loop_i)
inline int poly_loop_next(const MPoly &poly, int loop_i)
{
if (loop_i == poly.loopstart + poly.totloop - 1) {
return poly.loopstart;

View File

@ -668,6 +668,10 @@ void nodeUnlinkNode(struct bNodeTree *ntree, struct bNode *node);
* Find the first available, non-duplicate name for a given node.
*/
void nodeUniqueName(struct bNodeTree *ntree, struct bNode *node);
/**
* Create a new unique integer identifier for the node. Also set the node's
* index in the tree, which is an eagerly maintained cache.
*/
void nodeUniqueID(struct bNodeTree *ntree, struct bNode *node);
/**

View File

@ -253,12 +253,14 @@ class bNodeRuntime : NonCopyable, NonMovable {
/** List of cached internal links (input to output), for muted nodes and operators. */
Vector<bNodeLink *> internal_links;
/** Eagerly maintained cache of the node's index in the tree. */
int index_in_tree = -1;
/** Only valid if #topology_cache_is_dirty is false. */
Vector<bNodeSocket *> inputs;
Vector<bNodeSocket *> outputs;
Map<StringRefNull, bNodeSocket *> inputs_by_identifier;
Map<StringRefNull, bNodeSocket *> outputs_by_identifier;
int index_in_tree = -1;
bool has_available_linked_inputs = false;
bool has_available_linked_outputs = false;
Vector<bNode *> direct_children_in_frame;
@ -322,6 +324,10 @@ inline bool topology_cache_is_available(const bNodeSocket &socket)
} // namespace node_tree_runtime
namespace node_field_inferencing {
bool update_field_inferencing(const bNodeTree &tree);
}
} // namespace blender::bke
/* -------------------------------------------------------------------- */
@ -467,6 +473,15 @@ inline blender::Span<bNode *> bNodeTree::root_frames() const
/** \name #bNode Inline Methods
* \{ */
inline int bNode::index() const
{
const int index = this->runtime->index_in_tree;
/* The order of nodes should always be consistent with the `nodes_by_id` vector. */
BLI_assert(index ==
this->runtime->owner_tree->runtime->nodes_by_id.index_of_as(this->identifier));
return index;
}
inline blender::Span<bNodeSocket *> bNode::input_sockets()
{
BLI_assert(blender::bke::node_tree_runtime::topology_cache_is_available(*this));

View File

@ -229,6 +229,7 @@ set(SRC
intern/nla.c
intern/node.cc
intern/node_runtime.cc
intern/node_tree_field_inferencing.cc
intern/node_tree_update.cc
intern/object.cc
intern/object_deform.c
@ -480,6 +481,7 @@ set(SRC
BKE_type_conversions.hh
BKE_undo_system.h
BKE_unit.h
BKE_uv_islands.hh
BKE_vfont.h
BKE_vfontdata.h
BKE_viewer_path.h
@ -504,6 +506,7 @@ set(SRC
intern/multires_unsubdivide.h
intern/ocean_intern.h
intern/pbvh_intern.h
intern/pbvh_uv_islands.hh
intern/subdiv_converter.h
intern/subdiv_inline.h
)

View File

@ -1127,7 +1127,7 @@ void boid_brain(BoidBrainData *bbd, int p, ParticleData *pa)
/* decide on jumping & liftoff */
if (bpa->data.mode == eBoidMode_OnLand) {
/* fuzziness makes boids capable of misjudgement */
/* Fuzziness makes boids capable of misjudgment. */
float mul = 1.0f + state->rule_fuzziness;
if (boids->options & BOID_ALLOW_FLIGHT && bbd->wanted_co[2] > 0.0f) {

View File

@ -618,7 +618,7 @@ void adapt_mesh_domain_edge_to_corner_impl(const Mesh &mesh,
/* For every corner, mix the values from the adjacent edges on the face. */
for (const int loop_index : IndexRange(poly.loopstart, poly.totloop)) {
const int loop_index_prev = mesh_topology::previous_poly_loop(poly, loop_index);
const int loop_index_prev = mesh_topology::poly_loop_prev(poly, loop_index);
const MLoop &loop = loops[loop_index];
const MLoop &loop_prev = loops[loop_index_prev];
mixer.mix_in(loop_index, old_values[loop.e]);
@ -645,7 +645,7 @@ void adapt_mesh_domain_edge_to_corner_impl(const Mesh &mesh,
for (const int poly_index : range) {
const MPoly &poly = polys[poly_index];
for (const int loop_index : IndexRange(poly.loopstart, poly.totloop)) {
const int loop_index_prev = mesh_topology::previous_poly_loop(poly, loop_index);
const int loop_index_prev = mesh_topology::poly_loop_prev(poly, loop_index);
const MLoop &loop = loops[loop_index];
const MLoop &loop_prev = loops[loop_index_prev];
if (old_values[loop.e] && old_values[loop_prev.e]) {

View File

@ -1877,7 +1877,7 @@ KeyBlock *BKE_keyblock_add_ctime(Key *key, const char *name, const bool do_force
const float cpos = key->ctime / 100.0f;
/* In case of absolute keys, there is no point in adding more than one key with the same pos.
* Hence only set new keybloc pos to current time if none previous one already use it.
* Hence only set new key-block pos to current time if none previous one already use it.
* Now at least people just adding absolute keys without touching to ctime
* won't have to systematically use retiming func (and have ordering issues, too). See T39897.
*/

View File

@ -500,14 +500,19 @@ static int customdata_compare(
for (int i1 = 0; i1 < c1->totlayer; i1++) {
l1 = c1->layers + i1;
if (l1->anonymous_id != nullptr) {
continue;
}
bool found_corresponding_layer = false;
for (int i2 = 0; i2 < c2->totlayer; i2++) {
l2 = c2->layers + i2;
if (l1->type != l2->type || !STREQ(l1->name, l2->name) || l1->anonymous_id != nullptr ||
l2->anonymous_id != nullptr) {
if (l1->type != l2->type || !STREQ(l1->name, l2->name) || l2->anonymous_id != nullptr) {
continue;
}
/* At this point `l1` and `l2` have the same name and type, so they should be compared. */
found_corresponding_layer = true;
switch (l1->type) {
case CD_MVERT: {
@ -719,6 +724,11 @@ static int customdata_compare(
}
}
}
if (!found_corresponding_layer) {
if ((1 << l1->type) & CD_MASK_PROP_ALL) {
return MESHCMP_CDLAYERS_MISMATCH;
}
}
}
return 0;

View File

@ -27,6 +27,7 @@
#include "BLI_stack.h"
#include "BLI_task.h"
#include "BLI_task.hh"
#include "BLI_timeit.hh"
#include "BLI_utildefines.h"
#include "BKE_customdata.h"
@ -792,24 +793,20 @@ void BKE_lnor_space_custom_normal_to_data(const MLoopNorSpace *lnor_space,
#define LOOP_SPLIT_TASK_BLOCK_SIZE 1024
struct LoopSplitTaskData {
/* Specific to each instance (each task). */
enum class Type : int8_t {
BlockEnd = 0, /* Set implicitly by calloc. */
Fan = 1,
Single = 2,
};
/** We have to create those outside of tasks, since #MemArena is not thread-safe. */
MLoopNorSpace *lnor_space;
float3 *lnor;
const MLoop *ml_curr;
const MLoop *ml_prev;
int ml_curr_index;
int ml_prev_index;
/** Also used a flag to switch between single or fan process! */
const int *e2l_prev;
int ml_prev_index;
int mp_index;
/** This one is special, it's owned and managed by worker tasks,
* avoid to have to create it for each fan! */
BLI_Stack *edge_vectors;
char pad_c;
Type flag;
};
struct LoopSplitTaskDataCommon {
@ -825,7 +822,7 @@ struct LoopSplitTaskDataCommon {
Span<MEdge> edges;
Span<MLoop> loops;
Span<MPoly> polys;
MutableSpan<int2> edge_to_loops;
Span<int2> edge_to_loops;
Span<int> loop_to_poly;
Span<float3> polynors;
Span<float3> vert_normals;
@ -952,11 +949,13 @@ static void loop_manifold_fan_around_vert_next(const Span<MLoop> loops,
const Span<int> loop_to_poly,
const int *e2lfan_curr,
const uint mv_pivot_index,
const MLoop **r_mlfan_curr,
int *r_mlfan_curr_index,
int *r_mlfan_vert_index,
int *r_mpfan_curr_index)
{
const int mlfan_curr_orig = *r_mlfan_curr_index;
const uint vert_fan_orig = loops[mlfan_curr_orig].v;
/* WARNING: This is rather complex!
* We have to find our next edge around the vertex (fan mode).
* First we find the next loop, which is either previous or next to mlfan_curr_index, depending
@ -970,10 +969,10 @@ static void loop_manifold_fan_around_vert_next(const Span<MLoop> loops,
BLI_assert(*r_mlfan_curr_index >= 0);
BLI_assert(*r_mpfan_curr_index >= 0);
const MLoop &mlfan_next = loops[*r_mlfan_curr_index];
const uint vert_fan_next = loops[*r_mlfan_curr_index].v;
const MPoly &mpfan_next = polys[*r_mpfan_curr_index];
if (((*r_mlfan_curr)->v == mlfan_next.v && (*r_mlfan_curr)->v == mv_pivot_index) ||
((*r_mlfan_curr)->v != mlfan_next.v && (*r_mlfan_curr)->v != mv_pivot_index)) {
if ((vert_fan_orig == vert_fan_next && vert_fan_orig == mv_pivot_index) ||
(vert_fan_orig != vert_fan_next && vert_fan_orig != mv_pivot_index)) {
/* We need the previous loop, but current one is our vertex's loop. */
*r_mlfan_vert_index = *r_mlfan_curr_index;
if (--(*r_mlfan_curr_index) < mpfan_next.loopstart) {
@ -987,8 +986,6 @@ static void loop_manifold_fan_around_vert_next(const Span<MLoop> loops,
}
*r_mlfan_vert_index = *r_mlfan_curr_index;
}
*r_mlfan_curr = &loops[*r_mlfan_curr_index];
/* And now we are back in sync, mlfan_curr_index is the index of `mlfan_curr`! Pff! */
}
static void split_loop_nor_single_do(LoopSplitTaskDataCommon *common_data, LoopSplitTaskData *data)
@ -998,29 +995,25 @@ static void split_loop_nor_single_do(LoopSplitTaskDataCommon *common_data, LoopS
const Span<MVert> verts = common_data->verts;
const Span<MEdge> edges = common_data->edges;
const Span<MLoop> loops = common_data->loops;
const Span<float3> polynors = common_data->polynors;
MutableSpan<float3> loop_normals = common_data->loopnors;
MLoopNorSpace *lnor_space = data->lnor_space;
float3 *lnor = data->lnor;
const MLoop *ml_curr = data->ml_curr;
const MLoop *ml_prev = data->ml_prev;
const int ml_curr_index = data->ml_curr_index;
#if 0 /* Not needed for 'single' loop. */
const int ml_prev_index = data->ml_prev_index;
const int *e2l_prev = data->e2l_prev;
#endif
const int mp_index = data->mp_index;
/* Simple case (both edges around that vertex are sharp in current polygon),
* this loop just takes its poly normal.
*/
copy_v3_v3(*lnor, polynors[mp_index]);
loop_normals[ml_curr_index] = polynors[mp_index];
#if 0
printf("BASIC: handling loop %d / edge %d / vert %d / poly %d\n",
ml_curr_index,
ml_curr->e,
ml_curr->v,
loops[ml_curr_index].e,
loops[ml_curr_index].v,
mp_index);
#endif
@ -1028,12 +1021,12 @@ static void split_loop_nor_single_do(LoopSplitTaskDataCommon *common_data, LoopS
if (lnors_spacearr) {
float vec_curr[3], vec_prev[3];
const uint mv_pivot_index = ml_curr->v; /* The vertex we are "fanning" around! */
const uint mv_pivot_index = loops[ml_curr_index].v; /* The vertex we are "fanning" around! */
const MVert *mv_pivot = &verts[mv_pivot_index];
const MEdge *me_curr = &edges[ml_curr->e];
const MEdge *me_curr = &edges[loops[ml_curr_index].e];
const MVert *mv_2 = (me_curr->v1 == mv_pivot_index) ? &verts[me_curr->v2] :
&verts[me_curr->v1];
const MEdge *me_prev = &edges[ml_prev->e];
const MEdge *me_prev = &edges[loops[ml_prev_index].e];
const MVert *mv_3 = (me_prev->v1 == mv_pivot_index) ? &verts[me_prev->v2] :
&verts[me_prev->v1];
@ -1042,17 +1035,20 @@ static void split_loop_nor_single_do(LoopSplitTaskDataCommon *common_data, LoopS
sub_v3_v3v3(vec_prev, mv_3->co, mv_pivot->co);
normalize_v3(vec_prev);
BKE_lnor_space_define(lnor_space, *lnor, vec_curr, vec_prev, nullptr);
BKE_lnor_space_define(lnor_space, loop_normals[ml_curr_index], vec_curr, vec_prev, nullptr);
/* We know there is only one loop in this space, no need to create a link-list in this case. */
BKE_lnor_space_add_loop(lnors_spacearr, lnor_space, ml_curr_index, nullptr, true);
if (!clnors_data.is_empty()) {
BKE_lnor_space_custom_data_to_normal(lnor_space, clnors_data[ml_curr_index], *lnor);
BKE_lnor_space_custom_data_to_normal(
lnor_space, clnors_data[ml_curr_index], loop_normals[ml_curr_index]);
}
}
}
static void split_loop_nor_fan_do(LoopSplitTaskDataCommon *common_data, LoopSplitTaskData *data)
static void split_loop_nor_fan_do(LoopSplitTaskDataCommon *common_data,
LoopSplitTaskData *data,
BLI_Stack *edge_vectors)
{
MLoopNorSpaceArray *lnors_spacearr = common_data->lnors_spacearr;
MutableSpan<float3> loopnors = common_data->loopnors;
@ -1070,14 +1066,9 @@ static void split_loop_nor_fan_do(LoopSplitTaskDataCommon *common_data, LoopSpli
#if 0 /* Not needed for 'fan' loops. */
float(*lnor)[3] = data->lnor;
#endif
const MLoop *ml_curr = data->ml_curr;
const MLoop *ml_prev = data->ml_prev;
const int ml_curr_index = data->ml_curr_index;
const int ml_prev_index = data->ml_prev_index;
const int mp_index = data->mp_index;
const int *e2l_prev = data->e2l_prev;
BLI_Stack *edge_vectors = data->edge_vectors;
/* Sigh! we have to fan around current vertex, until we find the other non-smooth edge,
* and accumulate face normals into the vertex!
@ -1085,11 +1076,11 @@ static void split_loop_nor_fan_do(LoopSplitTaskDataCommon *common_data, LoopSpli
* same as the vertex normal, but I do not see any easy way to detect that (would need to count
* number of sharp edges per vertex, I doubt the additional memory usage would be worth it,
* especially as it should not be a common case in real-life meshes anyway). */
const uint mv_pivot_index = ml_curr->v; /* The vertex we are "fanning" around! */
const uint mv_pivot_index = loops[ml_curr_index].v; /* The vertex we are "fanning" around! */
const MVert *mv_pivot = &verts[mv_pivot_index];
/* `ml_curr` would be mlfan_prev if we needed that one. */
const MEdge *me_org = &edges[ml_curr->e];
/* `ml_curr_index` would be mlfan_prev if we needed that one. */
const MEdge *me_org = &edges[loops[ml_curr_index].e];
float vec_curr[3], vec_prev[3], vec_org[3];
float lnor[3] = {0.0f, 0.0f, 0.0f};
@ -1105,8 +1096,6 @@ static void split_loop_nor_fan_do(LoopSplitTaskDataCommon *common_data, LoopSpli
/* Temp clnors stack. */
BLI_SMALLSTACK_DECLARE(clnors, short *);
const int *e2lfan_curr = e2l_prev;
const MLoop *mlfan_curr = ml_prev;
/* `mlfan_vert_index` the loop of our current edge might not be the loop of our current vertex!
*/
int mlfan_curr_index = ml_prev_index;
@ -1133,7 +1122,7 @@ static void split_loop_nor_fan_do(LoopSplitTaskDataCommon *common_data, LoopSpli
// printf("FAN: vert %d, start edge %d\n", mv_pivot_index, ml_curr->e);
while (true) {
const MEdge *me_curr = &edges[mlfan_curr->e];
const MEdge *me_curr = &edges[loops[mlfan_curr_index].e];
/* Compute edge vectors.
* NOTE: We could pre-compute those into an array, in the first iteration, instead of computing
* them twice (or more) here. However, time gained is not worth memory and time lost,
@ -1147,7 +1136,7 @@ static void split_loop_nor_fan_do(LoopSplitTaskDataCommon *common_data, LoopSpli
normalize_v3(vec_curr);
}
// printf("\thandling edge %d / loop %d\n", mlfan_curr->e, mlfan_curr_index);
// printf("\thandling edge %d / loop %d\n", loops[mlfan_curr_index].e, mlfan_curr_index);
{
/* Code similar to accumulate_vertex_normals_poly_v3. */
@ -1185,7 +1174,7 @@ static void split_loop_nor_fan_do(LoopSplitTaskDataCommon *common_data, LoopSpli
}
}
if (IS_EDGE_SHARP(e2lfan_curr) || (me_curr == me_org)) {
if (IS_EDGE_SHARP(edge_to_loops[loops[mlfan_curr_index].e]) || (me_curr == me_org)) {
/* Current edge is sharp and we have finished with this fan of faces around this vert,
* or this vert is smooth, and we have completed a full turn around it. */
// printf("FAN: Finished!\n");
@ -1198,14 +1187,11 @@ static void split_loop_nor_fan_do(LoopSplitTaskDataCommon *common_data, LoopSpli
loop_manifold_fan_around_vert_next(loops,
polys,
loop_to_poly,
e2lfan_curr,
edge_to_loops[loops[mlfan_curr_index].e],
mv_pivot_index,
&mlfan_curr,
&mlfan_curr_index,
&mlfan_vert_index,
&mpfan_curr_index);
e2lfan_curr = edge_to_loops[mlfan_curr->e];
}
{
@ -1265,11 +1251,9 @@ static void loop_split_worker_do(LoopSplitTaskDataCommon *common_data,
LoopSplitTaskData *data,
BLI_Stack *edge_vectors)
{
BLI_assert(data->ml_curr);
if (data->e2l_prev) {
if (data->flag == LoopSplitTaskData::Type::Fan) {
BLI_assert((edge_vectors == nullptr) || BLI_stack_is_empty(edge_vectors));
data->edge_vectors = edge_vectors;
split_loop_nor_fan_do(common_data, data);
split_loop_nor_fan_do(common_data, data, edge_vectors);
}
else {
/* No need for edge_vectors for 'single' case! */
@ -1288,8 +1272,7 @@ static void loop_split_worker(TaskPool *__restrict pool, void *taskdata)
nullptr;
for (int i = 0; i < LOOP_SPLIT_TASK_BLOCK_SIZE; i++, data++) {
/* A nullptr ml_curr is used to tag ended data! */
if (data->ml_curr == nullptr) {
if (data->flag == LoopSplitTaskData::Type::BlockEnd) {
break;
}
@ -1312,13 +1295,11 @@ static bool loop_split_generator_check_cyclic_smooth_fan(const Span<MLoop> mloop
const Span<int> loop_to_poly,
const int *e2l_prev,
BitVector<> &skip_loops,
const MLoop *ml_curr,
const MLoop *ml_prev,
const int ml_curr_index,
const int ml_prev_index,
const int mp_curr_index)
{
const uint mv_pivot_index = ml_curr->v; /* The vertex we are "fanning" around! */
const uint mv_pivot_index = mloops[ml_curr_index].v; /* The vertex we are "fanning" around! */
const int *e2lfan_curr = e2l_prev;
if (IS_EDGE_SHARP(e2lfan_curr)) {
@ -1328,7 +1309,6 @@ static bool loop_split_generator_check_cyclic_smooth_fan(const Span<MLoop> mloop
/* `mlfan_vert_index` the loop of our current edge might not be the loop of our current vertex!
*/
const MLoop *mlfan_curr = ml_prev;
int mlfan_curr_index = ml_prev_index;
int mlfan_vert_index = ml_curr_index;
int mpfan_curr_index = mp_curr_index;
@ -1347,12 +1327,11 @@ static bool loop_split_generator_check_cyclic_smooth_fan(const Span<MLoop> mloop
loop_to_poly,
e2lfan_curr,
mv_pivot_index,
&mlfan_curr,
&mlfan_curr_index,
&mlfan_vert_index,
&mpfan_curr_index);
e2lfan_curr = edge_to_loops[mlfan_curr->e];
e2lfan_curr = edge_to_loops[mloops[mlfan_curr_index].e];
if (IS_EDGE_SHARP(e2lfan_curr)) {
/* Sharp loop/edge, so not a cyclic smooth fan. */
@ -1362,7 +1341,7 @@ static bool loop_split_generator_check_cyclic_smooth_fan(const Span<MLoop> mloop
if (skip_loops[mlfan_vert_index]) {
if (mlfan_vert_index == ml_curr_index) {
/* We walked around a whole cyclic smooth fan without finding any already-processed loop,
* means we can use initial `ml_curr` / `ml_prev` edge as start for this smooth fan. */
* means we can use initial current / previous edge as start for this smooth fan. */
return true;
}
/* Already checked in some previous looping, we can abort. */
@ -1376,8 +1355,9 @@ static bool loop_split_generator_check_cyclic_smooth_fan(const Span<MLoop> mloop
static void loop_split_generator(TaskPool *pool, LoopSplitTaskDataCommon *common_data)
{
using namespace blender;
using namespace blender::bke;
MLoopNorSpaceArray *lnors_spacearr = common_data->lnors_spacearr;
MutableSpan<float3> loopnors = common_data->loopnors;
const Span<MLoop> loops = common_data->loops;
const Span<MPoly> polys = common_data->polys;
@ -1408,24 +1388,16 @@ static void loop_split_generator(TaskPool *pool, LoopSplitTaskDataCommon *common
*/
for (const int mp_index : polys.index_range()) {
const MPoly &poly = polys[mp_index];
const int ml_last_index = (poly.loopstart + poly.totloop) - 1;
int ml_curr_index = poly.loopstart;
int ml_prev_index = ml_last_index;
const MLoop *ml_curr = &loops[ml_curr_index];
const MLoop *ml_prev = &loops[ml_prev_index];
float3 *lnors = &loopnors[ml_curr_index];
for (; ml_curr_index <= ml_last_index; ml_curr++, ml_curr_index++, lnors++) {
const int *e2l_curr = edge_to_loops[ml_curr->e];
const int *e2l_prev = edge_to_loops[ml_prev->e];
for (const int ml_curr_index : IndexRange(poly.loopstart, poly.totloop)) {
const int ml_prev_index = mesh_topology::poly_loop_prev(poly, ml_curr_index);
#if 0
printf("Checking loop %d / edge %u / vert %u (sharp edge: %d, skiploop: %d)",
ml_curr_index,
ml_curr->e,
ml_curr->v,
IS_EDGE_SHARP(e2l_curr),
loops[ml_curr_index].e,
loops[ml_curr_index].v,
IS_EDGE_SHARP(edge_to_loops[loops[ml_curr_index].e]),
skip_loops[ml_curr_index]);
#endif
@ -1439,18 +1411,17 @@ static void loop_split_generator(TaskPool *pool, LoopSplitTaskDataCommon *common
* However, this would complicate the code, add more memory usage, and despite its logical
* complexity, #loop_manifold_fan_around_vert_next() is quite cheap in term of CPU cycles,
* so really think it's not worth it. */
if (!IS_EDGE_SHARP(e2l_curr) && (skip_loops[ml_curr_index] ||
!loop_split_generator_check_cyclic_smooth_fan(loops,
polys,
edge_to_loops,
loop_to_poly,
e2l_prev,
skip_loops,
ml_curr,
ml_prev,
ml_curr_index,
ml_prev_index,
mp_index))) {
if (!IS_EDGE_SHARP(edge_to_loops[loops[ml_curr_index].e]) &&
(skip_loops[ml_curr_index] ||
!loop_split_generator_check_cyclic_smooth_fan(loops,
polys,
edge_to_loops,
loop_to_poly,
edge_to_loops[loops[ml_prev_index].e],
skip_loops,
ml_curr_index,
ml_prev_index,
mp_index))) {
// printf("SKIPPING!\n");
}
else {
@ -1470,38 +1441,27 @@ static void loop_split_generator(TaskPool *pool, LoopSplitTaskDataCommon *common
memset(data, 0, sizeof(*data));
}
if (IS_EDGE_SHARP(e2l_curr) && IS_EDGE_SHARP(e2l_prev)) {
data->lnor = lnors;
data->ml_curr = ml_curr;
data->ml_prev = ml_prev;
if (IS_EDGE_SHARP(edge_to_loops[loops[ml_curr_index].e]) &&
IS_EDGE_SHARP(edge_to_loops[loops[ml_prev_index].e])) {
data->ml_curr_index = ml_curr_index;
#if 0 /* Not needed for 'single' loop. */
data->ml_prev_index = ml_prev_index;
data->e2l_prev = nullptr; /* Tag as 'single' task. */
#endif
data->flag = LoopSplitTaskData::Type::Single;
data->mp_index = mp_index;
if (lnors_spacearr) {
data->lnor_space = BKE_lnor_space_create(lnors_spacearr);
}
}
/* We *do not need* to check/tag loops as already computed!
* Due to the fact a loop only links to one of its two edges,
* a same fan *will never be walked more than once!*
* Since we consider edges having neighbor polys with inverted
* (flipped) normals as sharp, we are sure that no fan will be skipped,
* even only considering the case (sharp curr_edge, smooth prev_edge),
* and not the alternative (smooth curr_edge, sharp prev_edge).
* All this due/thanks to link between normals and loop ordering (i.e. winding).
*/
else {
#if 0 /* Not needed for 'fan' loops. */
data->lnor = lnors;
#endif
data->ml_curr = ml_curr;
data->ml_prev = ml_prev;
/* We do not need to check/tag loops as already computed. Due to the fact that a loop
* only points to one of its two edges, the same fan will never be walked more than once.
* Since we consider edges that have neighbor polys with inverted (flipped) normals as
* sharp, we are sure that no fan will be skipped, even only considering the case (sharp
* current edge, smooth previous edge), and not the alternative (smooth current edge,
* sharp previous edge). All this due/thanks to the link between normals and loop
* ordering (i.e. winding). */
data->ml_curr_index = ml_curr_index;
data->ml_prev_index = ml_prev_index;
data->e2l_prev = e2l_prev; /* Also tag as 'fan' task. */
data->flag = LoopSplitTaskData::Type::Fan;
data->mp_index = mp_index;
if (lnors_spacearr) {
data->lnor_space = BKE_lnor_space_create(lnors_spacearr);
@ -1519,14 +1479,9 @@ static void loop_split_generator(TaskPool *pool, LoopSplitTaskDataCommon *common
loop_split_worker_do(common_data, data, edge_vectors);
}
}
ml_prev = ml_curr;
ml_prev_index = ml_curr_index;
}
}
/* Last block of data. Since it is calloc'ed and we use first nullptr item as stopper,
* everything is fine. */
if (pool && data_idx) {
BLI_task_pool_push(pool, loop_split_worker, data_buff, true, nullptr);
}

View File

@ -145,11 +145,13 @@ static void ntree_copy_data(Main * /*bmain*/, ID *id_dst, const ID *id_src, cons
dst_runtime.nodes_by_id.reserve(ntree_src->all_nodes().size());
BLI_listbase_clear(&ntree_dst->nodes);
LISTBASE_FOREACH (const bNode *, src_node, &ntree_src->nodes) {
int i;
LISTBASE_FOREACH_INDEX (const bNode *, src_node, &ntree_src->nodes, i) {
/* Don't find a unique name for every node, since they should have valid names already. */
bNode *new_node = blender::bke::node_copy_with_mapping(
ntree_dst, *src_node, flag_subdata, false, socket_map);
dst_runtime.nodes_by_id.add_new(new_node);
new_node->runtime->index_in_tree = i;
}
/* copy links */
@ -673,9 +675,11 @@ void ntreeBlendReadData(BlendDataReader *reader, ID *owner_id, bNodeTree *ntree)
BKE_animdata_blend_read_data(reader, ntree->adt);
BLO_read_list(reader, &ntree->nodes);
LISTBASE_FOREACH (bNode *, node, &ntree->nodes) {
int i;
LISTBASE_FOREACH_INDEX (bNode *, node, &ntree->nodes, i) {
node->runtime = MEM_new<bNodeRuntime>(__func__);
node->typeinfo = nullptr;
node->runtime->index_in_tree = i;
/* Create the `nodes_by_id` cache eagerly so it can be expected to be valid. Because
* we create it here we also have to check for zero identifiers from previous versions. */
@ -1373,8 +1377,7 @@ void nodeRegisterType(bNodeType *nt)
if (nt->declare && !nt->declaration_is_dynamic) {
if (nt->fixed_declaration == nullptr) {
nt->fixed_declaration = new blender::nodes::NodeDeclaration();
blender::nodes::NodeDeclarationBuilder builder{*nt->fixed_declaration};
nt->declare(builder);
blender::nodes::build_node_declaration(*nt, *nt->fixed_declaration);
}
}
@ -2198,6 +2201,8 @@ void nodeUniqueID(bNodeTree *ntree, bNode *node)
node->identifier = new_id;
ntree->runtime->nodes_by_id.add_new(node);
node->runtime->index_in_tree = ntree->runtime->nodes_by_id.index_range().last();
BLI_assert(node->runtime->index_in_tree == ntree->runtime->nodes_by_id.index_of(node));
}
bNode *nodeAddNode(const bContext *C, bNodeTree *ntree, const char *idname)
@ -2940,8 +2945,10 @@ void nodeRebuildIDVector(bNodeTree *node_tree)
{
/* Rebuild nodes #VectorSet which must have the same order as the list. */
node_tree->runtime->nodes_by_id.clear();
LISTBASE_FOREACH (bNode *, node, &node_tree->nodes) {
int i;
LISTBASE_FOREACH_INDEX (bNode *, node, &node_tree->nodes, i) {
node_tree->runtime->nodes_by_id.add_new(node);
node->runtime->index_in_tree = i;
}
}
@ -3613,8 +3620,7 @@ bool nodeDeclarationEnsureOnOutdatedNode(bNodeTree * /*ntree*/, bNode *node)
}
if (node->typeinfo->declaration_is_dynamic) {
node->runtime->declaration = new blender::nodes::NodeDeclaration();
blender::nodes::NodeDeclarationBuilder builder{*node->runtime->declaration};
node->typeinfo->declare(builder);
blender::nodes::build_node_declaration(*node->typeinfo, *node->runtime->declaration);
}
else {
/* Declaration should have been created in #nodeRegisterType. */

View File

@ -278,7 +278,7 @@ static void toposort_from_start_node(const ToposortDirection direction,
Stack<Item, 64> nodes_to_check;
nodes_to_check.push({&start_node});
node_states[start_node.runtime->index_in_tree].is_in_stack = true;
node_states[start_node.index()].is_in_stack = true;
while (!nodes_to_check.is_empty()) {
Item &item = nodes_to_check.peek();
bNode &node = *item.node;
@ -306,7 +306,7 @@ static void toposort_from_start_node(const ToposortDirection direction,
}
bNodeSocket &linked_socket = *socket.runtime->directly_linked_sockets[item.link_index];
bNode &linked_node = *linked_socket.runtime->owner_node;
ToposortNodeState &linked_node_state = node_states[linked_node.runtime->index_in_tree];
ToposortNodeState &linked_node_state = node_states[linked_node.index()];
if (linked_node_state.is_done) {
/* The linked node has already been visited. */
item.link_index++;
@ -324,7 +324,7 @@ static void toposort_from_start_node(const ToposortDirection direction,
/* If no other element has been pushed, the current node can be pushed to the sorted list. */
if (&item == &nodes_to_check.peek()) {
ToposortNodeState &node_state = node_states[node.runtime->index_in_tree];
ToposortNodeState &node_state = node_states[node.index()];
node_state.is_done = true;
node_state.is_in_stack = false;
r_sorted_nodes.append(&node);
@ -345,7 +345,7 @@ static void update_toposort(const bNodeTree &ntree,
Array<ToposortNodeState> node_states(tree_runtime.nodes_by_id.size());
for (bNode *node : tree_runtime.nodes_by_id) {
if (node_states[node->runtime->index_in_tree].is_done) {
if (node_states[node->index()].is_done) {
/* Ignore nodes that are done already. */
continue;
}
@ -361,7 +361,7 @@ static void update_toposort(const bNodeTree &ntree,
if (r_sorted_nodes.size() < tree_runtime.nodes_by_id.size()) {
r_cycle_detected = true;
for (bNode *node : tree_runtime.nodes_by_id) {
if (node_states[node->runtime->index_in_tree].is_done) {
if (node_states[node->index()].is_done) {
/* Ignore nodes that are done already. */
continue;
}

View File

@ -0,0 +1,519 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include "BKE_node.h"
#include "BKE_node_runtime.hh"
#include "NOD_node_declaration.hh"
#include "BLI_set.hh"
#include "BLI_stack.hh"
namespace blender::bke::node_field_inferencing {
using nodes::FieldInferencingInterface;
using nodes::InputSocketFieldType;
using nodes::NodeDeclaration;
using nodes::OutputFieldDependency;
using nodes::OutputSocketFieldType;
using nodes::SocketDeclaration;
static bool is_field_socket_type(eNodeSocketDatatype type)
{
return ELEM(type, SOCK_FLOAT, SOCK_INT, SOCK_BOOLEAN, SOCK_VECTOR, SOCK_RGBA);
}
static bool is_field_socket_type(const bNodeSocket &socket)
{
return is_field_socket_type((eNodeSocketDatatype)socket.typeinfo->type);
}
static InputSocketFieldType get_interface_input_field_type(const bNode &node,
const bNodeSocket &socket)
{
if (!is_field_socket_type(socket)) {
return InputSocketFieldType::None;
}
if (node.type == NODE_REROUTE) {
return InputSocketFieldType::IsSupported;
}
if (node.type == NODE_GROUP_OUTPUT) {
/* Outputs always support fields when the data type is correct. */
return InputSocketFieldType::IsSupported;
}
if (node.typeinfo == &NodeTypeUndefined) {
return InputSocketFieldType::None;
}
if (node.type == NODE_CUSTOM) {
return InputSocketFieldType::None;
}
/* TODO: Ensure declaration exists. */
const NodeDeclaration *node_decl = node.declaration();
/* Node declarations should be implemented for nodes involved here. */
BLI_assert(node_decl != nullptr);
/* Get the field type from the declaration. */
const SocketDeclaration &socket_decl = *node_decl->inputs()[socket.index()];
const InputSocketFieldType field_type = socket_decl.input_field_type();
return field_type;
}
static OutputFieldDependency get_interface_output_field_dependency(const bNode &node,
const bNodeSocket &socket)
{
if (!is_field_socket_type(socket)) {
/* Non-field sockets always output data. */
return OutputFieldDependency::ForDataSource();
}
if (node.type == NODE_REROUTE) {
/* The reroute just forwards what is passed in. */
return OutputFieldDependency::ForDependentField();
}
if (node.type == NODE_GROUP_INPUT) {
/* Input nodes get special treatment in #determine_group_input_states. */
return OutputFieldDependency::ForDependentField();
}
if (node.typeinfo == &NodeTypeUndefined) {
return OutputFieldDependency::ForDataSource();
}
if (node.type == NODE_CUSTOM) {
return OutputFieldDependency::ForDataSource();
}
const NodeDeclaration *node_decl = node.declaration();
/* Node declarations should be implemented for nodes involved here. */
BLI_assert(node_decl != nullptr);
/* Use the socket declaration. */
const SocketDeclaration &socket_decl = *node_decl->outputs()[socket.index()];
return socket_decl.output_field_dependency();
}
static FieldInferencingInterface get_dummy_field_inferencing_interface(const bNode &node)
{
FieldInferencingInterface inferencing_interface;
inferencing_interface.inputs.append_n_times(InputSocketFieldType::None,
node.input_sockets().size());
inferencing_interface.outputs.append_n_times(OutputFieldDependency::ForDataSource(),
node.output_sockets().size());
return inferencing_interface;
}
/**
* Retrieves information about how the node interacts with fields.
* In the future, this information can be stored in the node declaration. This would allow this
* function to return a reference, making it more efficient.
*/
static FieldInferencingInterface get_node_field_inferencing_interface(const bNode &node)
{
/* Node groups already reference all required information, so just return that. */
if (node.is_group()) {
bNodeTree *group = (bNodeTree *)node.id;
if (group == nullptr) {
return FieldInferencingInterface();
}
if (!ntreeIsRegistered(group)) {
/* This can happen when there is a linked node group that was not found (see T92799). */
return get_dummy_field_inferencing_interface(node);
}
if (!group->runtime->field_inferencing_interface) {
/* This shouldn't happen because referenced node groups should always be updated first. */
BLI_assert_unreachable();
}
return *group->runtime->field_inferencing_interface;
}
FieldInferencingInterface inferencing_interface;
for (const bNodeSocket *input_socket : node.input_sockets()) {
inferencing_interface.inputs.append(get_interface_input_field_type(node, *input_socket));
}
for (const bNodeSocket *output_socket : node.output_sockets()) {
inferencing_interface.outputs.append(
get_interface_output_field_dependency(node, *output_socket));
}
return inferencing_interface;
}
/**
* This struct contains information for every socket. The values are propagated through the
* network.
*/
struct SocketFieldState {
/* This socket starts a new field. */
bool is_field_source = false;
/* This socket can never become a field, because the node itself does not support it. */
bool is_always_single = false;
/* This socket is currently a single value. It could become a field though. */
bool is_single = true;
/* This socket is required to be a single value. This can be because the node itself only
* supports this socket to be a single value, or because a node afterwards requires this to be a
* single value. */
bool requires_single = false;
};
static Vector<const bNodeSocket *> gather_input_socket_dependencies(
const OutputFieldDependency &field_dependency, const bNode &node)
{
const OutputSocketFieldType type = field_dependency.field_type();
Vector<const bNodeSocket *> input_sockets;
switch (type) {
case OutputSocketFieldType::FieldSource:
case OutputSocketFieldType::None: {
break;
}
case OutputSocketFieldType::DependentField: {
/* This output depends on all inputs. */
input_sockets.extend(node.input_sockets());
break;
}
case OutputSocketFieldType::PartiallyDependent: {
/* This output depends only on a few inputs. */
for (const int i : field_dependency.linked_input_indices()) {
input_sockets.append(&node.input_socket(i));
}
break;
}
}
return input_sockets;
}
/**
* Check what the group output socket depends on. Potentially traverses the node tree
* to figure out if it is always a field or if it depends on any group inputs.
*/
static OutputFieldDependency find_group_output_dependencies(
const bNodeSocket &group_output_socket, const Span<SocketFieldState> field_state_by_socket_id)
{
if (!is_field_socket_type(group_output_socket)) {
return OutputFieldDependency::ForDataSource();
}
/* Use a Set here instead of an array indexed by socket id, because we my only need to look at
* very few sockets. */
Set<const bNodeSocket *> handled_sockets;
Stack<const bNodeSocket *> sockets_to_check;
handled_sockets.add(&group_output_socket);
sockets_to_check.push(&group_output_socket);
/* Keeps track of group input indices that are (indirectly) connected to the output. */
Vector<int> linked_input_indices;
while (!sockets_to_check.is_empty()) {
const bNodeSocket *input_socket = sockets_to_check.pop();
if (!input_socket->is_directly_linked() &&
!field_state_by_socket_id[input_socket->index_in_tree()].is_single) {
/* This socket uses a field as input by default. */
return OutputFieldDependency::ForFieldSource();
}
for (const bNodeSocket *origin_socket : input_socket->directly_linked_sockets()) {
const bNode &origin_node = origin_socket->owner_node();
const SocketFieldState &origin_state =
field_state_by_socket_id[origin_socket->index_in_tree()];
if (origin_state.is_field_source) {
if (origin_node.type == NODE_GROUP_INPUT) {
/* Found a group input that the group output depends on. */
linked_input_indices.append_non_duplicates(origin_socket->index());
}
else {
/* Found a field source that is not the group input. So the output is always a field. */
return OutputFieldDependency::ForFieldSource();
}
}
else if (!origin_state.is_single) {
const FieldInferencingInterface inferencing_interface =
get_node_field_inferencing_interface(origin_node);
const OutputFieldDependency &field_dependency =
inferencing_interface.outputs[origin_socket->index()];
/* Propagate search further to the left. */
for (const bNodeSocket *origin_input_socket :
gather_input_socket_dependencies(field_dependency, origin_node)) {
if (!origin_input_socket->is_available()) {
continue;
}
if (!field_state_by_socket_id[origin_input_socket->index_in_tree()].is_single) {
if (handled_sockets.add(origin_input_socket)) {
sockets_to_check.push(origin_input_socket);
}
}
}
}
}
}
return OutputFieldDependency::ForPartiallyDependentField(std::move(linked_input_indices));
}
/**
 * Propagate the "must be a single value" requirement backwards (right to left, in topological
 * order): an output has to be a single value when any linked downstream input requires one, and
 * that requirement is then forwarded to the inputs that the output depends on.
 */
static void propagate_data_requirements_from_right_to_left(
    const bNodeTree &tree, const MutableSpan<SocketFieldState> field_state_by_socket_id)
{
  const Span<const bNode *> toposort_result = tree.toposort_right_to_left();
  for (const bNode *node : toposort_result) {
    const FieldInferencingInterface inferencing_interface = get_node_field_inferencing_interface(
        *node);
    for (const bNodeSocket *output_socket : node->output_sockets()) {
      SocketFieldState &state = field_state_by_socket_id[output_socket->index_in_tree()];
      const OutputFieldDependency &field_dependency =
          inferencing_interface.outputs[output_socket->index()];
      /* A field source can never be required to be a single value. */
      if (field_dependency.field_type() == OutputSocketFieldType::FieldSource) {
        continue;
      }
      /* An output that never supports fields is always required to be a single value. */
      if (field_dependency.field_type() == OutputSocketFieldType::None) {
        state.requires_single = true;
        state.is_always_single = true;
        continue;
      }
      /* The output is required to be a single value when it is connected to any input that does
       * not support fields. */
      for (const bNodeSocket *target_socket : output_socket->directly_linked_sockets()) {
        if (target_socket->is_available()) {
          state.requires_single |=
              field_state_by_socket_id[target_socket->index_in_tree()].requires_single;
        }
      }
      if (state.requires_single) {
        bool any_input_is_field_implicitly = false;
        const Vector<const bNodeSocket *> connected_inputs = gather_input_socket_dependencies(
            field_dependency, *node);
        for (const bNodeSocket *input_socket : connected_inputs) {
          if (!input_socket->is_available()) {
            continue;
          }
          /* An unlinked implicit-field input makes this output a field regardless. */
          if (inferencing_interface.inputs[input_socket->index()] ==
              InputSocketFieldType::Implicit) {
            if (!input_socket->is_logically_linked()) {
              any_input_is_field_implicitly = true;
              break;
            }
          }
        }
        if (any_input_is_field_implicitly) {
          /* This output isn't a single value actually. */
          state.requires_single = false;
        }
        else {
          /* If the output is required to be a single value, the connected inputs in the same node
           * must not be fields as well. */
          for (const bNodeSocket *input_socket : connected_inputs) {
            field_state_by_socket_id[input_socket->index_in_tree()].requires_single = true;
          }
        }
      }
    }
    /* Some inputs do not require fields independent of what the outputs are connected to. */
    for (const bNodeSocket *input_socket : node->input_sockets()) {
      SocketFieldState &state = field_state_by_socket_id[input_socket->index_in_tree()];
      if (inferencing_interface.inputs[input_socket->index()] == InputSocketFieldType::None) {
        state.requires_single = true;
        state.is_always_single = true;
      }
    }
  }
}
/**
 * Decide the final field type of every group input and write it into
 * #new_inferencing_interface, based on the propagated `requires_single` states of the
 * Group Input node sockets. Also updates the socket states of all Group Input nodes so
 * they agree with the resulting interface.
 */
static void determine_group_input_states(
    const bNodeTree &tree,
    FieldInferencingInterface &new_inferencing_interface,
    const MutableSpan<SocketFieldState> field_state_by_socket_id)
{
  {
    /* Non-field inputs never support fields. */
    int index;
    LISTBASE_FOREACH_INDEX (bNodeSocket *, group_input, &tree.inputs, index) {
      if (!is_field_socket_type((eNodeSocketDatatype)group_input->type)) {
        new_inferencing_interface.inputs[index] = InputSocketFieldType::None;
      }
    }
  }
  /* Check if group inputs are required to be single values, because they are (indirectly)
   * connected to some socket that does not support fields. */
  for (const bNode *node : tree.nodes_by_type("NodeGroupInput")) {
    /* The last output is skipped; it is the dummy socket (see `dummy_socket_state` below). */
    for (const bNodeSocket *output_socket : node->output_sockets().drop_back(1)) {
      SocketFieldState &state = field_state_by_socket_id[output_socket->index_in_tree()];
      if (state.requires_single) {
        new_inferencing_interface.inputs[output_socket->index()] = InputSocketFieldType::None;
      }
    }
  }
  /* If an input does not support fields, this should be reflected in all Group Input nodes. */
  for (const bNode *node : tree.nodes_by_type("NodeGroupInput")) {
    for (const bNodeSocket *output_socket : node->output_sockets().drop_back(1)) {
      SocketFieldState &state = field_state_by_socket_id[output_socket->index_in_tree()];
      const bool supports_field = new_inferencing_interface.inputs[output_socket->index()] !=
                                  InputSocketFieldType::None;
      if (supports_field) {
        /* Field-capable group inputs are treated as field sources inside the group. */
        state.is_single = false;
        state.is_field_source = true;
      }
      else {
        state.requires_single = true;
      }
    }
    SocketFieldState &dummy_socket_state =
        field_state_by_socket_id[node->output_sockets().last()->index_in_tree()];
    dummy_socket_state.requires_single = true;
  }
}
/**
 * Propagate the actual field status (single value vs. field) forward (left to right, in
 * topological order) through the tree, taking linked origin sockets and each node's
 * inferencing interface into account.
 */
static void propagate_field_status_from_left_to_right(
    const bNodeTree &tree, const MutableSpan<SocketFieldState> field_state_by_socket_id)
{
  const Span<const bNode *> toposort_result = tree.toposort_left_to_right();
  for (const bNode *node : toposort_result) {
    /* Group Input node states were already set in #determine_group_input_states. */
    if (node->type == NODE_GROUP_INPUT) {
      continue;
    }
    const FieldInferencingInterface inferencing_interface = get_node_field_inferencing_interface(
        *node);
    /* Update field state of input sockets, also taking into account linked origin sockets. */
    for (const bNodeSocket *input_socket : node->input_sockets()) {
      SocketFieldState &state = field_state_by_socket_id[input_socket->index_in_tree()];
      if (state.is_always_single) {
        state.is_single = true;
        continue;
      }
      state.is_single = true;
      if (!input_socket->is_directly_linked()) {
        /* An unlinked implicit-field input becomes a field by default. */
        if (inferencing_interface.inputs[input_socket->index()] ==
            InputSocketFieldType::Implicit) {
          state.is_single = false;
        }
      }
      else {
        /* A linked input is a field when any of its origins is a field. */
        for (const bNodeSocket *origin_socket : input_socket->directly_linked_sockets()) {
          if (!field_state_by_socket_id[origin_socket->index_in_tree()].is_single) {
            state.is_single = false;
            break;
          }
        }
      }
    }
    /* Update field state of output sockets, also taking into account input sockets. */
    for (const bNodeSocket *output_socket : node->output_sockets()) {
      SocketFieldState &state = field_state_by_socket_id[output_socket->index_in_tree()];
      const OutputFieldDependency &field_dependency =
          inferencing_interface.outputs[output_socket->index()];
      switch (field_dependency.field_type()) {
        case OutputSocketFieldType::None: {
          state.is_single = true;
          break;
        }
        case OutputSocketFieldType::FieldSource: {
          state.is_single = false;
          state.is_field_source = true;
          break;
        }
        case OutputSocketFieldType::PartiallyDependent:
        case OutputSocketFieldType::DependentField: {
          /* The output is a field when any input it depends on is a field. */
          for (const bNodeSocket *input_socket :
               gather_input_socket_dependencies(field_dependency, *node)) {
            if (!input_socket->is_available()) {
              continue;
            }
            if (!field_state_by_socket_id[input_socket->index_in_tree()].is_single) {
              state.is_single = false;
              break;
            }
          }
          break;
        }
      }
    }
  }
}
/**
 * Compute the field dependency of every group output socket and store it in
 * #new_inferencing_interface. The last input socket of the group output node is skipped
 * (`drop_back(1)`; presumably the dummy/extension socket -- matches the Group Input handling).
 */
static void determine_group_output_states(const bNodeTree &tree,
                                          FieldInferencingInterface &new_inferencing_interface,
                                          const Span<SocketFieldState> field_state_by_socket_id)
{
  const bNode *group_output_node = tree.group_output_node();
  /* A tree without a group output node has nothing to do here. */
  if (!group_output_node) {
    return;
  }
  for (const bNodeSocket *group_output_socket : group_output_node->input_sockets().drop_back(1)) {
    OutputFieldDependency field_dependency = find_group_output_dependencies(
        *group_output_socket, field_state_by_socket_id);
    new_inferencing_interface.outputs[group_output_socket->index()] = std::move(field_dependency);
  }
}
/**
 * Update the display shape of every socket so the inferred field status is visible in the UI:
 * circle = requires a single value, diamond = field, diamond-with-dot = single value that
 * could still become a field.
 */
static void update_socket_shapes(const bNodeTree &tree,
                                 const Span<SocketFieldState> field_state_by_socket_id)
{
  const eNodeSocketDisplayShape requires_data_shape = SOCK_DISPLAY_SHAPE_CIRCLE;
  const eNodeSocketDisplayShape data_but_can_be_field_shape = SOCK_DISPLAY_SHAPE_DIAMOND_DOT;
  const eNodeSocketDisplayShape is_field_shape = SOCK_DISPLAY_SHAPE_DIAMOND;
  auto get_shape_for_state = [&](const SocketFieldState &state) {
    if (state.is_always_single) {
      return requires_data_shape;
    }
    if (!state.is_single) {
      return is_field_shape;
    }
    if (state.requires_single) {
      return requires_data_shape;
    }
    return data_but_can_be_field_shape;
  };
  for (const bNodeSocket *socket : tree.all_input_sockets()) {
    const SocketFieldState &state = field_state_by_socket_id[socket->index_in_tree()];
    const_cast<bNodeSocket *>(socket)->display_shape = get_shape_for_state(state);
  }
  /* NOTE(review): if `all_sockets()` already includes input sockets, the loop above is
   * redundant -- confirm against the topology-cache implementation. */
  for (const bNodeSocket *socket : tree.all_sockets()) {
    const SocketFieldState &state = field_state_by_socket_id[socket->index_in_tree()];
    const_cast<bNodeSocket *>(socket)->display_shape = get_shape_for_state(state);
  }
}
/**
 * Re-run field inferencing for the whole node tree and store the result in the tree's
 * runtime data.
 *
 * \return True when the group's field inferencing interface changed compared to the
 * previously stored one (or none was stored yet), so callers can propagate the update.
 */
bool update_field_inferencing(const bNodeTree &tree)
{
  tree.ensure_topology_cache();

  /* Create new inferencing interface for this node group. */
  std::unique_ptr<FieldInferencingInterface> new_inferencing_interface =
      std::make_unique<FieldInferencingInterface>();
  new_inferencing_interface->inputs.resize(BLI_listbase_count(&tree.inputs),
                                           InputSocketFieldType::IsSupported);
  new_inferencing_interface->outputs.resize(BLI_listbase_count(&tree.outputs),
                                            OutputFieldDependency::ForDataSource());

  /* Keep track of the state of all sockets. The index into this array is #SocketRef::id(). */
  Array<SocketFieldState> field_state_by_socket_id(tree.all_sockets().size());

  /* The passes below must run in this order: requirements first, then input states,
   * then actual field status, then the resulting group outputs. */
  propagate_data_requirements_from_right_to_left(tree, field_state_by_socket_id);
  determine_group_input_states(tree, *new_inferencing_interface, field_state_by_socket_id);
  propagate_field_status_from_left_to_right(tree, field_state_by_socket_id);
  determine_group_output_states(tree, *new_inferencing_interface, field_state_by_socket_id);
  update_socket_shapes(tree, field_state_by_socket_id);

  /* Update the previous group interface. */
  const bool group_interface_changed = !tree.runtime->field_inferencing_interface ||
                                       *tree.runtime->field_inferencing_interface !=
                                           *new_inferencing_interface;
  tree.runtime->field_inferencing_interface = std::move(new_inferencing_interface);

  return group_interface_changed;
}
} // namespace blender::bke::node_field_inferencing

View File

@ -68,521 +68,6 @@ static void add_socket_tag(bNodeTree *ntree, bNodeSocket *socket, const eNodeTre
namespace blender::bke {
namespace node_field_inferencing {
/* Socket data types whose values can be fields (varying values), not only single values. */
static bool is_field_socket_type(eNodeSocketDatatype type)
{
  return ELEM(type, SOCK_FLOAT, SOCK_INT, SOCK_BOOLEAN, SOCK_VECTOR, SOCK_RGBA);
}
/* Overload checking the socket's registered type (from its `typeinfo`). */
static bool is_field_socket_type(const bNodeSocket &socket)
{
  return is_field_socket_type((eNodeSocketDatatype)socket.typeinfo->type);
}
/**
 * Determine how the given input socket of \a node interacts with fields:
 * never allowed (#None), allowed (#IsSupported), or a field is implicitly created
 * when the socket is unlinked (#Implicit).
 */
static InputSocketFieldType get_interface_input_field_type(const bNode &node,
                                                           const bNodeSocket &socket)
{
  if (!is_field_socket_type(socket)) {
    return InputSocketFieldType::None;
  }
  if (node.type == NODE_REROUTE) {
    return InputSocketFieldType::IsSupported;
  }
  if (node.type == NODE_GROUP_OUTPUT) {
    /* Outputs always support fields when the data type is correct. */
    return InputSocketFieldType::IsSupported;
  }
  if (node.typeinfo == &NodeTypeUndefined) {
    return InputSocketFieldType::None;
  }
  if (node.type == NODE_CUSTOM) {
    return InputSocketFieldType::None;
  }

  /* TODO: Ensure declaration exists. */
  const NodeDeclaration *node_decl = node.declaration();

  /* Node declarations should be implemented for nodes involved here. */
  BLI_assert(node_decl != nullptr);

  /* Get the field type from the declaration. */
  const SocketDeclaration &socket_decl = *node_decl->inputs()[socket.index()];
  const InputSocketFieldType field_type = socket_decl.input_field_type();
  if (field_type == InputSocketFieldType::Implicit) {
    return field_type;
  }
  if (node_decl->is_function_node()) {
    /* In a function node, every socket supports fields. */
    return InputSocketFieldType::IsSupported;
  }
  return field_type;
}
/**
 * Determine what the given output socket of \a node depends on: always plain data,
 * a new field source, or dependent on (some of) the node's inputs.
 */
static OutputFieldDependency get_interface_output_field_dependency(const bNode &node,
                                                                   const bNodeSocket &socket)
{
  if (!is_field_socket_type(socket)) {
    /* Non-field sockets always output data. */
    return OutputFieldDependency::ForDataSource();
  }
  if (node.type == NODE_REROUTE) {
    /* The reroute just forwards what is passed in. */
    return OutputFieldDependency::ForDependentField();
  }
  if (node.type == NODE_GROUP_INPUT) {
    /* Input nodes get special treatment in #determine_group_input_states. */
    return OutputFieldDependency::ForDependentField();
  }
  if (node.typeinfo == &NodeTypeUndefined) {
    return OutputFieldDependency::ForDataSource();
  }
  if (node.type == NODE_CUSTOM) {
    return OutputFieldDependency::ForDataSource();
  }

  const NodeDeclaration *node_decl = node.declaration();

  /* Node declarations should be implemented for nodes involved here. */
  BLI_assert(node_decl != nullptr);

  if (node_decl->is_function_node()) {
    /* In a generic function node, all outputs depend on all inputs. */
    return OutputFieldDependency::ForDependentField();
  }

  /* Use the socket declaration. */
  const SocketDeclaration &socket_decl = *node_decl->outputs()[socket.index()];
  return socket_decl.output_field_dependency();
}
/**
 * Fallback interface for nodes without usable information: no input supports fields and
 * every output is plain data.
 */
static FieldInferencingInterface get_dummy_field_inferencing_interface(const bNode &node)
{
  FieldInferencingInterface inferencing_interface;
  inferencing_interface.inputs.append_n_times(InputSocketFieldType::None,
                                              node.input_sockets().size());
  inferencing_interface.outputs.append_n_times(OutputFieldDependency::ForDataSource(),
                                               node.output_sockets().size());
  return inferencing_interface;
}
/**
 * Retrieves information about how the node interacts with fields.
 * In the future, this information can be stored in the node declaration. This would allow this
 * function to return a reference, making it more efficient.
 */
static FieldInferencingInterface get_node_field_inferencing_interface(const bNode &node)
{
  /* Node groups already reference all required information, so just return that. */
  if (node.is_group()) {
    bNodeTree *group = (bNodeTree *)node.id;
    if (group == nullptr) {
      /* A group node without an assigned group data-block has an empty interface. */
      return FieldInferencingInterface();
    }
    if (!ntreeIsRegistered(group)) {
      /* This can happen when there is a linked node group that was not found (see T92799). */
      return get_dummy_field_inferencing_interface(node);
    }
    if (!group->runtime->field_inferencing_interface) {
      /* This shouldn't happen because referenced node groups should always be updated first. */
      BLI_assert_unreachable();
    }
    return *group->runtime->field_inferencing_interface;
  }

  /* For regular nodes, build the interface per socket from the node declaration. */
  FieldInferencingInterface inferencing_interface;
  for (const bNodeSocket *input_socket : node.input_sockets()) {
    inferencing_interface.inputs.append(get_interface_input_field_type(node, *input_socket));
  }
  for (const bNodeSocket *output_socket : node.output_sockets()) {
    inferencing_interface.outputs.append(
        get_interface_output_field_dependency(node, *output_socket));
  }
  return inferencing_interface;
}
/**
 * This struct contains information for every socket. The values are propagated through the
 * network. `requires_single` is filled right-to-left, `is_single`/`is_field_source` are
 * filled left-to-right (see the propagation functions below).
 */
struct SocketFieldState {
  /* This socket starts a new field. */
  bool is_field_source = false;
  /* This socket can never become a field, because the node itself does not support it. */
  bool is_always_single = false;
  /* This socket is currently a single value. It could become a field though. */
  bool is_single = true;
  /* This socket is required to be a single value. This can be because the node itself only
   * supports this socket to be a single value, or because a node afterwards requires this to be a
   * single value. */
  bool requires_single = false;
};
/**
 * Return the input sockets of \a node that the output with the given field dependency
 * depends on: none, all of them, or an explicit subset.
 */
static Vector<const bNodeSocket *> gather_input_socket_dependencies(
    const OutputFieldDependency &field_dependency, const bNode &node)
{
  const OutputSocketFieldType type = field_dependency.field_type();
  Vector<const bNodeSocket *> input_sockets;
  switch (type) {
    case OutputSocketFieldType::FieldSource:
    case OutputSocketFieldType::None: {
      /* These outputs do not depend on any inputs. */
      break;
    }
    case OutputSocketFieldType::DependentField: {
      /* This output depends on all inputs. */
      input_sockets.extend(node.input_sockets());
      break;
    }
    case OutputSocketFieldType::PartiallyDependent: {
      /* This output depends only on a few inputs. */
      for (const int i : field_dependency.linked_input_indices()) {
        input_sockets.append(&node.input_socket(i));
      }
      break;
    }
  }
  return input_sockets;
}
/**
 * Check what the group output socket depends on. Potentially traverses the node tree
 * to figure out if it is always a field or if it depends on any group inputs.
 */
static OutputFieldDependency find_group_output_dependencies(
    const bNodeSocket &group_output_socket, const Span<SocketFieldState> field_state_by_socket_id)
{
  if (!is_field_socket_type(group_output_socket)) {
    return OutputFieldDependency::ForDataSource();
  }

  /* Use a Set here instead of an array indexed by socket id, because we may only need to look at
   * very few sockets. */
  Set<const bNodeSocket *> handled_sockets;
  Stack<const bNodeSocket *> sockets_to_check;

  handled_sockets.add(&group_output_socket);
  sockets_to_check.push(&group_output_socket);

  /* Keeps track of group input indices that are (indirectly) connected to the output. */
  Vector<int> linked_input_indices;

  /* Breadth-unordered walk to the left over sockets that may carry a field. */
  while (!sockets_to_check.is_empty()) {
    const bNodeSocket *input_socket = sockets_to_check.pop();

    if (!input_socket->is_directly_linked() &&
        !field_state_by_socket_id[input_socket->index_in_tree()].is_single) {
      /* This socket uses a field as input by default. */
      return OutputFieldDependency::ForFieldSource();
    }

    for (const bNodeSocket *origin_socket : input_socket->directly_linked_sockets()) {
      const bNode &origin_node = origin_socket->owner_node();
      const SocketFieldState &origin_state =
          field_state_by_socket_id[origin_socket->index_in_tree()];

      if (origin_state.is_field_source) {
        if (origin_node.type == NODE_GROUP_INPUT) {
          /* Found a group input that the group output depends on. */
          linked_input_indices.append_non_duplicates(origin_socket->index());
        }
        else {
          /* Found a field source that is not the group input. So the output is always a field. */
          return OutputFieldDependency::ForFieldSource();
        }
      }
      else if (!origin_state.is_single) {
        const FieldInferencingInterface inferencing_interface =
            get_node_field_inferencing_interface(origin_node);
        const OutputFieldDependency &field_dependency =
            inferencing_interface.outputs[origin_socket->index()];

        /* Propagate search further to the left. */
        for (const bNodeSocket *origin_input_socket :
             gather_input_socket_dependencies(field_dependency, origin_node)) {
          if (!origin_input_socket->is_available()) {
            continue;
          }
          if (!field_state_by_socket_id[origin_input_socket->index_in_tree()].is_single) {
            if (handled_sockets.add(origin_input_socket)) {
              sockets_to_check.push(origin_input_socket);
            }
          }
        }
      }
    }
  }
  return OutputFieldDependency::ForPartiallyDependentField(std::move(linked_input_indices));
}
/**
 * Propagate the "must be a single value" requirement backwards (right to left, in topological
 * order): an output has to be a single value when any linked downstream input requires one, and
 * that requirement is then forwarded to the inputs that the output depends on.
 */
static void propagate_data_requirements_from_right_to_left(
    const bNodeTree &tree, const MutableSpan<SocketFieldState> field_state_by_socket_id)
{
  const Span<const bNode *> toposort_result = tree.toposort_right_to_left();
  for (const bNode *node : toposort_result) {
    const FieldInferencingInterface inferencing_interface = get_node_field_inferencing_interface(
        *node);
    for (const bNodeSocket *output_socket : node->output_sockets()) {
      SocketFieldState &state = field_state_by_socket_id[output_socket->index_in_tree()];
      const OutputFieldDependency &field_dependency =
          inferencing_interface.outputs[output_socket->index()];
      /* A field source can never be required to be a single value. */
      if (field_dependency.field_type() == OutputSocketFieldType::FieldSource) {
        continue;
      }
      /* An output that never supports fields is always required to be a single value. */
      if (field_dependency.field_type() == OutputSocketFieldType::None) {
        state.requires_single = true;
        state.is_always_single = true;
        continue;
      }
      /* The output is required to be a single value when it is connected to any input that does
       * not support fields. */
      for (const bNodeSocket *target_socket : output_socket->directly_linked_sockets()) {
        if (target_socket->is_available()) {
          state.requires_single |=
              field_state_by_socket_id[target_socket->index_in_tree()].requires_single;
        }
      }
      if (state.requires_single) {
        bool any_input_is_field_implicitly = false;
        const Vector<const bNodeSocket *> connected_inputs = gather_input_socket_dependencies(
            field_dependency, *node);
        for (const bNodeSocket *input_socket : connected_inputs) {
          if (!input_socket->is_available()) {
            continue;
          }
          /* An unlinked implicit-field input makes this output a field regardless. */
          if (inferencing_interface.inputs[input_socket->index()] ==
              InputSocketFieldType::Implicit) {
            if (!input_socket->is_logically_linked()) {
              any_input_is_field_implicitly = true;
              break;
            }
          }
        }
        if (any_input_is_field_implicitly) {
          /* This output isn't a single value actually. */
          state.requires_single = false;
        }
        else {
          /* If the output is required to be a single value, the connected inputs in the same node
           * must not be fields as well. */
          for (const bNodeSocket *input_socket : connected_inputs) {
            field_state_by_socket_id[input_socket->index_in_tree()].requires_single = true;
          }
        }
      }
    }
    /* Some inputs do not require fields independent of what the outputs are connected to. */
    for (const bNodeSocket *input_socket : node->input_sockets()) {
      SocketFieldState &state = field_state_by_socket_id[input_socket->index_in_tree()];
      if (inferencing_interface.inputs[input_socket->index()] == InputSocketFieldType::None) {
        state.requires_single = true;
        state.is_always_single = true;
      }
    }
  }
}
/**
 * Decide the final field type of every group input and write it into
 * #new_inferencing_interface, based on the propagated `requires_single` states of the
 * Group Input node sockets. Also updates the socket states of all Group Input nodes so
 * they agree with the resulting interface.
 */
static void determine_group_input_states(
    const bNodeTree &tree,
    FieldInferencingInterface &new_inferencing_interface,
    const MutableSpan<SocketFieldState> field_state_by_socket_id)
{
  {
    /* Non-field inputs never support fields. */
    int index;
    LISTBASE_FOREACH_INDEX (bNodeSocket *, group_input, &tree.inputs, index) {
      if (!is_field_socket_type((eNodeSocketDatatype)group_input->type)) {
        new_inferencing_interface.inputs[index] = InputSocketFieldType::None;
      }
    }
  }
  /* Check if group inputs are required to be single values, because they are (indirectly)
   * connected to some socket that does not support fields. */
  for (const bNode *node : tree.nodes_by_type("NodeGroupInput")) {
    /* The last output is skipped; it is the dummy socket (see `dummy_socket_state` below). */
    for (const bNodeSocket *output_socket : node->output_sockets().drop_back(1)) {
      SocketFieldState &state = field_state_by_socket_id[output_socket->index_in_tree()];
      if (state.requires_single) {
        new_inferencing_interface.inputs[output_socket->index()] = InputSocketFieldType::None;
      }
    }
  }
  /* If an input does not support fields, this should be reflected in all Group Input nodes. */
  for (const bNode *node : tree.nodes_by_type("NodeGroupInput")) {
    for (const bNodeSocket *output_socket : node->output_sockets().drop_back(1)) {
      SocketFieldState &state = field_state_by_socket_id[output_socket->index_in_tree()];
      const bool supports_field = new_inferencing_interface.inputs[output_socket->index()] !=
                                  InputSocketFieldType::None;
      if (supports_field) {
        /* Field-capable group inputs are treated as field sources inside the group. */
        state.is_single = false;
        state.is_field_source = true;
      }
      else {
        state.requires_single = true;
      }
    }
    SocketFieldState &dummy_socket_state =
        field_state_by_socket_id[node->output_sockets().last()->index_in_tree()];
    dummy_socket_state.requires_single = true;
  }
}
/**
 * Propagate the actual field status (single value vs. field) forward (left to right, in
 * topological order) through the tree, taking linked origin sockets and each node's
 * inferencing interface into account.
 */
static void propagate_field_status_from_left_to_right(
    const bNodeTree &tree, const MutableSpan<SocketFieldState> field_state_by_socket_id)
{
  const Span<const bNode *> toposort_result = tree.toposort_left_to_right();
  for (const bNode *node : toposort_result) {
    /* Group Input node states were already set in #determine_group_input_states. */
    if (node->type == NODE_GROUP_INPUT) {
      continue;
    }
    const FieldInferencingInterface inferencing_interface = get_node_field_inferencing_interface(
        *node);
    /* Update field state of input sockets, also taking into account linked origin sockets. */
    for (const bNodeSocket *input_socket : node->input_sockets()) {
      SocketFieldState &state = field_state_by_socket_id[input_socket->index_in_tree()];
      if (state.is_always_single) {
        state.is_single = true;
        continue;
      }
      state.is_single = true;
      if (!input_socket->is_directly_linked()) {
        /* An unlinked implicit-field input becomes a field by default. */
        if (inferencing_interface.inputs[input_socket->index()] ==
            InputSocketFieldType::Implicit) {
          state.is_single = false;
        }
      }
      else {
        /* A linked input is a field when any of its origins is a field. */
        for (const bNodeSocket *origin_socket : input_socket->directly_linked_sockets()) {
          if (!field_state_by_socket_id[origin_socket->index_in_tree()].is_single) {
            state.is_single = false;
            break;
          }
        }
      }
    }
    /* Update field state of output sockets, also taking into account input sockets. */
    for (const bNodeSocket *output_socket : node->output_sockets()) {
      SocketFieldState &state = field_state_by_socket_id[output_socket->index_in_tree()];
      const OutputFieldDependency &field_dependency =
          inferencing_interface.outputs[output_socket->index()];
      switch (field_dependency.field_type()) {
        case OutputSocketFieldType::None: {
          state.is_single = true;
          break;
        }
        case OutputSocketFieldType::FieldSource: {
          state.is_single = false;
          state.is_field_source = true;
          break;
        }
        case OutputSocketFieldType::PartiallyDependent:
        case OutputSocketFieldType::DependentField: {
          /* The output is a field when any input it depends on is a field. */
          for (const bNodeSocket *input_socket :
               gather_input_socket_dependencies(field_dependency, *node)) {
            if (!input_socket->is_available()) {
              continue;
            }
            if (!field_state_by_socket_id[input_socket->index_in_tree()].is_single) {
              state.is_single = false;
              break;
            }
          }
          break;
        }
      }
    }
  }
}
/**
 * Compute the field dependency of every group output socket and store it in
 * #new_inferencing_interface. The last input socket of the group output node is skipped
 * (`drop_back(1)`; presumably the dummy/extension socket -- matches the Group Input handling).
 */
static void determine_group_output_states(const bNodeTree &tree,
                                          FieldInferencingInterface &new_inferencing_interface,
                                          const Span<SocketFieldState> field_state_by_socket_id)
{
  const bNode *group_output_node = tree.group_output_node();
  /* A tree without a group output node has nothing to do here. */
  if (!group_output_node) {
    return;
  }
  for (const bNodeSocket *group_output_socket : group_output_node->input_sockets().drop_back(1)) {
    OutputFieldDependency field_dependency = find_group_output_dependencies(
        *group_output_socket, field_state_by_socket_id);
    new_inferencing_interface.outputs[group_output_socket->index()] = std::move(field_dependency);
  }
}
/**
 * Update the display shape of every socket so the inferred field status is visible in the UI:
 * circle = requires a single value, diamond = field, diamond-with-dot = single value that
 * could still become a field.
 */
static void update_socket_shapes(const bNodeTree &tree,
                                 const Span<SocketFieldState> field_state_by_socket_id)
{
  const eNodeSocketDisplayShape requires_data_shape = SOCK_DISPLAY_SHAPE_CIRCLE;
  const eNodeSocketDisplayShape data_but_can_be_field_shape = SOCK_DISPLAY_SHAPE_DIAMOND_DOT;
  const eNodeSocketDisplayShape is_field_shape = SOCK_DISPLAY_SHAPE_DIAMOND;
  auto get_shape_for_state = [&](const SocketFieldState &state) {
    if (state.is_always_single) {
      return requires_data_shape;
    }
    if (!state.is_single) {
      return is_field_shape;
    }
    if (state.requires_single) {
      return requires_data_shape;
    }
    return data_but_can_be_field_shape;
  };
  for (const bNodeSocket *socket : tree.all_input_sockets()) {
    const SocketFieldState &state = field_state_by_socket_id[socket->index_in_tree()];
    const_cast<bNodeSocket *>(socket)->display_shape = get_shape_for_state(state);
  }
  /* NOTE(review): if `all_sockets()` already includes input sockets, the loop above is
   * redundant -- confirm against the topology-cache implementation. */
  for (const bNodeSocket *socket : tree.all_sockets()) {
    const SocketFieldState &state = field_state_by_socket_id[socket->index_in_tree()];
    const_cast<bNodeSocket *>(socket)->display_shape = get_shape_for_state(state);
  }
}
/**
 * Re-run field inferencing for the whole node tree and store the result in the tree's
 * runtime data.
 *
 * \return True when the group's field inferencing interface changed compared to the
 * previously stored one (or none was stored yet), so callers can propagate the update.
 */
static bool update_field_inferencing(const bNodeTree &tree)
{
  tree.ensure_topology_cache();

  /* Create new inferencing interface for this node group. */
  std::unique_ptr<FieldInferencingInterface> new_inferencing_interface =
      std::make_unique<FieldInferencingInterface>();
  new_inferencing_interface->inputs.resize(BLI_listbase_count(&tree.inputs),
                                           InputSocketFieldType::IsSupported);
  new_inferencing_interface->outputs.resize(BLI_listbase_count(&tree.outputs),
                                            OutputFieldDependency::ForDataSource());

  /* Keep track of the state of all sockets. The index into this array is #SocketRef::id(). */
  Array<SocketFieldState> field_state_by_socket_id(tree.all_sockets().size());

  /* The passes below must run in this order: requirements first, then input states,
   * then actual field status, then the resulting group outputs. */
  propagate_data_requirements_from_right_to_left(tree, field_state_by_socket_id);
  determine_group_input_states(tree, *new_inferencing_interface, field_state_by_socket_id);
  propagate_field_status_from_left_to_right(tree, field_state_by_socket_id);
  determine_group_output_states(tree, *new_inferencing_interface, field_state_by_socket_id);
  update_socket_shapes(tree, field_state_by_socket_id);

  /* Update the previous group interface. */
  const bool group_interface_changed = !tree.runtime->field_inferencing_interface ||
                                       *tree.runtime->field_inferencing_interface !=
                                           *new_inferencing_interface;
  tree.runtime->field_inferencing_interface = std::move(new_inferencing_interface);

  return group_interface_changed;
}
} // namespace node_field_inferencing
/**
* Common datatype priorities, works for compositor, shader and texture nodes alike
* defines priority of datatype connection based on output type (to):
@ -1007,9 +492,12 @@ class NodeTreeMainUpdater {
#ifdef DEBUG
/* Check the uniqueness of node identifiers. */
Set<int32_t> node_identifiers;
for (bNode *node : ntree.all_nodes()) {
BLI_assert(node->identifier > 0);
node_identifiers.add_new(node->identifier);
const Span<const bNode *> nodes = ntree.all_nodes();
for (const int i : nodes.index_range()) {
const bNode &node = *nodes[i];
BLI_assert(node.identifier > 0);
node_identifiers.add_new(node.identifier);
BLI_assert(node.runtime->index_in_tree == i);
}
#endif
@ -1276,15 +764,14 @@ class NodeTreeMainUpdater {
Array<int> toposort_indices(toposort.size());
for (const int i : toposort.index_range()) {
const bNode &node = *toposort[i];
toposort_indices[node.runtime->index_in_tree] = i;
toposort_indices[node.index()] = i;
}
LISTBASE_FOREACH (bNodeLink *, link, &ntree.links) {
link->flag |= NODE_LINK_VALID;
const bNode &from_node = *link->fromnode;
const bNode &to_node = *link->tonode;
if (toposort_indices[from_node.runtime->index_in_tree] >
toposort_indices[to_node.runtime->index_in_tree]) {
if (toposort_indices[from_node.index()] > toposort_indices[to_node.index()]) {
link->flag &= ~NODE_LINK_VALID;
continue;
}

View File

@ -361,8 +361,10 @@ static void update_pixels(PBVH *pbvh, Mesh *mesh, Image *image, ImageUser *image
return;
}
uv_islands::MeshData mesh_data(
pbvh->looptri, pbvh->totprim, pbvh->totvert, pbvh->mloop, ldata_uv);
uv_islands::MeshData mesh_data({pbvh->looptri, pbvh->totprim},
{pbvh->mloop, mesh->totloop},
pbvh->totvert,
{ldata_uv, mesh->totloop});
uv_islands::UVIslands islands(mesh_data);
uv_islands::UVIslandsMask uv_masks;
@ -385,7 +387,7 @@ static void update_pixels(PBVH *pbvh, Mesh *mesh, Image *image, ImageUser *image
islands.extend_borders(uv_masks);
update_geom_primitives(*pbvh, mesh_data);
UVPrimitiveLookup uv_primitive_lookup(mesh_data.looptri_len, islands);
UVPrimitiveLookup uv_primitive_lookup(mesh_data.looptris.size(), islands);
EncodePixelsUserData user_data;
user_data.pbvh = pbvh;

View File

@ -95,8 +95,8 @@ rctf MeshPrimitive::uv_bounds() const
static void mesh_data_init_vertices(MeshData &mesh_data)
{
mesh_data.vertices.reserve(mesh_data.vert_len);
for (int64_t i = 0; i < mesh_data.vert_len; i++) {
mesh_data.vertices.reserve(mesh_data.verts_num);
for (int64_t i = 0; i < mesh_data.verts_num; i++) {
MeshVertex vert;
vert.v = i;
mesh_data.vertices.append(vert);
@ -105,9 +105,9 @@ static void mesh_data_init_vertices(MeshData &mesh_data)
static void mesh_data_init_primitives(MeshData &mesh_data)
{
mesh_data.primitives.reserve(mesh_data.looptri_len);
for (int64_t i = 0; i < mesh_data.looptri_len; i++) {
const MLoopTri &tri = mesh_data.looptri[i];
mesh_data.primitives.reserve(mesh_data.looptris.size());
for (int64_t i = 0; i < mesh_data.looptris.size(); i++) {
const MLoopTri &tri = mesh_data.looptris[i];
MeshPrimitive primitive;
primitive.index = i;
primitive.poly = tri.poly;
@ -115,7 +115,7 @@ static void mesh_data_init_primitives(MeshData &mesh_data)
for (int j = 0; j < 3; j++) {
MeshUVVert uv_vert;
uv_vert.loop = tri.tri[j];
uv_vert.vertex = &mesh_data.vertices[mesh_data.mloop[uv_vert.loop].v];
uv_vert.vertex = &mesh_data.vertices[mesh_data.loops[uv_vert.loop].v];
uv_vert.uv = mesh_data.mloopuv[uv_vert.loop].uv;
primitive.vertices.append(uv_vert);
}
@ -125,14 +125,14 @@ static void mesh_data_init_primitives(MeshData &mesh_data)
static void mesh_data_init_edges(MeshData &mesh_data)
{
mesh_data.edges.reserve(mesh_data.looptri_len * 2);
EdgeHash *eh = BLI_edgehash_new_ex(__func__, mesh_data.looptri_len * 3);
for (int64_t i = 0; i < mesh_data.looptri_len; i++) {
const MLoopTri &tri = mesh_data.looptri[i];
mesh_data.edges.reserve(mesh_data.looptris.size() * 2);
EdgeHash *eh = BLI_edgehash_new_ex(__func__, mesh_data.looptris.size() * 3);
for (int64_t i = 0; i < mesh_data.looptris.size(); i++) {
const MLoopTri &tri = mesh_data.looptris[i];
MeshPrimitive &primitive = mesh_data.primitives[i];
for (int j = 0; j < 3; j++) {
int v1 = mesh_data.mloop[tri.tri[j]].v;
int v2 = mesh_data.mloop[tri.tri[(j + 1) % 3]].v;
int v1 = mesh_data.loops[tri.tri[j]].v;
int v2 = mesh_data.loops[tri.tri[(j + 1) % 3]].v;
void **edge_index_ptr;
int64_t edge_index;
@ -215,16 +215,11 @@ static void mesh_data_init(MeshData &mesh_data)
mesh_data_init_primitive_uv_island_ids(mesh_data);
}
MeshData::MeshData(const MLoopTri *looptri,
const int64_t looptri_len,
const int64_t vert_len,
const MLoop *mloop,
const MLoopUV *mloopuv)
: looptri(looptri),
looptri_len(looptri_len),
vert_len(vert_len),
mloop(mloop),
mloopuv(mloopuv)
MeshData::MeshData(const Span<MLoopTri> looptris,
const Span<MLoop> loops,
const int verts_num,
const Span<MLoopUV> mloopuv)
: looptris(looptris), verts_num(verts_num), loops(loops), mloopuv(mloopuv)
{
mesh_data_init(*this);
}

View File

@ -92,11 +92,10 @@ struct MeshPrimitive {
*/
struct MeshData {
public:
const MLoopTri *looptri;
const int64_t looptri_len;
const int64_t vert_len;
const MLoop *mloop;
const MLoopUV *mloopuv;
const Span<MLoopTri> looptris;
const int64_t verts_num;
const Span<MLoop> loops;
const Span<MLoopUV> mloopuv;
Vector<MeshPrimitive> primitives;
Vector<MeshEdge> edges;
@ -105,11 +104,10 @@ struct MeshData {
int64_t uv_island_len;
public:
explicit MeshData(const MLoopTri *looptri,
const int64_t looptri_len,
const int64_t vert_len,
const MLoop *mloop,
const MLoopUV *mloopuv);
explicit MeshData(const Span<MLoopTri> looptris,
const Span<MLoop> loops,
const int verts_num,
const Span<MLoopUV> mloopuv);
};
struct UVVertex {

View File

@ -320,12 +320,15 @@ struct VolumeGrid {
openvdb::io::File file(filepath);
/* Isolate file loading since that's potentially multithreaded and we are
/* Isolate file loading since that's potentially multi-threaded and we are
* holding a mutex lock. */
blender::threading::isolate_task([&] {
try {
/* Disable delay loading and file copying, this has poor performance
* on network drivers. */
const bool delay_load = false;
file.setCopyMaxBytes(0);
file.open();
file.open(delay_load);
openvdb::GridBase::Ptr vdb_grid = file.readGrid(name());
entry->grid->setTree(vdb_grid->baseTreePtr());
}
@ -883,8 +886,11 @@ bool BKE_volume_load(const Volume *volume, const Main *bmain)
openvdb::GridPtrVec vdb_grids;
try {
/* Disable delay loading and file copying, this has poor performance
* on network drivers. */
const bool delay_load = false;
file.setCopyMaxBytes(0);
file.open();
file.open(delay_load);
vdb_grids = *(file.readAllGridMetadata());
grids.metadata = file.getMetadata();
}

View File

@ -276,13 +276,22 @@ static void do_version_hue_sat_node(bNodeTree *ntree, bNode *node)
return;
}
/* Make sure new sockets are properly created. */
node_verify_sockets(ntree, node, false);
/* Convert value from old storage to new sockets. */
NodeHueSat *nhs = node->storage;
bNodeSocket *hue = nodeFindSocket(node, SOCK_IN, "Hue"),
*saturation = nodeFindSocket(node, SOCK_IN, "Saturation"),
*value = nodeFindSocket(node, SOCK_IN, "Value");
bNodeSocket *hue = nodeFindSocket(node, SOCK_IN, "Hue");
bNodeSocket *saturation = nodeFindSocket(node, SOCK_IN, "Saturation");
bNodeSocket *value = nodeFindSocket(node, SOCK_IN, "Value");
if (hue == NULL) {
hue = nodeAddStaticSocket(ntree, node, SOCK_IN, SOCK_FLOAT, PROP_FACTOR, "Hue", "Hue");
}
if (saturation == NULL) {
saturation = nodeAddStaticSocket(
ntree, node, SOCK_IN, SOCK_FLOAT, PROP_FACTOR, "Saturation", "Saturation");
}
if (value == NULL) {
value = nodeAddStaticSocket(ntree, node, SOCK_IN, SOCK_FLOAT, PROP_FACTOR, "Value", "Value");
}
((bNodeSocketValueFloat *)hue->default_value)->value = nhs->hue;
((bNodeSocketValueFloat *)saturation->default_value)->value = nhs->sat;
((bNodeSocketValueFloat *)value->default_value)->value = nhs->val;

View File

@ -20,9 +20,9 @@ vec3 compute_chromatic_distortion_scale(float distance_squared)
/* Compute the image coordinates after distortion by the given distortion scale computed by the
* compute_distortion_scale function. Note that the function expects centered normalized UV
* coordinates but outputs non-centered image coordinates. */
vec2 compute_distorted_uv(vec2 uv, float scale)
vec2 compute_distorted_uv(vec2 uv, float uv_scale)
{
return (uv * scale + 0.5) * texture_size(input_tx) - 0.5;
return (uv * uv_scale + 0.5) * texture_size(input_tx) - 0.5;
}
/* Compute the number of integration steps that should be used to approximate the distorted pixel

View File

@ -1,35 +1,35 @@
/* A shorthand for 1D textureSize with a zero LOD. */
int texture_size(sampler1D sampler)
int texture_size(sampler1D sampler_1d)
{
return textureSize(sampler, 0);
return textureSize(sampler_1d, 0);
}
/* A shorthand for 1D texelFetch with zero LOD and bounded access clamped to border. */
vec4 texture_load(sampler1D sampler, int x)
vec4 texture_load(sampler1D sampler_1d, int x)
{
const int texture_bound = texture_size(sampler) - 1;
return texelFetch(sampler, clamp(x, 0, texture_bound), 0);
const int texture_bound = texture_size(sampler_1d) - 1;
return texelFetch(sampler_1d, clamp(x, 0, texture_bound), 0);
}
/* A shorthand for 2D textureSize with a zero LOD. */
ivec2 texture_size(sampler2D sampler)
ivec2 texture_size(sampler2D sampler_2d)
{
return textureSize(sampler, 0);
return textureSize(sampler_2d, 0);
}
/* A shorthand for 2D texelFetch with zero LOD and bounded access clamped to border. */
vec4 texture_load(sampler2D sampler, ivec2 texel)
vec4 texture_load(sampler2D sampler_2d, ivec2 texel)
{
const ivec2 texture_bounds = texture_size(sampler) - ivec2(1);
return texelFetch(sampler, clamp(texel, ivec2(0), texture_bounds), 0);
const ivec2 texture_bounds = texture_size(sampler_2d) - ivec2(1);
return texelFetch(sampler_2d, clamp(texel, ivec2(0), texture_bounds), 0);
}
/* A shorthand for 2D texelFetch with zero LOD and a fallback value for out-of-bound access. */
vec4 texture_load(sampler2D sampler, ivec2 texel, vec4 fallback)
vec4 texture_load(sampler2D sampler_2d, ivec2 texel, vec4 fallback)
{
const ivec2 texture_bounds = texture_size(sampler) - ivec2(1);
const ivec2 texture_bounds = texture_size(sampler_2d) - ivec2(1);
if (any(lessThan(texel, ivec2(0))) || any(greaterThan(texel, texture_bounds))) {
return fallback;
}
return texelFetch(sampler, texel, 0);
return texelFetch(sampler_2d, texel, 0);
}

View File

@ -280,6 +280,7 @@ set(SRC
engines/image/image_buffer_cache.hh
engines/image/image_drawing_mode.hh
engines/image/image_engine.h
engines/image/image_enums.hh
engines/image/image_instance_data.hh
engines/image/image_partial_updater.hh
engines/image/image_private.hh
@ -534,6 +535,7 @@ set(GLSL_SRC
intern/draw_command_shared.hh
intern/draw_common_shader_shared.h
intern/draw_defines.h
intern/draw_pointcloud_private.hh
intern/draw_shader_shared.h
engines/gpencil/shaders/gpencil_frag.glsl

View File

@ -1158,7 +1158,7 @@ World *EEVEE_world_default_get(void)
* Source is provided separately, rather than via create-info as source is manipulated
* by `eevee_shader_material_create_info_amend`.
*
* We also retain the previous behaviour for ensuring library includes occur in the
* We also retain the previous behavior for ensuring library includes occur in the
* correct order. */
static const char *eevee_get_vert_info(int options, char **r_src)
{
@ -1288,7 +1288,7 @@ static char *eevee_get_defines(int options)
* CreateInfo's for EEVEE materials are declared in:
* `eevee/shaders/infos/eevee_legacy_material_info.hh`
*
* This function should only contain defines which alter behaviour, but do not affect shader
* This function should only contain defines which alter behavior, but do not affect shader
* resources. */
if ((options & VAR_WORLD_BACKGROUND) != 0) {

View File

@ -21,7 +21,7 @@ GPU_SHADER_CREATE_INFO(eevee_legacy_irradiance_lib)
GPU_SHADER_CREATE_INFO(eevee_legacy_common_utiltex_lib)
.sampler(2, ImageType::FLOAT_2D_ARRAY, "utilTex");
/* Raytrace lib. */
/* Ray-trace lib. */
GPU_SHADER_CREATE_INFO(eevee_legacy_raytrace_lib)
.additional_info("draw_view")
.additional_info("eevee_legacy_common_lib")
@ -33,7 +33,7 @@ GPU_SHADER_CREATE_INFO(eevee_legacy_ambient_occlusion_lib)
.additional_info("eevee_legacy_raytrace_lib")
.sampler(5, ImageType::FLOAT_2D, "horizonBuffer");
/* Lightprobe lib. */
/* Light-probe lib. */
GPU_SHADER_CREATE_INFO(eevee_legacy_lightprobe_lib)
.additional_info("eevee_legacy_common_lib")
.additional_info("eevee_legacy_common_utiltex_lib")
@ -128,7 +128,7 @@ GPU_SHADER_CREATE_INFO(eevee_legacy_surface_lib_hair)
GPU_SHADER_CREATE_INFO(eevee_legacy_surface_lib_pointcloud)
.define("USE_SURFACE_LIB_POINTCLOUD")
/* Pointcloud still uses the common interface as well. */
/* Point-cloud still uses the common interface as well. */
.additional_info("eevee_legacy_surface_lib_common")
.vertex_out(eevee_legacy_surface_point_cloud_iface);

View File

@ -2,7 +2,7 @@
#pragma once
/* Voluemtric iface. */
/* Volumetric iface. */
GPU_SHADER_INTERFACE_INFO(legacy_volume_vert_geom_iface, "volumetric_vert_iface")
.smooth(Type::VEC4, "vPos");

View File

@ -9,7 +9,9 @@
* dragging larger headers into the createInfo pipeline which would cause problems.
*/
#pragma once
#ifndef GPU_SHADER
# pragma once
#endif
/* Hierarchical Z down-sampling. */
#define HIZ_MIP_COUNT 8

View File

@ -26,7 +26,7 @@ shared uint bg_min_coc;
shared uint bg_max_coc;
shared uint bg_min_intersectable_coc;
const uint dof_tile_large_coc_uint = floatBitsToUint(dof_tile_large_coc);
uint dof_tile_large_coc_uint = floatBitsToUint(dof_tile_large_coc);
void main()
{

View File

@ -13,8 +13,12 @@ vec2 proj(vec4 pos)
return (0.5 * (pos.xy / pos.w) + 0.5) * sizeViewport.xy;
}
#define SET_INTENSITY(A, B, C, min, max) \
(((1.0 - (float(C - B) / float(C - A))) * (max - min)) + min)
float calc_intensity(int segment_start, int segment_current, int segment_end, float min, float max)
{
return ((1.0 - (float(segment_end - segment_current) / float(segment_end - segment_start))) *
(max - min)) +
min;
}
void main()
{
@ -39,10 +43,10 @@ void main()
else {
/* black - before frameCurrent */
if (selected) {
intensity = SET_INTENSITY(frameStart, frame, frameCurrent, 0.25, 0.75);
intensity = calc_intensity(frameStart, frame, frameCurrent, 0.25, 0.75);
}
else {
intensity = SET_INTENSITY(frameStart, frame, frameCurrent, 0.68, 0.92);
intensity = calc_intensity(frameStart, frame, frameCurrent, 0.68, 0.92);
}
interp.color.rgb = mix(colorWire.rgb, blend_base, intensity);
}
@ -55,10 +59,10 @@ void main()
else {
/* blue - after frameCurrent */
if (selected) {
intensity = SET_INTENSITY(frameCurrent, frame, frameEnd, 0.25, 0.75);
intensity = calc_intensity(frameCurrent, frame, frameEnd, 0.25, 0.75);
}
else {
intensity = SET_INTENSITY(frameCurrent, frame, frameEnd, 0.68, 0.92);
intensity = calc_intensity(frameCurrent, frame, frameEnd, 0.68, 0.92);
}
interp.color.rgb = mix(colorBonePose.rgb, blend_base, intensity);

View File

@ -5,5 +5,6 @@ void main()
{
vData.pos = pos;
vData.frontPosition = point_object_to_ndc(pos);
vData.backPosition = point_object_to_ndc(pos + lightDirection * lightDistance);
vec3 back_pos = pos + lightDirection * lightDistance;
vData.backPosition = point_object_to_ndc(back_pos);
}

View File

@ -217,4 +217,7 @@ void workbench_render(void *ved, RenderEngine *engine, RenderLayer *render_layer
void workbench_render_update_passes(RenderEngine *engine, Scene *scene, ViewLayer *view_layer)
{
RE_engine_register_pass(engine, scene, view_layer, RE_PASSNAME_COMBINED, 4, "RGBA", SOCK_RGBA);
if ((view_layer->passflag & SCE_PASS_Z) != 0) {
RE_engine_register_pass(engine, scene, view_layer, RE_PASSNAME_Z, 1, "Z", SOCK_FLOAT);
}
}

View File

@ -569,7 +569,7 @@ SamplerState PointSampler
# define SMAAGather(tex, coord) tex.Gather(LinearSampler, coord, 0)
# endif
#endif
#if defined(SMAA_GLSL_3) || defined(SMAA_GLSL_4) || defined(GPU_METAL)
#if defined(SMAA_GLSL_3) || defined(SMAA_GLSL_4) || defined(GPU_METAL) || defined(GPU_VULKAN)
# define SMAATexture2D(tex) sampler2D tex
# define SMAATexturePass2D(tex) tex
# define SMAASampleLevelZero(tex, coord) textureLod(tex, coord, 0.0)
@ -583,8 +583,28 @@ SamplerState PointSampler
# define lerp(a, b, t) mix(a, b, t)
# define saturate(a) clamp(a, 0.0, 1.0)
# if defined(SMAA_GLSL_4)
# define mad(a, b, c) fma(a, b, c)
# define SMAAGather(tex, coord) textureGather(tex, coord)
# endif
# if defined(SMAA_GLSL_4)
# define mad(a, b, c) fma(a, b, c)
# elif defined(GPU_VULKAN)
/* NOTE(Vulkan) mad macro doesn't work, define each override as work-around. */
vec4 mad(vec4 a, vec4 b, vec4 c)
{
return fma(a, b, c);
}
vec3 mad(vec3 a, vec3 b, vec3 c)
{
return fma(a, b, c);
}
vec2 mad(vec2 a, vec2 b, vec2 c)
{
return fma(a, b, c);
}
float mad(float a, float b, float c)
{
return fma(a, b, c);
}
# else
# define mad(a, b, c) (a * b + c)
# endif

View File

@ -234,12 +234,28 @@ uniform mat4 ModelMatrixInverse;
(ProjectionMatrix * (ViewMatrix * vec4((ModelMatrix * vec4(p, 1.0)).xyz, 1.0)))
#define point_object_to_view(p) ((ViewMatrix * vec4((ModelMatrix * vec4(p, 1.0)).xyz, 1.0)).xyz)
#define point_object_to_world(p) ((ModelMatrix * vec4(p, 1.0)).xyz)
#define point_view_to_ndc(p) (ProjectionMatrix * vec4(p, 1.0))
#define point_view_to_object(p) ((ModelMatrixInverse * (ViewMatrixInverse * vec4(p, 1.0))).xyz)
#define point_view_to_world(p) ((ViewMatrixInverse * vec4(p, 1.0)).xyz)
#define point_world_to_ndc(p) (ProjectionMatrix * (ViewMatrix * vec4(p, 1.0)))
#define point_world_to_object(p) ((ModelMatrixInverse * vec4(p, 1.0)).xyz)
#define point_world_to_view(p) ((ViewMatrix * vec4(p, 1.0)).xyz)
vec4 point_view_to_ndc(vec3 p)
{
return ProjectionMatrix * vec4(p, 1.0);
}
vec3 point_view_to_world(vec3 p)
{
return (ViewMatrixInverse * vec4(p, 1.0)).xyz;
}
vec4 point_world_to_ndc(vec3 p)
{
return ProjectionMatrix * (ViewMatrix * vec4(p, 1.0));
}
vec3 point_world_to_view(vec3 p)
{
return (ViewMatrix * vec4(p, 1.0)).xyz;
}
/* Due to some shader compiler bug, we somewhat need to access gl_VertexID
* to make vertex shaders work. even if it's actually dead code. */

View File

@ -344,9 +344,16 @@ void animviz_motionpath_compute_range(Object *ob, Scene *scene)
{
bAnimVizSettings *avs = ob->mode == OB_MODE_POSE ? &ob->pose->avs : &ob->avs;
if (avs->path_range == MOTIONPATH_RANGE_MANUAL) {
/* Don't touch manually-determined ranges. */
return;
}
const bool has_action = ob->adt && ob->adt->action;
if (avs->path_range == MOTIONPATH_RANGE_SCENE || !has_action ||
BLI_listbase_is_empty(&ob->adt->action->curves)) {
/* Default to the scene (preview) range if there is no animation data to
* find selected keys in. */
avs->path_sf = PSFRA;
avs->path_ef = PEFRA;
return;
@ -367,6 +374,7 @@ void animviz_motionpath_compute_range(Object *ob, Scene *scene)
case MOTIONPATH_RANGE_KEYS_ALL:
ED_keylist_all_keys_frame_range(keylist, &frame_range);
break;
case MOTIONPATH_RANGE_MANUAL:
case MOTIONPATH_RANGE_SCENE:
BLI_assert_msg(false, "This should not happen, function should have exited earlier.");
};

View File

@ -2,6 +2,7 @@
#pragma once
#include "BLI_vector_set.hh"
#include "ED_node.h"
struct SpaceNode;
@ -11,6 +12,8 @@ struct bNodeTree;
namespace blender::ed::space_node {
VectorSet<bNode *> get_selected_nodes(bNodeTree &node_tree);
void node_insert_on_link_flags_set(SpaceNode &snode, const ARegion &region);
/**

View File

@ -995,7 +995,7 @@ static void ui_apply_but_funcs_after(bContext *C)
BLI_listbase_clear(&UIAfterFuncs);
LISTBASE_FOREACH_MUTABLE (uiAfterFunc *, afterf, &funcs) {
uiAfterFunc after = *afterf; /* copy to avoid memleak on exit() */
uiAfterFunc after = *afterf; /* Copy to avoid memory leak on exit(). */
BLI_freelinkN(&funcs, afterf);
if (after.context) {

View File

@ -2312,7 +2312,7 @@ int UI_icon_from_rnaptr(const bContext *C, PointerRNA *ptr, int rnaicon, const b
return rnaicon;
}
/* try ID, material, texture or dynapaint slot */
/* Try ID, material, texture or dynamic-paint slot. */
if (RNA_struct_is_ID(ptr->type)) {
id = ptr->owner_id;
}

View File

@ -1319,6 +1319,7 @@ static bool sculpt_brush_use_topology_rake(const SculptSession *ss, const Brush
*/
static int sculpt_brush_needs_normal(const SculptSession *ss, Sculpt *sd, const Brush *brush)
{
const MTex *mask_tex = BKE_brush_mask_texture_get(brush, OB_MODE_SCULPT);
return ((SCULPT_TOOL_HAS_NORMAL_WEIGHT(brush->sculpt_tool) &&
(ss->cache->normal_weight > 0.0f)) ||
SCULPT_automasking_needs_normal(ss, sd, brush) ||
@ -1334,7 +1335,7 @@ static int sculpt_brush_needs_normal(const SculptSession *ss, Sculpt *sd, const
SCULPT_TOOL_ELASTIC_DEFORM,
SCULPT_TOOL_THUMB) ||
(brush->mtex.brush_map_mode == MTEX_MAP_MODE_AREA)) ||
(mask_tex->brush_map_mode == MTEX_MAP_MODE_AREA)) ||
sculpt_brush_use_topology_rake(ss, brush);
}
@ -2861,7 +2862,10 @@ static void calc_local_y(ViewContext *vc, const float center[3], float y[3])
mul_m4_v3(ob->world_to_object, y);
}
static void calc_brush_local_mat(const Brush *brush, Object *ob, float local_mat[4][4])
static void calc_brush_local_mat(const Brush *brush,
const MTex *mtex,
Object *ob,
float local_mat[4][4])
{
const StrokeCache *cache = ob->sculpt->cache;
float tmat[4][4];
@ -2885,7 +2889,7 @@ static void calc_brush_local_mat(const Brush *brush, Object *ob, float local_mat
/* Calculate the X axis of the local matrix. */
cross_v3_v3v3(v, up, cache->sculpt_normal);
/* Apply rotation (user angle, rake, etc.) to X axis. */
angle = brush->mtex.rot - cache->special_rotation;
angle = mtex->rot - cache->special_rotation;
rotate_v3_v3v3fl(mat[0], v, cache->sculpt_normal, angle);
/* Get other axes. */
@ -2932,7 +2936,9 @@ static void update_brush_local_mat(Sculpt *sd, Object *ob)
StrokeCache *cache = ob->sculpt->cache;
if (cache->mirror_symmetry_pass == 0 && cache->radial_symmetry_pass == 0) {
calc_brush_local_mat(BKE_paint_brush(&sd->paint), ob, cache->brush_local_mat);
const Brush *brush = BKE_paint_brush(&sd->paint);
const MTex *mask_tex = BKE_brush_mask_texture_get(brush, OB_MODE_SCULPT);
calc_brush_local_mat(brush, mask_tex, ob, cache->brush_local_mat);
}
}
@ -3512,7 +3518,8 @@ static void do_brush_action(Sculpt *sd,
update_sculpt_normal(sd, ob, nodes, totnode);
}
if (brush->mtex.brush_map_mode == MTEX_MAP_MODE_AREA) {
const MTex *mask_tex = BKE_brush_mask_texture_get(brush, static_cast<eObjectMode>(ob->mode));
if (mask_tex->brush_map_mode == MTEX_MAP_MODE_AREA) {
update_brush_local_mat(sd, ob);
}
@ -4047,7 +4054,7 @@ static void sculpt_fix_noise_tear(Sculpt *sd, Object *ob)
{
SculptSession *ss = ob->sculpt;
Brush *brush = BKE_paint_brush(&sd->paint);
MTex *mtex = &brush->mtex;
const MTex *mtex = BKE_brush_mask_texture_get(brush, OB_MODE_SCULPT);
if (ss->multires.active && mtex->tex && mtex->tex->type == TEX_NOISE) {
multires_stitch_grids(ob);
@ -5196,12 +5203,12 @@ bool SCULPT_stroke_get_location(bContext *C,
static void sculpt_brush_init_tex(Sculpt *sd, SculptSession *ss)
{
Brush *brush = BKE_paint_brush(&sd->paint);
MTex *mtex = &brush->mtex;
const MTex *mask_tex = BKE_brush_mask_texture_get(brush, OB_MODE_SCULPT);
/* Init mtex nodes. */
if (mtex->tex && mtex->tex->nodetree) {
if (mask_tex->tex && mask_tex->tex->nodetree) {
/* Has internal flag to detect it only does it once. */
ntreeTexBeginExecTree(mtex->tex->nodetree);
ntreeTexBeginExecTree(mask_tex->tex->nodetree);
}
if (ss->tex_pool == nullptr) {
@ -5630,10 +5637,10 @@ static void sculpt_stroke_update_step(bContext *C,
static void sculpt_brush_exit_tex(Sculpt *sd)
{
Brush *brush = BKE_paint_brush(&sd->paint);
MTex *mtex = &brush->mtex;
const MTex *mask_tex = BKE_brush_mask_texture_get(brush, OB_MODE_SCULPT);
if (mtex->tex && mtex->tex->nodetree) {
ntreeTexEndExecTree(mtex->tex->nodetree->runtime->execdata);
if (mask_tex->tex && mask_tex->tex->nodetree) {
ntreeTexEndExecTree(mask_tex->tex->nodetree->runtime->execdata);
}
}

View File

@ -193,7 +193,8 @@ static float sculpt_expand_max_vertex_falloff_get(ExpandCache *expand_cache)
return expand_cache->max_vert_falloff;
}
if (!expand_cache->brush->mtex.tex) {
const MTex *mask_tex = BKE_brush_mask_texture_get(expand_cache->brush, OB_MODE_SCULPT);
if (!mask_tex->tex) {
return expand_cache->max_vert_falloff;
}
@ -1882,13 +1883,14 @@ static int sculpt_expand_modal(bContext *C, wmOperator *op, const wmEvent *event
}
case SCULPT_EXPAND_MODAL_TEXTURE_DISTORTION_INCREASE: {
if (expand_cache->texture_distortion_strength == 0.0f) {
if (expand_cache->brush->mtex.tex == NULL) {
const MTex *mask_tex = BKE_brush_mask_texture_get(expand_cache->brush, OB_MODE_SCULPT);
if (mask_tex->tex == NULL) {
BKE_report(op->reports,
RPT_WARNING,
"Active brush does not contain any texture to distort the expand boundary");
break;
}
if (expand_cache->brush->mtex.brush_map_mode != MTEX_MAP_MODE_3D) {
if (mask_tex->brush_map_mode != MTEX_MAP_MODE_3D) {
BKE_report(op->reports,
RPT_WARNING,
"Texture mapping not set to 3D, results may be unpredictable");
@ -2052,7 +2054,6 @@ static void sculpt_expand_cache_initial_config_set(bContext *C,
IMB_colormanagement_srgb_to_scene_linear_v3(expand_cache->fill_color, expand_cache->fill_color);
expand_cache->scene = CTX_data_scene(C);
expand_cache->mtex = &expand_cache->brush->mtex;
expand_cache->texture_distortion_strength = 0.0f;
expand_cache->blend_mode = expand_cache->brush->blend;
}

View File

@ -752,7 +752,7 @@ typedef struct ExpandCache {
/* Texture distortion data. */
Brush *brush;
struct Scene *scene;
struct MTex *mtex;
// struct MTex *mtex;
/* Controls how much texture distortion will be applied to the current falloff */
float texture_distortion_strength;

View File

@ -282,6 +282,7 @@ void node_sort(bNodeTree &ntree)
for (const int i : sort_nodes.index_range()) {
BLI_addtail(&ntree.nodes, sort_nodes[i]);
ntree.runtime->nodes_by_id.add_new(sort_nodes[i]);
sort_nodes[i]->runtime->index_in_tree = i;
}
}

View File

@ -33,6 +33,7 @@
#include "DEG_depsgraph_build.h"
#include "ED_node.h" /* own include */
#include "ED_node.hh"
#include "ED_render.h"
#include "ED_screen.h"

View File

@ -183,7 +183,6 @@ void node_keymap(wmKeyConfig *keyconf);
rctf node_frame_rect_inside(const bNode &node);
bool node_or_socket_isect_event(const bContext &C, const wmEvent &event);
VectorSet<bNode *> get_selected_nodes(bNodeTree &node_tree);
void node_deselect_all(SpaceNode &snode);
void node_socket_select(bNode *node, bNodeSocket &sock);
void node_socket_deselect(bNode *node, bNodeSocket &sock, bool deselect_node);

View File

@ -1641,31 +1641,31 @@ static void node_join_attach_recursive(bNodeTree &ntree,
bNode *frame,
const VectorSet<bNode *> &selected_nodes)
{
join_states[node->runtime->index_in_tree].done = true;
join_states[node->index()].done = true;
if (node == frame) {
join_states[node->runtime->index_in_tree].descendent = true;
join_states[node->index()].descendent = true;
}
else if (node->parent) {
/* call recursively */
if (!join_states[node->parent->runtime->index_in_tree].done) {
if (!join_states[node->parent->index()].done) {
node_join_attach_recursive(ntree, join_states, node->parent, frame, selected_nodes);
}
/* in any case: if the parent is a descendant, so is the child */
if (join_states[node->parent->runtime->index_in_tree].descendent) {
join_states[node->runtime->index_in_tree].descendent = true;
if (join_states[node->parent->index()].descendent) {
join_states[node->index()].descendent = true;
}
else if (selected_nodes.contains(node)) {
/* if parent is not an descendant of the frame, reattach the node */
nodeDetachNode(&ntree, node);
nodeAttachNode(&ntree, node, frame);
join_states[node->runtime->index_in_tree].descendent = true;
join_states[node->index()].descendent = true;
}
}
else if (selected_nodes.contains(node)) {
nodeAttachNode(&ntree, node, frame);
join_states[node->runtime->index_in_tree].descendent = true;
join_states[node->index()].descendent = true;
}
}
@ -1685,7 +1685,7 @@ static int node_join_exec(bContext *C, wmOperator * /*op*/)
Array<NodeJoinState> join_states(ntree.all_nodes().size(), NodeJoinState{false, false});
for (bNode *node : ntree.all_nodes()) {
if (!join_states[node->runtime->index_in_tree].done) {
if (!join_states[node->index()].done) {
node_join_attach_recursive(ntree, join_states, node, frame_node, selected_nodes);
}
}
@ -1818,26 +1818,26 @@ static void node_detach_recursive(bNodeTree &ntree,
MutableSpan<NodeDetachstate> detach_states,
bNode *node)
{
detach_states[node->runtime->index_in_tree].done = true;
detach_states[node->index()].done = true;
if (node->parent) {
/* call recursively */
if (!detach_states[node->parent->runtime->index_in_tree].done) {
if (!detach_states[node->parent->index()].done) {
node_detach_recursive(ntree, detach_states, node->parent);
}
/* in any case: if the parent is a descendant, so is the child */
if (detach_states[node->parent->runtime->index_in_tree].descendent) {
detach_states[node->runtime->index_in_tree].descendent = true;
if (detach_states[node->parent->index()].descendent) {
detach_states[node->index()].descendent = true;
}
else if (node->flag & NODE_SELECT) {
/* if parent is not a descendant of a selected node, detach */
nodeDetachNode(&ntree, node);
detach_states[node->runtime->index_in_tree].descendent = true;
detach_states[node->index()].descendent = true;
}
}
else if (node->flag & NODE_SELECT) {
detach_states[node->runtime->index_in_tree].descendent = true;
detach_states[node->index()].descendent = true;
}
}
@ -1853,7 +1853,7 @@ static int node_detach_exec(bContext *C, wmOperator * /*op*/)
* relative order is preserved here!
*/
for (bNode *node : ntree.all_nodes()) {
if (!detach_states[node->runtime->index_in_tree].done) {
if (!detach_states[node->index()].done) {
node_detach_recursive(ntree, detach_states, node);
}
}

View File

@ -26,7 +26,8 @@
#include "BKE_node_tree_update.h"
#include "BKE_workspace.h"
#include "ED_node.h" /* own include */
#include "ED_node.h" /* own include */
#include "ED_node.hh" /* own include */
#include "ED_screen.h"
#include "ED_select_utils.h"
#include "ED_view3d.h"
@ -1267,7 +1268,7 @@ static int node_select_same_type_step_exec(bContext *C, wmOperator *op)
}
}
bNode *new_active_node = node_tree.all_nodes()[toposort[new_index]->runtime->index_in_tree];
bNode *new_active_node = node_tree.all_nodes()[toposort[new_index]->index()];
if (new_active_node == &active_node) {
return OPERATOR_CANCELLED;
}

View File

@ -361,8 +361,7 @@ static Vector<NodeLinkItem> ui_node_link_items(NodeLinkArg *arg,
using namespace blender::nodes;
r_node_decl.emplace(NodeDeclaration());
NodeDeclarationBuilder node_decl_builder{*r_node_decl};
arg->node_type->declare(node_decl_builder);
blender::nodes::build_node_declaration(*arg->node_type, *r_node_decl);
Span<SocketDeclarationPtr> socket_decls = (in_out == SOCK_IN) ? r_node_decl->inputs() :
r_node_decl->outputs();
int index = 0;

View File

@ -524,7 +524,7 @@ static bool v3d_cursor_is_snap_invert(SnapCursorDataIntern *data_intern, const w
const int snap_on = data_intern->snap_on;
wmKeyMap *keymap = WM_keymap_active(wm, data_intern->keymap);
for (wmKeyMapItem *kmi = keymap->items.first; kmi; kmi = kmi->next) {
LISTBASE_FOREACH (const wmKeyMapItem *, kmi, &keymap->items) {
if (kmi->flag & KMI_INACTIVE) {
continue;
}

View File

@ -1336,7 +1336,7 @@ bool calculateTransformCenter(bContext *C, int centerMode, float cent3d[3], floa
t->state = TRANS_RUNNING;
/* avoid calculating PET */
/* Avoid calculating proportional editing. */
t->options = CTX_NO_PET;
t->mode = TFM_DUMMY;
@ -1859,9 +1859,7 @@ bool initTransform(bContext *C, TransInfo *t, wmOperator *op, const wmEvent *eve
* lead to keymap conflicts for other modes (see T31584)
*/
if (ELEM(mode, TFM_TRANSLATION, TFM_ROTATION, TFM_RESIZE)) {
wmKeyMapItem *kmi;
for (kmi = t->keymap->items.first; kmi; kmi = kmi->next) {
LISTBASE_FOREACH (const wmKeyMapItem *, kmi, &t->keymap->items) {
if (kmi->flag & KMI_INACTIVE) {
continue;
}

View File

@ -23,10 +23,6 @@
extern "C" {
#endif
/* use node center for transform instead of upper-left corner.
* disabled since it makes absolute snapping not work so nicely
*/
// #define USE_NODE_CENTER
/* -------------------------------------------------------------------- */
/** \name Types/

View File

@ -709,7 +709,7 @@ static int countAndCleanTransDataContainer(TransInfo *t)
static void init_proportional_edit(TransInfo *t)
{
/* NOTE: PET is not usable in pose mode yet T32444. */
/* NOTE: Proportional editing is not usable in pose mode yet T32444. */
if (!ELEM(t->data_type,
&TransConvertType_Action,
&TransConvertType_Curve,
@ -726,7 +726,7 @@ static void init_proportional_edit(TransInfo *t)
&TransConvertType_Node,
&TransConvertType_Object,
&TransConvertType_Particle)) {
/* Disable PET */
/* Disable proportional editing */
t->options |= CTX_NO_PET;
t->flag &= ~T_PROP_EDIT_ALL;
return;

View File

@ -131,7 +131,8 @@ static void createTransCurveVerts(bContext *UNUSED(C), TransInfo *t)
}
}
/* Support other objects using PET to adjust these, unless connected is enabled. */
/* Support other objects using proportional editing to adjust these, unless connected is
* enabled. */
if (((is_prop_edit && !is_prop_connected) ? count : countsel) == 0) {
tc->data_len = 0;
continue;

View File

@ -52,7 +52,8 @@ static void createTransLatticeVerts(bContext *UNUSED(C), TransInfo *t)
bp++;
}
/* Support other objects using PET to adjust these, unless connected is enabled. */
/* Support other objects using proportional editing to adjust these, unless connected is
* enabled. */
if (((is_prop_edit && !is_prop_connected) ? count : countsel) == 0) {
tc->data_len = 0;
continue;

View File

@ -44,7 +44,8 @@ static void createTransMBallVerts(bContext *UNUSED(C), TransInfo *t)
}
}
/* Support other objects using PET to adjust these, unless connected is enabled. */
/* Support other objects using proportional editing to adjust these, unless connected is
* enabled. */
if (((is_prop_edit && !is_prop_connected) ? count : countsel) == 0) {
tc->data_len = 0;
continue;

View File

@ -843,7 +843,8 @@ void transform_convert_mesh_islands_calc(struct BMEditMesh *em,
MEM_freeN(group_index);
}
/* for PET we need islands of 1 so connected vertices can use it with V3D_AROUND_LOCAL_ORIGINS */
/* for proportional editing we need islands of 1 so connected vertices can use it with
* V3D_AROUND_LOCAL_ORIGINS */
if (calc_single_islands) {
BMIter viter;
BMVert *v;
@ -1484,7 +1485,8 @@ static void createTransEditVerts(bContext *UNUSED(C), TransInfo *t)
* transform data is created by selected vertices.
*/
/* Support other objects using PET to adjust these, unless connected is enabled. */
/* Support other objects using proportional editing to adjust these, unless connected is
* enabled. */
if ((!prop_mode || (prop_mode & T_PROP_CONNECTED)) && (bm->totvertsel == 0)) {
continue;
}

View File

@ -94,7 +94,8 @@ static void createTransMeshSkin(bContext *UNUSED(C), TransInfo *t)
continue;
}
/* Support other objects using PET to adjust these, unless connected is enabled. */
/* Support other objects using proportional editing to adjust these, unless connected is
* enabled. */
if ((!prop_mode || (prop_mode & T_PROP_CONNECTED)) && (bm->totvertsel == 0)) {
continue;
}

View File

@ -314,7 +314,8 @@ static void createTransUVs(bContext *C, TransInfo *t)
float *prop_dists = NULL;
/* Support other objects using PET to adjust these, unless connected is enabled. */
/* Support other objects using proportional editing to adjust these, unless connected is
* enabled. */
if (((is_prop_edit && !is_prop_connected) ? count : countsel) == 0) {
goto finally;
}

View File

@ -77,7 +77,8 @@ static void createTransMeshVertCData(bContext *UNUSED(C), TransInfo *t)
struct TransMirrorData mirror_data = {NULL};
struct TransMeshDataCrazySpace crazyspace_data = {NULL};
/* Support other objects using PET to adjust these, unless connected is enabled. */
/* Support other objects using proportional editing to adjust these, unless connected is
* enabled. */
if ((!prop_mode || (prop_mode & T_PROP_CONNECTED)) && (bm->totvertsel == 0)) {
continue;
}

View File

@ -9,8 +9,8 @@
#include "MEM_guardedalloc.h"
#include "BLI_listbase.h"
#include "BLI_math.h"
#include "BLI_math_vector.h"
#include "BLI_math_vector.hh"
#include "BLI_rect.h"
#include "BKE_context.h"
@ -39,58 +39,55 @@ struct TransCustomDataNode {
/** \name Node Transform Creation
* \{ */
/* transcribe given node into TransData2D for Transforming */
static void NodeToTransData(TransData *td, TransData2D *td2d, bNode *node, const float dpi_fac)
static void create_transform_data_for_node(TransData &td,
TransData2D &td2d,
bNode &node,
const float dpi_fac)
{
float locx, locy;
/* account for parents (nested nodes) */
if (node->parent) {
nodeToView(node->parent, node->locx, node->locy, &locx, &locy);
if (node.parent) {
nodeToView(node.parent, node.locx, node.locy, &locx, &locy);
}
else {
locx = node->locx;
locy = node->locy;
locx = node.locx;
locy = node.locy;
}
/* use top-left corner as the transform origin for nodes */
/* Weirdo - but the node system is a mix of free 2d elements and DPI sensitive UI. */
#ifdef USE_NODE_CENTER
td2d->loc[0] = (locx * dpi_fac) + (BLI_rctf_size_x(&node->runtime->totr) * +0.5f);
td2d->loc[1] = (locy * dpi_fac) + (BLI_rctf_size_y(&node->runtime->totr) * -0.5f);
#else
td2d->loc[0] = locx * dpi_fac;
td2d->loc[1] = locy * dpi_fac;
#endif
td2d->loc[2] = 0.0f;
td2d->loc2d = td2d->loc; /* current location */
td2d.loc[0] = locx * dpi_fac;
td2d.loc[1] = locy * dpi_fac;
td2d.loc[2] = 0.0f;
td2d.loc2d = td2d.loc; /* current location */
td->loc = td2d->loc;
copy_v3_v3(td->iloc, td->loc);
td.loc = td2d.loc;
copy_v3_v3(td.iloc, td.loc);
/* use node center instead of origin (top-left corner) */
td->center[0] = td2d->loc[0];
td->center[1] = td2d->loc[1];
td->center[2] = 0.0f;
td.center[0] = td2d.loc[0];
td.center[1] = td2d.loc[1];
td.center[2] = 0.0f;
memset(td->axismtx, 0, sizeof(td->axismtx));
td->axismtx[2][2] = 1.0f;
memset(td.axismtx, 0, sizeof(td.axismtx));
td.axismtx[2][2] = 1.0f;
td->ext = nullptr;
td->val = nullptr;
td.ext = nullptr;
td.val = nullptr;
td->flag = TD_SELECTED;
td->dist = 0.0f;
td.flag = TD_SELECTED;
td.dist = 0.0f;
unit_m3(td->mtx);
unit_m3(td->smtx);
unit_m3(td.mtx);
unit_m3(td.smtx);
td->extra = node;
td.extra = &node;
}
static bool is_node_parent_select(bNode *node)
static bool is_node_parent_select(const bNode *node)
{
while ((node = node->parent)) {
if (node->flag & NODE_TRANSFORM) {
if (node->flag & NODE_SELECT) {
return true;
}
}
@ -99,8 +96,13 @@ static bool is_node_parent_select(bNode *node)
static void createTransNodeData(bContext * /*C*/, TransInfo *t)
{
const float dpi_fac = UI_DPI_FAC;
using namespace blender;
using namespace blender::ed;
SpaceNode *snode = static_cast<SpaceNode *>(t->area->spacedata.first);
bNodeTree *node_tree = snode->edittree;
if (!node_tree) {
return;
}
/* Custom data to enable edge panning during the node transform */
TransCustomDataNode *customdata = MEM_cnew<TransCustomDataNode>(__func__);
@ -119,37 +121,21 @@ static void createTransNodeData(bContext * /*C*/, TransInfo *t)
TransDataContainer *tc = TRANS_DATA_CONTAINER_FIRST_SINGLE(t);
tc->data_len = 0;
if (!snode->edittree) {
return;
}
/* Nodes don't support PET and probably never will. */
/* Nodes don't support proportional editing and probably never will. */
t->flag = t->flag & ~T_PROP_EDIT_ALL;
/* set transform flags on nodes */
for (bNode *node : snode->edittree->all_nodes()) {
if (node->flag & NODE_SELECT && !is_node_parent_select(node)) {
node->flag |= NODE_TRANSFORM;
tc->data_len++;
}
else {
node->flag &= ~NODE_TRANSFORM;
}
}
if (tc->data_len == 0) {
VectorSet<bNode *> nodes = space_node::get_selected_nodes(*node_tree);
nodes.remove_if([&](bNode *node) { return is_node_parent_select(node); });
if (nodes.is_empty()) {
return;
}
TransData *td = tc->data = MEM_cnew_array<TransData>(tc->data_len, __func__);
TransData2D *td2d = tc->data_2d = MEM_cnew_array<TransData2D>(tc->data_len, __func__);
tc->data_len = nodes.size();
tc->data = MEM_cnew_array<TransData>(tc->data_len, __func__);
tc->data_2d = MEM_cnew_array<TransData2D>(tc->data_len, __func__);
for (bNode *node : snode->edittree->all_nodes()) {
if (node->flag & NODE_TRANSFORM) {
NodeToTransData(td++, td2d++, node, dpi_fac);
}
for (const int i : nodes.index_range()) {
create_transform_data_for_node(tc->data[i], tc->data_2d[i], *nodes[i], UI_DPI_FAC);
}
}
@ -161,43 +147,41 @@ static void createTransNodeData(bContext * /*C*/, TransInfo *t)
static void node_snap_grid_apply(TransInfo *t)
{
int i;
using namespace blender;
if (!(activeSnap(t) && (t->tsnap.mode & (SCE_SNAP_MODE_INCREMENT | SCE_SNAP_MODE_GRID)))) {
return;
}
float grid_size[2];
copy_v2_v2(grid_size, t->snap_spatial);
float2 grid_size = t->snap_spatial;
if (t->modifiers & MOD_PRECISION) {
mul_v2_fl(grid_size, t->snap_spatial_precision);
grid_size *= t->snap_spatial_precision;
}
/* Early exit on unusable grid size. */
if (is_zero_v2(grid_size)) {
if (math::is_zero(grid_size)) {
return;
}
FOREACH_TRANS_DATA_CONTAINER (t, tc) {
TransData *td;
for (i = 0, td = tc->data; i < tc->data_len; i++, td++) {
for (const int i : IndexRange(tc->data_len)) {
TransData &td = tc->data[i];
float iloc[2], loc[2], tvec[2];
if (td->flag & TD_SKIP) {
if (td.flag & TD_SKIP) {
continue;
}
if ((t->flag & T_PROP_EDIT) && (td->factor == 0.0f)) {
if ((t->flag & T_PROP_EDIT) && (td.factor == 0.0f)) {
continue;
}
copy_v2_v2(iloc, td->loc);
copy_v2_v2(iloc, td.loc);
loc[0] = roundf(iloc[0] / grid_size[0]) * grid_size[0];
loc[1] = roundf(iloc[1] / grid_size[1]) * grid_size[1];
sub_v2_v2v2(tvec, loc, iloc);
add_v2_v2(td->loc, tvec);
add_v2_v2(td.loc, tvec);
}
}
}
@ -246,11 +230,6 @@ static void flushTransNodes(TransInfo *t)
float loc[2];
add_v2_v2v2(loc, td2d->loc, offset);
#ifdef USE_NODE_CENTER
loc[0] -= 0.5f * BLI_rctf_size_x(&node->runtime->totr);
loc[1] += 0.5f * BLI_rctf_size_y(&node->runtime->totr);
#endif
/* Weirdo - but the node system is a mix of free 2d elements and DPI sensitive UI. */
loc[0] /= dpi_fac;
loc[1] /= dpi_fac;

View File

@ -578,7 +578,8 @@ void initTransInfo(bContext *C, TransInfo *t, wmOperator *op, const wmEvent *eve
t->flag |= T_NO_MIRROR;
}
/* setting PET flag only if property exist in operator. Otherwise, assume it's not supported */
/* Setting proportional editing flag only if property exist in operator. Otherwise, assume it's
* not supported. */
if (op && (prop = RNA_struct_find_property(op->ptr, "use_proportional_edit"))) {
if (RNA_property_is_set(op->ptr, prop)) {
if (RNA_property_boolean_get(op->ptr, prop)) {
@ -669,7 +670,7 @@ void initTransInfo(bContext *C, TransInfo *t, wmOperator *op, const wmEvent *eve
}
}
/* Mirror is not supported with PET, turn it off. */
/* Mirror is not supported with proportional editing, turn it off. */
#if 0
if (t->flag & T_PROP_EDIT) {
t->flag &= ~T_MIRROR;

View File

@ -71,7 +71,7 @@ static void applyCurveShrinkFatten(TransInfo *t, const int UNUSED(mval[2]))
*td->val = td->ival * ratio;
}
/* apply PET */
/* Apply proportional editing. */
*td->val = interpf(*td->val, td->ival, td->factor);
CLAMP_MIN(*td->val, 0.0f);
}

View File

@ -73,7 +73,7 @@ static void applyGPOpacity(TransInfo *t, const int UNUSED(mval[2]))
if (td->val) {
*td->val = td->ival * ratio;
/* apply PET */
/* Apply proportional editing. */
*td->val = interpf(*td->val, td->ival, td->factor);
CLAMP(*td->val, 0.0f, 1.0f);
}

View File

@ -73,7 +73,7 @@ static void applyGPShrinkFatten(TransInfo *t, const int UNUSED(mval[2]))
if (td->val) {
*td->val = td->ival * ratio;
/* apply PET */
/* Apply proportional editing. */
*td->val = interpf(*td->val, td->ival, td->factor);
if (*td->val <= 0.0f) {
*td->val = 0.001f;

View File

@ -89,7 +89,7 @@ static void applyMaskShrinkFatten(TransInfo *t, const int UNUSED(mval[2]))
*td->val = td->ival * ratio;
}
/* apply PET */
/* Apply proportional editing. */
*td->val = interpf(*td->val, td->ival, td->factor);
if (*td->val <= 0.0f) {
*td->val = 0.001f;

View File

@ -1190,36 +1190,19 @@ static void TargetSnapOffset(TransInfo *t, TransData *td)
if (t->spacetype == SPACE_NODE && td != nullptr) {
bNode *node = static_cast<bNode *>(td->extra);
char border = t->tsnap.snapNodeBorder;
float width = BLI_rctf_size_x(&node->runtime->totr);
float height = BLI_rctf_size_y(&node->runtime->totr);
#ifdef USE_NODE_CENTER
if (border & NODE_LEFT) {
t->tsnap.snapTarget[0] -= 0.5f * width;
}
if (border & NODE_RIGHT) {
t->tsnap.snapTarget[0] += 0.5f * width;
}
if (border & NODE_BOTTOM) {
t->tsnap.snapTarget[1] -= 0.5f * height;
}
if (border & NODE_TOP) {
t->tsnap.snapTarget[1] += 0.5f * height;
}
#else
if (border & NODE_LEFT) {
t->tsnap.snapTarget[0] -= 0.0f;
}
if (border & NODE_RIGHT) {
t->tsnap.snapTarget[0] += width;
t->tsnap.snapTarget[0] += BLI_rctf_size_x(&node->runtime->totr);
}
if (border & NODE_BOTTOM) {
t->tsnap.snapTarget[1] -= height;
t->tsnap.snapTarget[1] -= BLI_rctf_size_y(&node->runtime->totr);
}
if (border & NODE_TOP) {
t->tsnap.snapTarget[1] += 0.0f;
}
#endif
}
}

View File

@ -63,6 +63,7 @@ set(SRC
../include/ED_mball.h
../include/ED_mesh.h
../include/ED_node.h
../include/ED_node.hh
../include/ED_numinput.h
../include/ED_object.h
../include/ED_outliner.h

Some files were not shown because too many files have changed in this diff Show More