Merge branch 'master' into refactor-mesh-position-generic

This commit is contained in:
Hans Goudey 2023-01-04 20:19:36 -05:00
commit 3100fc0f2f
122 changed files with 2377 additions and 1753 deletions

View File

@ -501,12 +501,14 @@ endif()
if(NOT APPLE)
option(WITH_CYCLES_DEVICE_ONEAPI "Enable Cycles oneAPI compute support" OFF)
option(WITH_CYCLES_ONEAPI_BINARIES "Enable Ahead-Of-Time compilation for Cycles oneAPI device" OFF)
option(WITH_CYCLES_ONEAPI_HOST_TASK_EXECUTION "Switch target of oneAPI implementation from SYCL devices to Host Task (single thread on CPU). This option is only for debugging purposes." OFF)
# https://www.intel.com/content/www/us/en/develop/documentation/oneapi-dpcpp-cpp-compiler-dev-guide-and-reference/top/compilation/ahead-of-time-compilation.html
# acm-g10 is the target for the first Intel Arc Alchemist GPUs.
set(CYCLES_ONEAPI_SPIR64_GEN_DEVICES "acm-g10" CACHE STRING "oneAPI Intel GPU architectures to build binaries for")
set(CYCLES_ONEAPI_SYCL_TARGETS spir64 spir64_gen CACHE STRING "oneAPI targets to build AOT binaries for")
mark_as_advanced(WITH_CYCLES_ONEAPI_HOST_TASK_EXECUTION)
mark_as_advanced(CYCLES_ONEAPI_SPIR64_GEN_DEVICES)
mark_as_advanced(CYCLES_ONEAPI_SYCL_TARGETS)
endif()
@ -830,27 +832,17 @@ endif()
# Enable Boost when it is needed by Cycles, Audaspace or i18n;
# otherwise leave it as the user configured.
set_and_warn_dependency(WITH_BOOST WITH_CYCLES OFF)
set_and_warn_dependency(WITH_BOOST WITH_INTERNATIONAL OFF)
set_and_warn_dependency(WITH_BOOST WITH_OPENVDB OFF)
set_and_warn_dependency(WITH_BOOST WITH_OPENCOLORIO OFF)
set_and_warn_dependency(WITH_BOOST WITH_QUADRIFLOW OFF)
set_and_warn_dependency(WITH_BOOST WITH_USD OFF)
set_and_warn_dependency(WITH_BOOST WITH_ALEMBIC OFF)
if(WITH_CYCLES)
set_and_warn_dependency(WITH_BOOST WITH_CYCLES_OSL OFF)
set_and_warn_dependency(WITH_PUGIXML WITH_CYCLES_OSL OFF)
endif()
set_and_warn_dependency(WITH_PUGIXML WITH_OPENIMAGEIO OFF)
if(WITH_BOOST AND NOT (WITH_CYCLES OR WITH_OPENIMAGEIO OR WITH_INTERNATIONAL OR
WITH_OPENVDB OR WITH_OPENCOLORIO OR WITH_USD OR WITH_ALEMBIC))
message(STATUS "No dependencies need 'WITH_BOOST' forcing WITH_BOOST=OFF")
set(WITH_BOOST OFF)
endif()
set_and_warn_dependency(WITH_TBB WITH_CYCLES OFF)
set_and_warn_dependency(WITH_TBB WITH_USD OFF)
set_and_warn_dependency(WITH_TBB WITH_OPENIMAGEDENOISE OFF)
set_and_warn_dependency(WITH_TBB WITH_OPENVDB OFF)
set_and_warn_dependency(WITH_TBB WITH_MOD_FLUID OFF)
@ -859,14 +851,10 @@ set_and_warn_dependency(WITH_OPENVDB WITH_NANOVDB OFF)
# OpenVDB and OpenColorIO uses 'half' type from OpenEXR
set_and_warn_dependency(WITH_IMAGE_OPENEXR WITH_OPENVDB OFF)
set_and_warn_dependency(WITH_IMAGE_OPENEXR WITH_OPENCOLORIO OFF)
# Haru needs `TIFFFaxBlackCodes` & `TIFFFaxWhiteCodes` symbols from TIFF.
set_and_warn_dependency(WITH_IMAGE_TIFF WITH_HARU OFF)
# USD needs OpenSubDiv, since that is used by the Cycles Hydra render delegate.
set_and_warn_dependency(WITH_OPENSUBDIV WITH_USD OFF)
# auto enable openimageio for cycles
if(WITH_CYCLES)
set(WITH_OPENIMAGEIO ON)
@ -880,17 +868,6 @@ else()
set(WITH_CYCLES_OSL OFF)
endif()
# auto enable openimageio linking dependencies
if(WITH_OPENIMAGEIO)
set(WITH_IMAGE_OPENEXR ON)
set(WITH_IMAGE_TIFF ON)
endif()
# auto enable alembic linking dependencies
if(WITH_ALEMBIC)
set(WITH_IMAGE_OPENEXR ON)
endif()
# don't store paths to libs for portable distribution
if(WITH_INSTALL_PORTABLE)
set(CMAKE_SKIP_BUILD_RPATH TRUE)
@ -1093,14 +1070,6 @@ if(WITH_CYCLES)
"Configure OIIO or disable WITH_CYCLES"
)
endif()
if(NOT WITH_BOOST)
message(
FATAL_ERROR
"Cycles requires WITH_BOOST, the library may not have been found. "
"Configure BOOST or disable WITH_CYCLES"
)
endif()
if(WITH_CYCLES_OSL)
if(NOT WITH_LLVM)
message(
@ -2007,24 +1976,6 @@ if(0)
print_all_vars()
endif()
set(LIBDIR_STALE)
if(UNIX AND NOT APPLE)
# Only search for the path if it's found on the system.
if(EXISTS "../lib/linux_centos7_x86_64")
set(LIBDIR_STALE "/lib/linux_centos7_x86_64/")
endif()
endif()
if(LIBDIR_STALE)
print_cached_vars_containing_value(
"${LIBDIR_STALE}"
"\nWARNING: found cached references to old library paths!\n"
"\nIt is *strongly* recommended to reference updated library paths!\n"
)
endif()
unset(LIBDIR_STALE)
# Should be the last step of configuration.
if(POSTCONFIGURE_SCRIPT)
include(${POSTCONFIGURE_SCRIPT})

View File

@ -1209,43 +1209,6 @@ function(print_all_vars)
endforeach()
endfunction()
# Print a list of all cached variables with values containing `contents`.
function(print_cached_vars_containing_value
contents
msg_header
msg_footer
)
set(_list_info)
set(_found)
get_cmake_property(_vars VARIABLES)
foreach(_var ${_vars})
if(DEFINED CACHE{${_var}})
# Skip "_" prefixed variables, these are used for internal book-keeping,
# not under user control.
string(FIND "${_var}" "_" _found)
if(NOT (_found EQUAL 0))
string(FIND "${${_var}}" "${contents}" _found)
if(NOT (_found EQUAL -1))
if(_found)
list(APPEND _list_info "${_var}=${${_var}}")
endif()
endif()
endif()
endif()
endforeach()
unset(_var)
unset(_vars)
unset(_found)
if(_list_info)
message(${msg_header})
foreach(_var ${_list_info})
message(" * ${_var}")
endforeach()
message(${msg_footer})
endif()
unset(_list_info)
endfunction()
macro(openmp_delayload
projectname
)

View File

@ -86,16 +86,14 @@ endif()
if(WITH_USD)
find_package(USD REQUIRED)
add_bundled_libraries(usd/lib)
endif()
add_bundled_libraries(usd/lib)
if(WITH_MATERIALX)
find_package(MaterialX)
set_and_warn_library_found("MaterialX" MaterialX_FOUND WITH_MATERIALX)
if(WITH_MATERIALX)
add_bundled_libraries(materialx/lib)
endif()
endif()
add_bundled_libraries(materialx/lib)
if(WITH_VULKAN_BACKEND)
find_package(MoltenVK REQUIRED)
@ -117,8 +115,8 @@ endif()
if(WITH_OPENSUBDIV)
find_package(OpenSubdiv)
add_bundled_libraries(opensubdiv/lib)
endif()
add_bundled_libraries(opensubdiv/lib)
if(WITH_CODEC_SNDFILE)
find_package(SndFile)
@ -156,9 +154,9 @@ list(APPEND FREETYPE_LIBRARIES
if(WITH_IMAGE_OPENEXR)
find_package(OpenEXR)
add_bundled_libraries(openexr/lib)
add_bundled_libraries(imath/lib)
endif()
add_bundled_libraries(openexr/lib)
add_bundled_libraries(imath/lib)
if(WITH_CODEC_FFMPEG)
set(FFMPEG_ROOT_DIR ${LIBDIR}/ffmpeg)
@ -270,12 +268,11 @@ if(WITH_BOOST)
set(BOOST_INCLUDE_DIR ${Boost_INCLUDE_DIRS})
set(BOOST_DEFINITIONS)
add_bundled_libraries(boost/lib)
mark_as_advanced(Boost_LIBRARIES)
mark_as_advanced(Boost_INCLUDE_DIRS)
unset(_boost_FIND_COMPONENTS)
endif()
add_bundled_libraries(boost/lib)
if(WITH_INTERNATIONAL OR WITH_CODEC_FFMPEG)
string(APPEND PLATFORM_LINKFLAGS " -liconv") # boost_locale and ffmpeg need it!
@ -297,13 +294,13 @@ if(WITH_OPENIMAGEIO)
)
set(OPENIMAGEIO_DEFINITIONS "-DOIIO_STATIC_BUILD")
set(OPENIMAGEIO_IDIFF "${LIBDIR}/openimageio/bin/idiff")
add_bundled_libraries(openimageio/lib)
endif()
add_bundled_libraries(openimageio/lib)
if(WITH_OPENCOLORIO)
find_package(OpenColorIO 2.0.0 REQUIRED)
add_bundled_libraries(opencolorio/lib)
endif()
add_bundled_libraries(opencolorio/lib)
if(WITH_OPENVDB)
find_package(OpenVDB)
@ -314,8 +311,8 @@ if(WITH_OPENVDB)
unset(BLOSC_LIBRARIES CACHE)
endif()
set(OPENVDB_DEFINITIONS)
add_bundled_libraries(openvdb/lib)
endif()
add_bundled_libraries(openvdb/lib)
if(WITH_NANOVDB)
find_package(NanoVDB)
@ -363,8 +360,8 @@ endif()
if(WITH_TBB)
find_package(TBB REQUIRED)
add_bundled_libraries(tbb/lib)
endif()
add_bundled_libraries(tbb/lib)
if(WITH_POTRACE)
find_package(Potrace REQUIRED)
@ -382,9 +379,9 @@ if(WITH_OPENMP)
set(OpenMP_LIBRARY_DIR "${LIBDIR}/openmp/lib/")
set(OpenMP_LINKER_FLAGS "-L'${OpenMP_LIBRARY_DIR}' -lomp")
set(OpenMP_LIBRARY "${OpenMP_LIBRARY_DIR}/libomp.dylib")
add_bundled_libraries(openmp/lib)
endif()
endif()
add_bundled_libraries(openmp/lib)
if(WITH_XR_OPENXR)
find_package(XR_OpenXR_SDK REQUIRED)

View File

@ -3,6 +3,7 @@
# Auto update existing CMake caches for new libraries
# Clear cached variables whose name matches `pattern`.
function(unset_cache_variables pattern)
get_cmake_property(_cache_variables CACHE_VARIABLES)
foreach(_cache_variable ${_cache_variables})
@ -12,6 +13,30 @@ function(unset_cache_variables pattern)
endforeach()
endfunction()
# Clear cached variables with values containing `contents`.
function(unset_cached_variables_containing contents msg)
get_cmake_property(_cache_variables CACHE_VARIABLES)
set(_found)
set(_print_msg)
foreach(_cache_variable ${_cache_variables})
# Skip "_" prefixed variables, these are used for internal book-keeping,
# not under user control.
string(FIND "${_cache_variable}" "_" _found)
if(NOT (_found EQUAL 0))
string(FIND "${${_cache_variable}}" "${contents}" _found)
if(NOT (_found EQUAL -1))
if(_found)
unset(${_cache_variable} CACHE)
set(_print_msg ON)
endif()
endif()
endif()
endforeach()
if(_print_msg)
message(STATUS ${msg})
endif()
endfunction()
# Detect update from 3.1 to 3.2 libs.
if(UNIX AND
DEFINED OPENEXR_VERSION AND
@ -63,3 +88,13 @@ if(UNIX AND
unset_cache_variables("^TBB")
unset_cache_variables("^USD")
endif()
if(UNIX AND (NOT APPLE) AND LIBDIR AND (EXISTS ${LIBDIR}))
# Only search for the path if it's found on the system.
set(_libdir_stale "/lib/linux_centos7_x86_64/")
unset_cached_variables_containing(
"${_libdir_stale}"
"Auto clearing old ${_libdir_stale} paths from CMake configuration"
)
unset(_libdir_stale)
endif()

View File

@ -166,11 +166,9 @@ endif()
if(WITH_IMAGE_OPENEXR)
find_package_wrapper(OpenEXR) # our own module
set_and_warn_library_found("OpenEXR" OPENEXR_FOUND WITH_IMAGE_OPENEXR)
if(WITH_IMAGE_OPENEXR)
add_bundled_libraries(openexr/lib)
add_bundled_libraries(imath/lib)
endif()
endif()
add_bundled_libraries(openexr/lib)
add_bundled_libraries(imath/lib)
if(WITH_IMAGE_OPENJPEG)
find_package_wrapper(OpenJPEG)
@ -328,11 +326,8 @@ endif()
if(WITH_OPENVDB)
find_package(OpenVDB)
set_and_warn_library_found("OpenVDB" OPENVDB_FOUND WITH_OPENVDB)
if(WITH_OPENVDB)
add_bundled_libraries(openvdb/lib)
endif()
endif()
add_bundled_libraries(openvdb/lib)
if(WITH_NANOVDB)
find_package_wrapper(NanoVDB)
@ -351,18 +346,14 @@ endif()
if(WITH_USD)
find_package_wrapper(USD)
set_and_warn_library_found("USD" USD_FOUND WITH_USD)
if(WITH_USD)
add_bundled_libraries(usd/lib)
endif()
endif()
add_bundled_libraries(usd/lib)
if(WITH_MATERIALX)
find_package_wrapper(MaterialX)
set_and_warn_library_found("MaterialX" MaterialX_FOUND WITH_MATERIALX)
if(WITH_MATERIALX)
add_bundled_libraries(materialx/lib)
endif()
endif()
add_bundled_libraries(materialx/lib)
if(WITH_BOOST)
# used in build instructions to override include and library variables
@ -418,9 +409,8 @@ if(WITH_BOOST)
find_package(IcuLinux)
list(APPEND BOOST_LIBRARIES ${ICU_LIBRARIES})
endif()
add_bundled_libraries(boost/lib)
endif()
add_bundled_libraries(boost/lib)
if(WITH_PUGIXML)
find_package_wrapper(PugiXML)
@ -455,21 +445,16 @@ if(WITH_OPENIMAGEIO)
endif()
set_and_warn_library_found("OPENIMAGEIO" OPENIMAGEIO_FOUND WITH_OPENIMAGEIO)
if(WITH_OPENIMAGEIO)
add_bundled_libraries(openimageio/lib)
endif()
endif()
add_bundled_libraries(openimageio/lib)
if(WITH_OPENCOLORIO)
find_package_wrapper(OpenColorIO 2.0.0)
set(OPENCOLORIO_DEFINITIONS)
set_and_warn_library_found("OpenColorIO" OPENCOLORIO_FOUND WITH_OPENCOLORIO)
if(WITH_OPENCOLORIO)
add_bundled_libraries(opencolorio/lib)
endif()
endif()
add_bundled_libraries(opencolorio/lib)
if(WITH_CYCLES AND WITH_CYCLES_EMBREE)
find_package(Embree 3.8.0 REQUIRED)
@ -510,18 +495,14 @@ if(WITH_OPENSUBDIV)
set(OPENSUBDIV_LIBPATH) # TODO, remove and reference the absolute path everywhere
set_and_warn_library_found("OpenSubdiv" OPENSUBDIV_FOUND WITH_OPENSUBDIV)
if(WITH_OPENSUBDIV)
add_bundled_libraries(opensubdiv/lib)
endif()
endif()
add_bundled_libraries(opensubdiv/lib)
if(WITH_TBB)
find_package_wrapper(TBB)
set_and_warn_library_found("TBB" TBB_FOUND WITH_TBB)
if(WITH_TBB)
add_bundled_libraries(tbb/lib)
endif()
endif()
add_bundled_libraries(tbb/lib)
if(WITH_XR_OPENXR)
find_package(XR_OpenXR_SDK)
@ -1013,18 +994,6 @@ endfunction()
configure_atomic_lib_if_needed()
# Handle library inter-dependencies.
# FIXME: find a better place to handle inter-library dependencies.
# This is done near the end of the file to ensure bundled libraries are not added multiple times.
if(WITH_USD)
if(NOT WITH_OPENIMAGEIO)
add_bundled_libraries(openimageio/lib)
endif()
if(NOT WITH_OPENVDB)
add_bundled_libraries(openvdb/lib)
endif()
endif()
if(PLATFORM_BUNDLED_LIBRARIES)
# For the installed Python module and installed Blender executable, we set the
# rpath to the relative path where the install step will copy the shared libraries.

View File

@ -1,12 +1,6 @@
/* SPDX-License-Identifier: Apache-2.0
* Copyright 2021-2022 Blender Foundation */
#include "blender/display_driver.h"
#include "device/device.h"
#include "util/log.h"
#include "util/math.h"
#include "GPU_context.h"
#include "GPU_immediate.h"
#include "GPU_shader.h"
@ -15,6 +9,12 @@
#include "RE_engine.h"
#include "blender/display_driver.h"
#include "device/device.h"
#include "util/log.h"
#include "util/math.h"
CCL_NAMESPACE_BEGIN
/* --------------------------------------------------------------------

View File

@ -163,6 +163,9 @@ if(WITH_CYCLES_DEVICE_METAL)
endif()
if(WITH_CYCLES_DEVICE_ONEAPI)
if(WITH_CYCLES_ONEAPI_HOST_TASK_EXECUTION)
add_definitions(-DWITH_ONEAPI_SYCL_HOST_TASK)
endif()
if(WITH_CYCLES_ONEAPI_BINARIES)
set(cycles_kernel_oneapi_lib_suffix "_aot")
else()

View File

@ -167,6 +167,17 @@ class Device {
return true;
}
/* Request cancellation of any long-running work. */
virtual void cancel()
{
}
/* Return true if device is ready for rendering, or report status if not. */
virtual bool is_ready(string & /*status*/) const
{
return true;
}
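A minimal usage sketch of the two new hooks; `Device` and `Progress` here are the Cycles types, and the loop simply mirrors the session-side code added later in this commit (Session::cancel and Session::run_update_for_next_iteration):
#include <chrono>
#include <string>
#include <thread>
/* Poll until any background kernel compilation has finished, keeping the UI informed. */
static void wait_until_device_ready(Device *device, Progress &progress)
{
std::string status;
while (!device->is_ready(status)) {
progress.set_status(status);
if (progress.get_cancel()) {
device->cancel(); /* Ask the device to drop in-flight compilation requests. */
break;
}
std::this_thread::sleep_for(std::chrono::milliseconds(200));
}
}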
/* GPU device only functions.
* These may not be used on CPU or multi-devices. */

View File

@ -76,7 +76,20 @@ class MetalDevice : public Device {
bool use_metalrt = false;
MetalPipelineType kernel_specialization_level = PSO_GENERIC;
std::atomic_bool async_compile_and_load = false;
int device_id = 0;
static thread_mutex existing_devices_mutex;
static std::map<int, MetalDevice *> active_device_ids;
static bool is_device_cancelled(int device_id);
static MetalDevice *get_device_by_ID(int ID,
thread_scoped_lock &existing_devices_mutex_lock);
virtual bool is_ready(string &status) const override;
virtual void cancel() override;
virtual BVHLayoutMask get_bvh_layout_mask() const override;
@ -92,14 +105,12 @@ class MetalDevice : public Device {
bool use_adaptive_compilation();
bool make_source_and_check_if_compile_needed(MetalPipelineType pso_type);
void make_source(MetalPipelineType pso_type, const uint kernel_features);
virtual bool load_kernels(const uint kernel_features) override;
void reserve_local_memory(const uint kernel_features);
void init_host_memory();
void load_texture_info();
void erase_allocation(device_memory &mem);
@ -112,7 +123,7 @@ class MetalDevice : public Device {
virtual void optimize_for_scene(Scene *scene) override;
bool compile_and_load(MetalPipelineType pso_type);
static void compile_and_load(int device_id, MetalPipelineType pso_type);
/* ------------------------------------------------------------------ */
/* low-level memory management */

View File

@ -13,10 +13,32 @@
# include "util/path.h"
# include "util/time.h"
# include <crt_externs.h>
CCL_NAMESPACE_BEGIN
class MetalDevice;
thread_mutex MetalDevice::existing_devices_mutex;
std::map<int, MetalDevice *> MetalDevice::active_device_ids;
/* Thread-safe device access for async work. Calling code must pass an appropriately scoped lock
* on existing_devices_mutex to safeguard against destruction of the returned instance. */
MetalDevice *MetalDevice::get_device_by_ID(int ID, thread_scoped_lock &existing_devices_mutex_lock)
{
auto it = active_device_ids.find(ID);
if (it != active_device_ids.end()) {
return it->second;
}
return nullptr;
}
bool MetalDevice::is_device_cancelled(int ID)
{
thread_scoped_lock lock(existing_devices_mutex);
return get_device_by_ID(ID, lock) == nullptr;
}
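The registration scheme used here reduces to a small generic pattern: a static mutex-guarded map from integer IDs to live instances, with async work capturing only the ID. A self-contained sketch with illustrative names (none of these are Cycles types):
#include <map>
#include <mutex>
struct Worker; /* stand-in for the owning device class */
static std::mutex g_workers_mutex;
static std::map<int, Worker *> g_workers; /* ID -> live instance */
/* Resolve an ID back to an instance; the caller must already hold the lock. */
static Worker *worker_lookup(int id, const std::lock_guard<std::mutex> & /*lock_proof*/)
{
auto it = g_workers.find(id);
return (it == g_workers.end()) ? nullptr : it->second;
}
/* Async tasks capture only the integer ID; once the owner unregisters itself,
* the lookup fails and the task knows it has been cancelled. */
static bool worker_is_cancelled(int id)
{
std::lock_guard<std::mutex> lock(g_workers_mutex);
return worker_lookup(id, lock) == nullptr;
}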
BVHLayoutMask MetalDevice::get_bvh_layout_mask() const
{
return use_metalrt ? BVH_LAYOUT_METAL : BVH_LAYOUT_BVH2;
@ -40,6 +62,15 @@ void MetalDevice::set_error(const string &error)
MetalDevice::MetalDevice(const DeviceInfo &info, Stats &stats, Profiler &profiler)
: Device(info, stats, profiler), texture_info(this, "texture_info", MEM_GLOBAL)
{
{
/* Assign an ID for this device which we can use to query whether async shader compilation
* requests are still relevant. */
thread_scoped_lock lock(existing_devices_mutex);
static int existing_devices_counter = 1;
device_id = existing_devices_counter++;
active_device_ids[device_id] = this;
}
mtlDevId = info.num;
/* select chosen device */
@ -57,7 +88,6 @@ MetalDevice::MetalDevice(const DeviceInfo &info, Stats &stats, Profiler &profile
if (@available(macos 11.0, *)) {
if ([mtlDevice hasUnifiedMemory]) {
default_storage_mode = MTLResourceStorageModeShared;
init_host_memory();
}
}
@ -181,6 +211,13 @@ MetalDevice::MetalDevice(const DeviceInfo &info, Stats &stats, Profiler &profile
MetalDevice::~MetalDevice()
{
/* Cancel any async shader compilations that are in flight. */
cancel();
/* This lock safeguards against destruction during use (see other uses of
* existing_devices_mutex). */
thread_scoped_lock lock(existing_devices_mutex);
for (auto &tex : texture_slot_map) {
if (tex) {
[tex release];
@ -326,21 +363,66 @@ bool MetalDevice::load_kernels(const uint _kernel_features)
* active, but may still need to be rendered without motion blur if that isn't active as well. */
motion_blur = kernel_features & KERNEL_FEATURE_OBJECT_MOTION;
bool result = compile_and_load(PSO_GENERIC);
/* Only request generic kernels if they aren't cached in memory. */
if (make_source_and_check_if_compile_needed(PSO_GENERIC)) {
/* If needed, load them asynchronously in order to responsively message progress to the user. */
int this_device_id = this->device_id;
auto compile_kernels_fn = ^() {
compile_and_load(this_device_id, PSO_GENERIC);
};
reserve_local_memory(kernel_features);
return result;
dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0),
compile_kernels_fn);
}
return true;
}
bool MetalDevice::compile_and_load(MetalPipelineType pso_type)
bool MetalDevice::make_source_and_check_if_compile_needed(MetalPipelineType pso_type)
{
make_source(pso_type, kernel_features);
if (!MetalDeviceKernels::should_load_kernels(this, pso_type)) {
/* We already have a full set of matching pipelines which are cached or queued. */
metal_printf("%s kernels already requested\n", kernel_type_as_string(pso_type));
return true;
if (this->source[pso_type].empty()) {
make_source(pso_type, kernel_features);
}
return MetalDeviceKernels::should_load_kernels(this, pso_type);
}
void MetalDevice::compile_and_load(int device_id, MetalPipelineType pso_type)
{
/* Thread-safe front-end compilation. Typically the MSL->AIR compilation can take a few seconds,
* so we avoid blocking device teardown if the user cancels a render immediately.
*/
id<MTLDevice> mtlDevice;
string source;
MetalGPUVendor device_vendor;
/* Safely gather any state required for the MSL->AIR compilation. */
{
thread_scoped_lock lock(existing_devices_mutex);
/* Check whether the device still exists. */
MetalDevice *instance = get_device_by_ID(device_id, lock);
if (!instance) {
metal_printf("Ignoring %s compilation request - device no longer exists\n",
kernel_type_as_string(pso_type));
return;
}
if (!instance->make_source_and_check_if_compile_needed(pso_type)) {
/* We already have a full set of matching pipelines which are cached or queued. Return early
* to avoid redundant MTLLibrary compilation. */
metal_printf("Ignoreing %s compilation request - kernels already requested\n",
kernel_type_as_string(pso_type));
return;
}
mtlDevice = instance->mtlDevice;
device_vendor = instance->device_vendor;
source = instance->source[pso_type];
}
/* Perform the actual compilation using our cached context. The MetalDevice can safely be
* destroyed during this time. */
MTLCompileOptions *options = [[MTLCompileOptions alloc] init];
@ -359,20 +441,15 @@ bool MetalDevice::compile_and_load(MetalPipelineType pso_type)
if (getenv("CYCLES_METAL_PROFILING") || getenv("CYCLES_METAL_DEBUG")) {
path_write_text(path_cache_get(string_printf("%s.metal", kernel_type_as_string(pso_type))),
source[pso_type]);
source);
}
const double starttime = time_dt();
NSError *error = NULL;
mtlLibrary[pso_type] = [mtlDevice newLibraryWithSource:@(source[pso_type].c_str())
options:options
error:&error];
if (!mtlLibrary[pso_type]) {
NSString *err = [error localizedDescription];
set_error(string_printf("Failed to compile library:\n%s", [err UTF8String]));
}
id<MTLLibrary> mtlLibrary = [mtlDevice newLibraryWithSource:@(source.c_str())
options:options
error:&error];
metal_printf("Front-end compilation finished in %.1f seconds (%s)\n",
time_dt() - starttime,
@ -380,17 +457,21 @@ bool MetalDevice::compile_and_load(MetalPipelineType pso_type)
[options release];
return MetalDeviceKernels::load(this, pso_type);
}
void MetalDevice::reserve_local_memory(const uint kernel_features)
{
/* METAL_WIP - implement this */
}
void MetalDevice::init_host_memory()
{
/* METAL_WIP - implement this */
/* Save the compiled MTLLibrary and trigger the AIR->PSO builds (if the MetalDevice still
* exists). */
{
thread_scoped_lock lock(existing_devices_mutex);
if (MetalDevice *instance = get_device_by_ID(device_id, lock)) {
if (mtlLibrary) {
instance->mtlLibrary[pso_type] = mtlLibrary;
MetalDeviceKernels::load(instance, pso_type);
}
else {
NSString *err = [error localizedDescription];
instance->set_error(string_printf("Failed to compile library:\n%s", [err UTF8String]));
}
}
}
}
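The body above follows a gather/compile/publish discipline that keeps the device mutex released during the slow MSL->AIR step. A stripped-down, self-contained sketch of that shape (placeholder names, not the Cycles API):
#include <map>
#include <mutex>
#include <string>
struct Owner {
std::string source;   /* input the slow job needs */
std::string compiled; /* result published back */
};
static std::mutex g_owners_mutex;
static std::map<int, Owner *> g_owners;
static void async_compile(int owner_id)
{
std::string source;
{
/* 1) Gather inputs under the lock; bail out if the owner is already gone. */
std::lock_guard<std::mutex> lock(g_owners_mutex);
auto it = g_owners.find(owner_id);
if (it == g_owners.end()) {
return;
}
source = it->second->source;
}
/* 2) Slow work with no lock held; the owner may be destroyed in the meantime. */
const std::string compiled = source + " [compiled]";
{
/* 3) Re-resolve by ID and publish only if the owner still exists. */
std::lock_guard<std::mutex> lock(g_owners_mutex);
auto it = g_owners.find(owner_id);
if (it != g_owners.end()) {
it->second->compiled = compiled;
}
}
}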
void MetalDevice::load_texture_info()
@ -700,55 +781,74 @@ device_ptr MetalDevice::mem_alloc_sub_ptr(device_memory &mem, size_t offset, siz
return 0;
}
void MetalDevice::cancel()
{
/* Remove this device's ID from the list of active devices. Any pending compilation requests
* originating from this session will be cancelled. */
thread_scoped_lock lock(existing_devices_mutex);
if (device_id) {
active_device_ids.erase(device_id);
device_id = 0;
}
}
bool MetalDevice::is_ready(string &status) const
{
int num_loaded = MetalDeviceKernels::get_loaded_kernel_count(this, PSO_GENERIC);
if (num_loaded < DEVICE_KERNEL_NUM) {
status = string_printf("%d / %d render kernels loaded (may take a few minutes the first time)",
num_loaded,
DEVICE_KERNEL_NUM);
return false;
}
metal_printf("MetalDevice::is_ready(...) --> true\n");
return true;
}
void MetalDevice::optimize_for_scene(Scene *scene)
{
MetalPipelineType specialization_level = kernel_specialization_level;
if (specialization_level < PSO_SPECIALIZED_INTERSECT) {
return;
}
/* PSO_SPECIALIZED_INTERSECT kernels are fast to specialize, so we always load them
* synchronously. */
compile_and_load(PSO_SPECIALIZED_INTERSECT);
if (specialization_level < PSO_SPECIALIZED_SHADE) {
return;
}
if (!scene->params.background) {
/* Don't load PSO_SPECIALIZED_SHADE kernels during viewport rendering as they are slower to
* build. */
return;
/* In live viewport, don't specialize beyond intersection kernels for responsiveness. */
specialization_level = (MetalPipelineType)min(specialization_level, PSO_SPECIALIZED_INTERSECT);
}
/* PSO_SPECIALIZED_SHADE kernels are slower to specialize, so we load them asynchronously, and
* only if there isn't an existing load in flight.
*/
auto specialize_shade_fn = ^() {
compile_and_load(PSO_SPECIALIZED_SHADE);
async_compile_and_load = false;
/* For responsive rendering, specialize the kernels in the background, and only if there isn't an
* existing "optimize_for_scene" request in flight. */
int this_device_id = this->device_id;
auto specialize_kernels_fn = ^() {
for (int level = 1; level <= int(specialization_level); level++) {
compile_and_load(this_device_id, MetalPipelineType(level));
}
};
bool async_specialize_shade = true;
/* In normal use, we always compile the specialized kernels in the background. */
bool specialize_in_background = true;
/* Block if per-kernel profiling is enabled (to ensure a steady rendering rate). */
if (getenv("CYCLES_METAL_PROFILING") != nullptr) {
async_specialize_shade = false;
specialize_in_background = false;
}
if (async_specialize_shade) {
if (!async_compile_and_load) {
async_compile_and_load = true;
/* Block during benchmark warm-up to ensure kernels are cached prior to the observed run. */
for (int i = 0; i < *_NSGetArgc(); i++) {
if (!strcmp((*_NSGetArgv())[i], "--warm-up")) {
specialize_in_background = false;
}
}
if (specialize_in_background) {
if (!MetalDeviceKernels::any_specialization_happening_now()) {
dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0),
specialize_shade_fn);
specialize_kernels_fn);
}
else {
metal_printf(
"Async PSO_SPECIALIZED_SHADE load request already in progress - dropping request\n");
metal_printf("\"optimize_for_scene\" request already in flight - dropping request\n");
}
}
else {
specialize_shade_fn();
specialize_kernels_fn();
}
}

View File

@ -64,6 +64,8 @@ struct MetalKernelPipeline {
void compile();
int originating_device_id;
id<MTLLibrary> mtlLibrary = nil;
MetalPipelineType pso_type;
string source_md5;
@ -94,7 +96,9 @@ struct MetalKernelPipeline {
/* Cache of Metal kernels for each DeviceKernel. */
namespace MetalDeviceKernels {
bool should_load_kernels(MetalDevice *device, MetalPipelineType pso_type);
bool any_specialization_happening_now();
int get_loaded_kernel_count(MetalDevice const *device, MetalPipelineType pso_type);
bool should_load_kernels(MetalDevice const *device, MetalPipelineType pso_type);
bool load(MetalDevice *device, MetalPipelineType pso_type);
const MetalKernelPipeline *get_best_pipeline(const MetalDevice *device, DeviceKernel kernel);

View File

@ -86,23 +86,17 @@ struct ShaderCache {
void load_kernel(DeviceKernel kernel, MetalDevice *device, MetalPipelineType pso_type);
bool should_load_kernel(DeviceKernel device_kernel,
MetalDevice *device,
MetalDevice const *device,
MetalPipelineType pso_type);
void wait_for_all();
private:
friend ShaderCache *get_shader_cache(id<MTLDevice> mtlDevice);
void compile_thread_func(int thread_index);
using PipelineCollection = std::vector<unique_ptr<MetalKernelPipeline>>;
struct PipelineRequest {
MetalKernelPipeline *pipeline = nullptr;
std::function<void(MetalKernelPipeline *)> completionHandler;
};
struct OccupancyTuningParameters {
int threads_per_threadgroup = 0;
int num_threads_per_block = 0;
@ -113,13 +107,15 @@ struct ShaderCache {
PipelineCollection pipelines[DEVICE_KERNEL_NUM];
id<MTLDevice> mtlDevice;
bool running = false;
static bool running;
std::condition_variable cond_var;
std::deque<PipelineRequest> request_queue;
std::deque<MetalKernelPipeline *> request_queue;
std::vector<std::thread> compile_threads;
std::atomic_int incomplete_requests = 0;
std::atomic_int incomplete_specialization_requests = 0;
};
bool ShaderCache::running = true;
std::mutex g_shaderCacheMutex;
std::map<id<MTLDevice>, unique_ptr<ShaderCache>> g_shaderCache;
@ -137,11 +133,25 @@ ShaderCache *get_shader_cache(id<MTLDevice> mtlDevice)
ShaderCache::~ShaderCache()
{
metal_printf("ShaderCache shutting down with incomplete_requests = %d\n",
int(incomplete_requests));
running = false;
cond_var.notify_all();
int num_incomplete = int(incomplete_requests);
if (num_incomplete) {
/* Shutting down the app with incomplete shader compilation requests. Give 1 second's grace for
* clean shutdown. */
metal_printf("ShaderCache busy (incomplete_requests = %d)...\n", num_incomplete);
std::this_thread::sleep_for(std::chrono::seconds(1));
num_incomplete = int(incomplete_requests);
}
if (num_incomplete) {
metal_printf("ShaderCache still busy (incomplete_requests = %d). Terminating...\n",
num_incomplete);
std::terminate();
}
metal_printf("ShaderCache idle. Shutting down.\n");
for (auto &thread : compile_threads) {
thread.join();
}
@ -156,35 +166,69 @@ void ShaderCache::wait_for_all()
void ShaderCache::compile_thread_func(int thread_index)
{
while (1) {
while (running) {
/* wait for / acquire next request */
PipelineRequest request;
MetalKernelPipeline *pipeline;
{
thread_scoped_lock lock(cache_mutex);
cond_var.wait(lock, [&] { return !running || !request_queue.empty(); });
if (!running) {
break;
if (!running || request_queue.empty()) {
continue;
}
if (!request_queue.empty()) {
request = request_queue.front();
request_queue.pop_front();
}
pipeline = request_queue.front();
request_queue.pop_front();
}
/* service request */
if (request.pipeline) {
request.pipeline->compile();
incomplete_requests--;
/* Service the request. */
DeviceKernel device_kernel = pipeline->device_kernel;
MetalPipelineType pso_type = pipeline->pso_type;
if (MetalDevice::is_device_cancelled(pipeline->originating_device_id)) {
/* The originating MetalDevice is no longer active, so this request is obsolete. */
metal_printf("Cancelling compilation of %s (%s)\n",
device_kernel_as_string(device_kernel),
kernel_type_as_string(pso_type));
}
else {
/* Do the actual compilation. */
pipeline->compile();
thread_scoped_lock lock(cache_mutex);
auto &collection = pipelines[device_kernel];
/* Cache up to 3 kernel variants with the same pso_type in memory, purging oldest first. */
int max_entries_of_same_pso_type = 3;
for (int i = (int)collection.size() - 1; i >= 0; i--) {
if (collection[i]->pso_type == pso_type) {
max_entries_of_same_pso_type -= 1;
if (max_entries_of_same_pso_type == 0) {
metal_printf("Purging oldest %s:%s kernel from ShaderCache\n",
kernel_type_as_string(pso_type),
device_kernel_as_string(device_kernel));
collection.erase(collection.begin() + i);
break;
}
}
}
collection.push_back(unique_ptr<MetalKernelPipeline>(pipeline));
}
incomplete_requests--;
if (pso_type != PSO_GENERIC) {
incomplete_specialization_requests--;
}
}
}
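For reference, the queue/condition-variable shape used by this compile loop boils down to the following self-contained sketch (illustrative names only):
#include <condition_variable>
#include <deque>
#include <mutex>
struct Job { int id; };
static std::mutex queue_mutex;
static std::condition_variable queue_cond;
static std::deque<Job *> job_queue;
static bool workers_running = true;
static void worker_loop()
{
while (workers_running) {
Job *job = nullptr;
{
std::unique_lock<std::mutex> lock(queue_mutex);
queue_cond.wait(lock, [] { return !workers_running || !job_queue.empty(); });
if (!workers_running || job_queue.empty()) {
continue;
}
job = job_queue.front();
job_queue.pop_front();
}
/* Service the job outside the lock, then release ownership (here: just delete). */
delete job;
}
}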
bool ShaderCache::should_load_kernel(DeviceKernel device_kernel,
MetalDevice *device,
MetalDevice const *device,
MetalPipelineType pso_type)
{
if (!running) {
return false;
}
if (device_kernel == DEVICE_KERNEL_INTEGRATOR_MEGAKERNEL) {
/* Skip megakernel. */
return false;
@ -240,7 +284,6 @@ void ShaderCache::load_kernel(DeviceKernel device_kernel,
/* create compiler threads on first run */
thread_scoped_lock lock(cache_mutex);
if (compile_threads.empty()) {
running = true;
for (int i = 0; i < max_mtlcompiler_threads; i++) {
compile_threads.push_back(std::thread([&] { compile_thread_func(i); }));
}
@ -252,53 +295,39 @@ void ShaderCache::load_kernel(DeviceKernel device_kernel,
}
incomplete_requests++;
if (pso_type != PSO_GENERIC) {
incomplete_specialization_requests++;
}
PipelineRequest request;
request.pipeline = new MetalKernelPipeline;
memcpy(&request.pipeline->kernel_data_,
&device->launch_params.data,
sizeof(request.pipeline->kernel_data_));
request.pipeline->pso_type = pso_type;
request.pipeline->mtlDevice = mtlDevice;
request.pipeline->source_md5 = device->source_md5[pso_type];
request.pipeline->mtlLibrary = device->mtlLibrary[pso_type];
request.pipeline->device_kernel = device_kernel;
request.pipeline->threads_per_threadgroup = device->max_threads_per_threadgroup;
MetalKernelPipeline *pipeline = new MetalKernelPipeline;
/* Keep track of the originating device's ID so that we can cancel requests if the device ceases
* to be active. */
pipeline->originating_device_id = device->device_id;
memcpy(&pipeline->kernel_data_, &device->launch_params.data, sizeof(pipeline->kernel_data_));
pipeline->pso_type = pso_type;
pipeline->mtlDevice = mtlDevice;
pipeline->source_md5 = device->source_md5[pso_type];
pipeline->mtlLibrary = device->mtlLibrary[pso_type];
pipeline->device_kernel = device_kernel;
pipeline->threads_per_threadgroup = device->max_threads_per_threadgroup;
if (occupancy_tuning[device_kernel].threads_per_threadgroup) {
request.pipeline->threads_per_threadgroup =
pipeline->threads_per_threadgroup =
occupancy_tuning[device_kernel].threads_per_threadgroup;
request.pipeline->num_threads_per_block =
pipeline->num_threads_per_block =
occupancy_tuning[device_kernel].num_threads_per_block;
}
/* metalrt options */
request.pipeline->use_metalrt = device->use_metalrt;
request.pipeline->metalrt_features = device->use_metalrt ?
(device->kernel_features & METALRT_FEATURE_MASK) :
0;
pipeline->use_metalrt = device->use_metalrt;
pipeline->metalrt_features = device->use_metalrt ?
(device->kernel_features & METALRT_FEATURE_MASK) :
0;
{
thread_scoped_lock lock(cache_mutex);
auto &collection = pipelines[device_kernel];
/* Cache up to 3 kernel variants with the same pso_type, purging oldest first. */
int max_entries_of_same_pso_type = 3;
for (int i = (int)collection.size() - 1; i >= 0; i--) {
if (collection[i]->pso_type == pso_type) {
max_entries_of_same_pso_type -= 1;
if (max_entries_of_same_pso_type == 0) {
metal_printf("Purging oldest %s:%s kernel from ShaderCache\n",
kernel_type_as_string(pso_type),
device_kernel_as_string(device_kernel));
collection.erase(collection.begin() + i);
break;
}
}
}
collection.push_back(unique_ptr<MetalKernelPipeline>(request.pipeline));
request_queue.push_back(request);
request_queue.push_back(pipeline);
}
cond_var.notify_one();
}
@ -664,51 +693,61 @@ void MetalKernelPipeline::compile()
double starttime = time_dt();
MTLNewComputePipelineStateWithReflectionCompletionHandler completionHandler = ^(
id<MTLComputePipelineState> computePipelineState,
MTLComputePipelineReflection *reflection,
NSError *error) {
bool recreate_archive = false;
if (computePipelineState == nil && archive) {
/* Block on load to ensure we continue with a valid kernel function */
if (creating_new_archive) {
starttime = time_dt();
NSError *error;
if (![archive addComputePipelineFunctionsWithDescriptor:computePipelineStateDescriptor
error:&error]) {
NSString *errStr = [error localizedDescription];
metal_printf(
"Failed to create compute pipeline state \"%s\" from archive - attempting recreation... "
"(error: %s)\n",
device_kernel_as_string((DeviceKernel)device_kernel),
errStr ? [errStr UTF8String] : "nil");
computePipelineState = [mtlDevice
newComputePipelineStateWithDescriptor:computePipelineStateDescriptor
options:MTLPipelineOptionNone
reflection:nullptr
error:&error];
recreate_archive = true;
metal_printf("Failed to add PSO to archive:\n%s\n", errStr ? [errStr UTF8String] : "nil");
}
}
double duration = time_dt() - starttime;
pipeline = [mtlDevice newComputePipelineStateWithDescriptor:computePipelineStateDescriptor
options:pipelineOptions
reflection:nullptr
error:&error];
if (computePipelineState == nil) {
NSString *errStr = [error localizedDescription];
error_str = string_printf("Failed to create compute pipeline state \"%s\", error: \n",
device_kernel_as_string((DeviceKernel)device_kernel));
error_str += (errStr ? [errStr UTF8String] : "nil");
metal_printf("%16s | %2d | %-55s | %7.2fs | FAILED!\n",
kernel_type_as_string(pso_type),
device_kernel,
device_kernel_as_string((DeviceKernel)device_kernel),
duration);
return;
}
bool recreate_archive = false;
if (pipeline == nil && archive) {
NSString *errStr = [error localizedDescription];
metal_printf(
"Failed to create compute pipeline state \"%s\" from archive - attempting recreation... "
"(error: %s)\n",
device_kernel_as_string((DeviceKernel)device_kernel),
errStr ? [errStr UTF8String] : "nil");
pipeline = [mtlDevice newComputePipelineStateWithDescriptor:computePipelineStateDescriptor
options:MTLPipelineOptionNone
reflection:nullptr
error:&error];
recreate_archive = true;
}
if (!num_threads_per_block) {
num_threads_per_block = round_down(computePipelineState.maxTotalThreadsPerThreadgroup,
computePipelineState.threadExecutionWidth);
num_threads_per_block = std::max(num_threads_per_block,
(int)computePipelineState.threadExecutionWidth);
}
double duration = time_dt() - starttime;
this->pipeline = computePipelineState;
if (pipeline == nil) {
NSString *errStr = [error localizedDescription];
error_str = string_printf("Failed to create compute pipeline state \"%s\", error: \n",
device_kernel_as_string((DeviceKernel)device_kernel));
error_str += (errStr ? [errStr UTF8String] : "nil");
metal_printf("%16s | %2d | %-55s | %7.2fs | FAILED!\n",
kernel_type_as_string(pso_type),
device_kernel,
device_kernel_as_string((DeviceKernel)device_kernel),
duration);
return;
}
if (@available(macOS 11.0, *)) {
if (!num_threads_per_block) {
num_threads_per_block = round_down(pipeline.maxTotalThreadsPerThreadgroup,
pipeline.threadExecutionWidth);
num_threads_per_block = std::max(num_threads_per_block,
(int)pipeline.threadExecutionWidth);
}
if (@available(macOS 11.0, *)) {
if (ShaderCache::running) {
if (creating_new_archive || recreate_archive) {
if (![archive serializeToURL:[NSURL fileURLWithPath:@(metalbin_path.c_str())]
error:&error]) {
@ -720,24 +759,7 @@ void MetalKernelPipeline::compile()
}
}
}
};
/* Block on load to ensure we continue with a valid kernel function */
if (creating_new_archive) {
starttime = time_dt();
NSError *error;
if (![archive addComputePipelineFunctionsWithDescriptor:computePipelineStateDescriptor
error:&error]) {
NSString *errStr = [error localizedDescription];
metal_printf("Failed to add PSO to archive:\n%s\n", errStr ? [errStr UTF8String] : "nil");
}
}
id<MTLComputePipelineState> pipeline = [mtlDevice
newComputePipelineStateWithDescriptor:computePipelineStateDescriptor
options:pipelineOptions
reflection:nullptr
error:&error];
completionHandler(pipeline, nullptr, error);
this->loaded = true;
[computePipelineStateDescriptor release];
@ -763,8 +785,6 @@ void MetalKernelPipeline::compile()
}
}
double duration = time_dt() - starttime;
if (!use_binary_archive) {
metal_printf("%16s | %2d | %-55s | %7.2fs\n",
kernel_type_as_string(pso_type),
@ -791,24 +811,46 @@ bool MetalDeviceKernels::load(MetalDevice *device, MetalPipelineType pso_type)
shader_cache->load_kernel((DeviceKernel)i, device, pso_type);
}
shader_cache->wait_for_all();
metal_printf("Back-end compilation finished in %.1f seconds (%s)\n",
time_dt() - starttime,
kernel_type_as_string(pso_type));
if (getenv("CYCLES_METAL_PROFILING")) {
shader_cache->wait_for_all();
metal_printf("Back-end compilation finished in %.1f seconds (%s)\n",
time_dt() - starttime,
kernel_type_as_string(pso_type));
}
return true;
}
bool MetalDeviceKernels::should_load_kernels(MetalDevice *device, MetalPipelineType pso_type)
bool MetalDeviceKernels::any_specialization_happening_now()
{
auto shader_cache = get_shader_cache(device->mtlDevice);
for (int i = 0; i < DEVICE_KERNEL_NUM; i++) {
if (shader_cache->should_load_kernel((DeviceKernel)i, device, pso_type)) {
/* Return true if any ShaderCaches have ongoing specialization requests (typically there will be
* only 1). */
thread_scoped_lock lock(g_shaderCacheMutex);
for (auto &it : g_shaderCache) {
if (it.second->incomplete_specialization_requests > 0) {
return true;
}
}
return false;
}
int MetalDeviceKernels::get_loaded_kernel_count(MetalDevice const *device,
MetalPipelineType pso_type)
{
auto shader_cache = get_shader_cache(device->mtlDevice);
int loaded_count = DEVICE_KERNEL_NUM;
for (int i = 0; i < DEVICE_KERNEL_NUM; i++) {
if (shader_cache->should_load_kernel((DeviceKernel)i, device, pso_type)) {
loaded_count -= 1;
}
}
return loaded_count;
}
bool MetalDeviceKernels::should_load_kernels(MetalDevice const *device, MetalPipelineType pso_type)
{
return get_loaded_kernel_count(device, pso_type) != DEVICE_KERNEL_NUM;
}
const MetalKernelPipeline *MetalDeviceKernels::get_best_pipeline(const MetalDevice *device,
DeviceKernel kernel)
{

View File

@ -702,6 +702,10 @@ bool MetalDeviceQueue::synchronize()
void MetalDeviceQueue::zero_to_device(device_memory &mem)
{
if (metal_device_->have_error()) {
return;
}
assert(mem.type != MEM_GLOBAL && mem.type != MEM_TEXTURE);
if (mem.memory_size() == 0) {
@ -729,6 +733,10 @@ void MetalDeviceQueue::zero_to_device(device_memory &mem)
void MetalDeviceQueue::copy_to_device(device_memory &mem)
{
if (metal_device_->have_error()) {
return;
}
if (mem.memory_size() == 0) {
return;
}
@ -771,6 +779,10 @@ void MetalDeviceQueue::copy_to_device(device_memory &mem)
void MetalDeviceQueue::copy_from_device(device_memory &mem)
{
if (metal_device_->have_error()) {
return;
}
assert(mem.type != MEM_GLOBAL && mem.type != MEM_TEXTURE);
if (mem.memory_size() == 0) {

View File

@ -429,7 +429,12 @@ void OneapiDevice::check_usm(SyclQueue *queue_, const void *usm_ptr, bool allow_
queue->get_device().get_info<sycl::info::device::device_type>();
sycl::usm::alloc usm_type = get_pointer_type(usm_ptr, queue->get_context());
(void)usm_type;
assert(usm_type == sycl::usm::alloc::device ||
# ifndef WITH_ONEAPI_SYCL_HOST_TASK
const sycl::usm::alloc main_memory_type = sycl::usm::alloc::device;
# else
const sycl::usm::alloc main_memory_type = sycl::usm::alloc::host;
# endif
assert(usm_type == main_memory_type ||
(usm_type == sycl::usm::alloc::host &&
(allow_host || device_type == sycl::info::device_type::cpu)) ||
usm_type == sycl::usm::alloc::unknown);
@ -478,7 +483,11 @@ void *OneapiDevice::usm_alloc_device(SyclQueue *queue_, size_t memory_size)
{
assert(queue_);
sycl::queue *queue = reinterpret_cast<sycl::queue *>(queue_);
# ifndef WITH_ONEAPI_SYCL_HOST_TASK
return sycl::malloc_device(memory_size, *queue);
# else
return sycl::malloc_host(memory_size, *queue);
# endif
}
void OneapiDevice::usm_free(SyclQueue *queue_, void *usm_ptr)
@ -736,7 +745,11 @@ char *OneapiDevice::device_capabilities()
const std::vector<sycl::device> &oneapi_devices = available_devices();
for (const sycl::device &device : oneapi_devices) {
# ifndef WITH_ONEAPI_SYCL_HOST_TASK
const std::string &name = device.get_info<sycl::info::device::name>();
# else
const std::string &name = "SYCL Host Task (Debug)";
# endif
capabilities << std::string("\t") << name << "\n";
# define WRITE_ATTR(attribute_name, attribute_variable) \
@ -813,7 +826,11 @@ void OneapiDevice::iterate_devices(OneAPIDeviceIteratorCallback cb, void *user_p
for (sycl::device &device : devices) {
const std::string &platform_name =
device.get_platform().get_info<sycl::info::platform::name>();
# ifndef WITH_ONEAPI_SYCL_HOST_TASK
std::string name = device.get_info<sycl::info::device::name>();
# else
std::string name = "SYCL Host Task (Debug)";
# endif
std::string id = "ONEAPI_" + platform_name + "_" + name;
if (device.has(sycl::aspect::ext_intel_pci_address)) {
id.append("_" + device.get_info<sycl::ext::intel::info::device::pci_address>());

View File

@ -390,6 +390,9 @@ void PathTrace::path_trace(RenderWork &render_work)
const int num_samples = render_work.path_trace.num_samples;
PathTraceWork *path_trace_work = path_trace_works_[i].get();
if (path_trace_work->get_device()->have_error()) {
return;
}
PathTraceWork::RenderStatistics statistics;
path_trace_work->render_samples(statistics,

View File

@ -752,6 +752,10 @@ if(WITH_CYCLES_DEVICE_ONEAPI)
${SYCL_CPP_FLAGS}
)
if (WITH_CYCLES_ONEAPI_HOST_TASK_EXECUTION)
list(APPEND sycl_compiler_flags -DWITH_ONEAPI_SYCL_HOST_TASK)
endif()
# Set defaults for spir64 and spir64_gen options
if(NOT DEFINED CYCLES_ONEAPI_SYCL_OPTIONS_spir64)
set(CYCLES_ONEAPI_SYCL_OPTIONS_spir64 "-options '-ze-opt-large-register-file -ze-opt-regular-grf-kernel integrator_intersect'")
@ -763,7 +767,8 @@ if(WITH_CYCLES_DEVICE_ONEAPI)
string(PREPEND CYCLES_ONEAPI_SYCL_OPTIONS_spir64_gen "--format zebin ")
string(PREPEND CYCLES_ONEAPI_SYCL_OPTIONS_spir64_gen "-device ${CYCLES_ONEAPI_SPIR64_GEN_DEVICES} ")
if(WITH_CYCLES_ONEAPI_BINARIES)
# Host execution won't use GPU binaries, no need to compile them.
if(WITH_CYCLES_ONEAPI_BINARIES AND NOT WITH_CYCLES_ONEAPI_HOST_TASK_EXECUTION)
# AoT binaries aren't currently reused when calling sycl::build.
list(APPEND sycl_compiler_flags -DSYCL_SKIP_KERNELS_PRELOAD)
# Iterate over all targets and their options

View File

@ -30,6 +30,16 @@ void gpu_parallel_active_index_array_impl(const uint num_states,
ccl_global int *ccl_restrict num_indices,
IsActiveOp is_active_op)
{
# ifdef WITH_ONEAPI_SYCL_HOST_TASK
int write_index = 0;
for (int state_index = 0; state_index < num_states; state_index++) {
if (is_active_op(state_index))
indices[write_index++] = state_index;
}
*num_indices = write_index;
return;
# endif /* WITH_ONEAPI_SYCL_HOST_TASK */
const sycl::nd_item<1> &item_id = sycl::ext::oneapi::experimental::this_nd_item<1>();
const uint blocksize = item_id.get_local_range(0);

View File

@ -56,7 +56,8 @@
#define ccl_gpu_kernel(block_num_threads, thread_num_registers)
#define ccl_gpu_kernel_threads(block_num_threads)
#define ccl_gpu_kernel_signature(name, ...) \
#ifndef WITH_ONEAPI_SYCL_HOST_TASK
# define ccl_gpu_kernel_signature(name, ...) \
void oneapi_kernel_##name(KernelGlobalsGPU *ccl_restrict kg, \
size_t kernel_global_size, \
size_t kernel_local_size, \
@ -67,9 +68,37 @@ void oneapi_kernel_##name(KernelGlobalsGPU *ccl_restrict kg, \
sycl::nd_range<1>(kernel_global_size, kernel_local_size), \
[=](sycl::nd_item<1> item) {
#define ccl_gpu_kernel_postfix \
# define ccl_gpu_kernel_postfix \
}); \
}
#else
/* An additional anonymous lambda is required to handle all "return" statements in the kernel code. */
# define ccl_gpu_kernel_signature(name, ...) \
void oneapi_kernel_##name(KernelGlobalsGPU *ccl_restrict kg, \
size_t kernel_global_size, \
size_t kernel_local_size, \
sycl::handler &cgh, \
__VA_ARGS__) { \
(kg); \
(kernel_local_size); \
cgh.host_task( \
[=]() {\
for (size_t gid = (size_t)0; gid < kernel_global_size; gid++) { \
kg->nd_item_local_id_0 = 0; \
kg->nd_item_local_range_0 = 1; \
kg->nd_item_group_id_0 = gid; \
kg->nd_item_group_range_0 = kernel_global_size; \
kg->nd_item_global_id_0 = gid; \
kg->nd_item_global_range_0 = kernel_global_size; \
auto kernel = [=]() {
# define ccl_gpu_kernel_postfix \
}; \
kernel(); \
} \
}); \
}
#endif
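A condensed sketch of what the two macro variants expand to, assuming a DPC++-style sycl::queue (illustrative code, not the generated Cycles kernels):
#include <sycl/sycl.hpp>
void launch_sketch(sycl::queue &q, size_t kernel_global_size, size_t kernel_local_size)
{
/* Normal GPU path: one work-item per integrator state, launched as an nd_range kernel. */
q.submit([&](sycl::handler &cgh) {
cgh.parallel_for(sycl::nd_range<1>(kernel_global_size, kernel_local_size),
[=](sycl::nd_item<1> item) { /* kernel body */ });
});
/* WITH_ONEAPI_SYCL_HOST_TASK path: a single host task iterates the same global
* range serially, faking the nd_item indices via the KernelGlobalsGPU fields. */
q.submit([&](sycl::handler &cgh) {
cgh.host_task([=]() {
for (size_t gid = 0; gid < kernel_global_size; gid++) {
/* kernel body with local id 0, local range 1, global id gid */
}
});
});
}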
#define ccl_gpu_kernel_call(x) ((ONEAPIKernelContext*)kg)->x
@ -83,23 +112,40 @@ void oneapi_kernel_##name(KernelGlobalsGPU *ccl_restrict kg, \
} ccl_gpu_kernel_lambda_pass((ONEAPIKernelContext *)kg)
/* GPU thread, block, grid size and index */
#define ccl_gpu_thread_idx_x (sycl::ext::oneapi::experimental::this_nd_item<1>().get_local_id(0))
#define ccl_gpu_block_dim_x (sycl::ext::oneapi::experimental::this_nd_item<1>().get_local_range(0))
#define ccl_gpu_block_idx_x (sycl::ext::oneapi::experimental::this_nd_item<1>().get_group(0))
#define ccl_gpu_grid_dim_x (sycl::ext::oneapi::experimental::this_nd_item<1>().get_group_range(0))
#define ccl_gpu_warp_size (sycl::ext::oneapi::experimental::this_sub_group().get_local_range()[0])
#define ccl_gpu_thread_mask(thread_warp) uint(0xFFFFFFFF >> (ccl_gpu_warp_size - thread_warp))
#define ccl_gpu_global_id_x() (sycl::ext::oneapi::experimental::this_nd_item<1>().get_global_id(0))
#define ccl_gpu_global_size_x() (sycl::ext::oneapi::experimental::this_nd_item<1>().get_global_range(0))
#ifndef WITH_ONEAPI_SYCL_HOST_TASK
# define ccl_gpu_thread_idx_x (sycl::ext::oneapi::experimental::this_nd_item<1>().get_local_id(0))
# define ccl_gpu_block_dim_x (sycl::ext::oneapi::experimental::this_nd_item<1>().get_local_range(0))
# define ccl_gpu_block_idx_x (sycl::ext::oneapi::experimental::this_nd_item<1>().get_group(0))
# define ccl_gpu_grid_dim_x (sycl::ext::oneapi::experimental::this_nd_item<1>().get_group_range(0))
# define ccl_gpu_warp_size (sycl::ext::oneapi::experimental::this_sub_group().get_local_range()[0])
# define ccl_gpu_thread_mask(thread_warp) uint(0xFFFFFFFF >> (ccl_gpu_warp_size - thread_warp))
# define ccl_gpu_global_id_x() (sycl::ext::oneapi::experimental::this_nd_item<1>().get_global_id(0))
# define ccl_gpu_global_size_x() (sycl::ext::oneapi::experimental::this_nd_item<1>().get_global_range(0))
/* GPU warp synchronization */
#define ccl_gpu_syncthreads() sycl::ext::oneapi::experimental::this_nd_item<1>().barrier()
#define ccl_gpu_local_syncthreads() sycl::ext::oneapi::experimental::this_nd_item<1>().barrier(sycl::access::fence_space::local_space)
#ifdef __SYCL_DEVICE_ONLY__
#define ccl_gpu_ballot(predicate) (sycl::ext::oneapi::group_ballot(sycl::ext::oneapi::experimental::this_sub_group(), predicate).count())
# define ccl_gpu_syncthreads() sycl::ext::oneapi::experimental::this_nd_item<1>().barrier()
# define ccl_gpu_local_syncthreads() sycl::ext::oneapi::experimental::this_nd_item<1>().barrier(sycl::access::fence_space::local_space)
# ifdef __SYCL_DEVICE_ONLY__
# define ccl_gpu_ballot(predicate) (sycl::ext::oneapi::group_ballot(sycl::ext::oneapi::experimental::this_sub_group(), predicate).count())
# else
# define ccl_gpu_ballot(predicate) (predicate ? 1 : 0)
# endif
#else
#define ccl_gpu_ballot(predicate) (predicate ? 1 : 0)
# define ccl_gpu_thread_idx_x (kg->nd_item_local_id_0)
# define ccl_gpu_block_dim_x (kg->nd_item_local_range_0)
# define ccl_gpu_block_idx_x (kg->nd_item_group_id_0)
# define ccl_gpu_grid_dim_x (kg->nd_item_group_range_0)
# define ccl_gpu_warp_size (1)
# define ccl_gpu_thread_mask(thread_warp) uint(0xFFFFFFFF >> (ccl_gpu_warp_size - thread_warp))
# define ccl_gpu_global_id_x() (kg->nd_item_global_id_0)
# define ccl_gpu_global_size_x() (kg->nd_item_global_range_0)
# define ccl_gpu_syncthreads()
# define ccl_gpu_local_syncthreads()
# define ccl_gpu_ballot(predicate) (predicate ? 1 : 0)
#endif
/* Debug defines */

View File

@ -23,6 +23,15 @@ typedef struct KernelGlobalsGPU {
#undef KERNEL_DATA_ARRAY
IntegratorStateGPU *integrator_state;
const KernelData *__data;
#ifdef WITH_ONEAPI_SYCL_HOST_TASK
size_t nd_item_local_id_0;
size_t nd_item_local_range_0;
size_t nd_item_group_id_0;
size_t nd_item_group_range_0;
size_t nd_item_global_id_0;
size_t nd_item_global_range_0;
#endif
} KernelGlobalsGPU;
typedef ccl_global KernelGlobalsGPU *ccl_restrict KernelGlobals;

View File

@ -230,6 +230,12 @@ bool oneapi_enqueue_kernel(KernelContext *kernel_context,
/* NOTE(@nsirgien): For now, non-uniform work-groups don't work on most oneAPI devices,
* so we extend the work size to meet the uniformity requirements. */
global_size = groups_count * local_size;
# ifdef WITH_ONEAPI_SYCL_HOST_TASK
/* Path array implementation is serial in case of SYCL Host Task execution. */
global_size = 1;
local_size = 1;
# endif
}
/* Let the compiler throw an error if there are any kernels missing in this implementation. */

View File

@ -113,14 +113,18 @@ static void oiio_load_pixels(const ImageMetaData &metadata,
if (depth <= 1) {
size_t scanlinesize = width * components * sizeof(StorageType);
in->read_image(FileFormat,
in->read_image(0,
0,
0,
components,
FileFormat,
(uchar *)readpixels + (height - 1) * scanlinesize,
AutoStride,
-scanlinesize,
AutoStride);
}
else {
in->read_image(FileFormat, (uchar *)readpixels);
in->read_image(0, 0, 0, components, FileFormat, (uchar *)readpixels);
}
if (components > 4) {

View File

@ -439,9 +439,12 @@ bool DenoiseImage::read_previous_pixels(const DenoiseImageLayer &layer,
{
/* Load pixels from neighboring frames, and copy them into device buffer
* with channels reshuffled. */
size_t num_pixels = (size_t)width * (size_t)height;
const size_t num_pixels = (size_t)width * (size_t)height;
const int num_channels = in_previous->spec().nchannels;
array<float> neighbor_pixels(num_pixels * num_channels);
if (!in_previous->read_image(TypeDesc::FLOAT, neighbor_pixels.data())) {
if (!in_previous->read_image(0, 0, 0, num_channels, TypeDesc::FLOAT, neighbor_pixels.data())) {
return false;
}
@ -491,7 +494,7 @@ bool DenoiseImage::load(const string &in_filepath, string &error)
/* Read all channels into buffer. Reading all channels at once is faster
* than individually due to interleaved EXR channel storage. */
if (!in->read_image(TypeDesc::FLOAT, pixels.data())) {
if (!in->read_image(0, 0, 0, num_channels, TypeDesc::FLOAT, pixels.data())) {
error = "Failed to read image: " + in_filepath;
return false;
}
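The calls above all use the explicit-range OpenImageIO overload (subimage, miplevel, chbegin, chend, format, data). A self-contained usage sketch of that overload, with an illustrative helper name:
#include <OpenImageIO/imageio.h>
#include <string>
#include <vector>
/* Read every channel of the first subimage/miplevel into a float buffer. */
bool read_all_channels(const std::string &filepath, std::vector<float> &pixels)
{
auto in = OIIO::ImageInput::open(filepath);
if (!in) {
return false;
}
const OIIO::ImageSpec &spec = in->spec();
pixels.resize((size_t)spec.width * spec.height * spec.nchannels);
/* Arguments: subimage, miplevel, chbegin, chend, format, destination. */
const bool ok = in->read_image(0, 0, 0, spec.nchannels, OIIO::TypeDesc::FLOAT, pixels.data());
in->close();
return ok;
}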

View File

@ -401,8 +401,8 @@ static bool merge_pixels(const vector<MergeImage> &images,
* faster than individually due to interleaved EXR channel storage. */
array<float> pixels;
alloc_pixels(image.in->spec(), pixels);
if (!image.in->read_image(TypeDesc::FLOAT, pixels.data())) {
const int num_channels = image.in->spec().nchannels;
if (!image.in->read_image(0, 0, 0, num_channels, TypeDesc::FLOAT, pixels.data())) {
error = "Failed to read image: " + image.filepath;
return false;
}
@ -538,6 +538,7 @@ static void read_layer_samples(vector<MergeImage> &images,
/* Load the "Debug Sample Count" pass and add the samples to the layer's sample count. */
array<float> sample_count_buffer;
sample_count_buffer.resize(in_spec.width * in_spec.height);
image.in->read_image(0,
0,
layer.sample_pass_offset,

View File

@ -113,6 +113,9 @@ void Session::start()
void Session::cancel(bool quick)
{
/* Cancel any long running device operations (e.g. shader compilations). */
device->cancel();
/* Check if session thread is rendering. */
const bool rendering = is_session_thread_rendering();
@ -401,6 +404,16 @@ RenderWork Session::run_update_for_next_iteration()
path_trace_->load_kernels();
path_trace_->alloc_work_memory();
/* Wait for device to be ready (e.g. finish any background compilations). */
string device_status;
while (!device->is_ready(device_status)) {
progress.set_status(device_status);
if (progress.get_cancel()) {
break;
}
std::this_thread::sleep_for(std::chrono::milliseconds(200));
}
progress.add_skip_time(update_timer, params.background);
}

View File

@ -646,7 +646,8 @@ bool TileManager::read_full_buffer_from_disk(const string_view filename,
return false;
}
if (!in->read_image(TypeDesc::FLOAT, buffers->buffer.data())) {
const int num_channels = in->spec().nchannels;
if (!in->read_image(0, 0, 0, num_channels, TypeDesc::FLOAT, buffers->buffer.data())) {
LOG(ERROR) << "Error reading pixels from the tile file " << in->geterror();
return false;
}

View File

@ -1223,13 +1223,12 @@ static void gwl_registry_entry_update_all(GWL_Display *display, const int interf
continue;
}
GWL_RegisteryUpdate_Params params = {
.name = reg->name,
.interface_slot = reg->interface_slot,
.version = reg->version,
.user_data = reg->user_data,
};
GWL_RegisteryUpdate_Params params{};
params.name = reg->name;
params.interface_slot = reg->interface_slot;
params.version = reg->version;
params.user_data = reg->user_data;
handler->update_fn(display, &params);
}
}
@ -4535,18 +4534,7 @@ static void output_handle_scale(void *data, struct wl_output * /*wl_output*/, co
CLOG_INFO(LOG, 2, "scale");
GWL_Output *output = static_cast<GWL_Output *>(data);
output->scale = factor;
GHOST_WindowManager *window_manager = output->system->getWindowManager();
if (window_manager) {
for (GHOST_IWindow *iwin : window_manager->getWindows()) {
GHOST_WindowWayland *win = static_cast<GHOST_WindowWayland *>(iwin);
const std::vector<GWL_Output *> &outputs = win->outputs();
if (std::find(outputs.begin(), outputs.end(), output) == outputs.cend()) {
continue;
}
win->outputs_changed_update_scale();
}
}
output->system->output_scale_update_maybe_leave(output, false);
}
static const struct wl_output_listener output_listener = {
@ -4736,11 +4724,24 @@ static void gwl_registry_wl_output_update(GWL_Display *display,
}
static void gwl_registry_wl_output_remove(GWL_Display *display,
void *user_data,
const bool /*on_exit*/)
const bool on_exit)
{
/* While windows & cursors hold references to outputs, there is no need to manually remove
* these references as the compositor will remove references via #wl_surface_listener.leave. */
* these references as the compositor will remove references via #wl_surface_listener.leave.
*
* WARNING: this is not the case for WLROOTS based compositors which have a (bug?)
* where surface leave events don't run. So `system->output_leave(..)` is needed
* until the issue is resolved in WLROOTS. */
GWL_Output *output = static_cast<GWL_Output *>(user_data);
if (!on_exit) {
/* Needed for WLROOTS, does nothing if surface leave callbacks have already run. */
output->system->output_scale_update_maybe_leave(output, true);
}
if (output->xdg_output) {
zxdg_output_v1_destroy(output->xdg_output);
}
wl_output_destroy(output->wl_output);
std::vector<GWL_Output *>::iterator iter = std::find(
display->outputs.begin(), display->outputs.end(), output);
@ -5176,11 +5177,10 @@ static void global_handle_add(void *data,
const GWL_RegistryEntry *registry_entry_prev = display->registry_entry;
/* The interface name that is ensured not to be freed. */
GWL_RegisteryAdd_Params params = {
.name = name,
.interface_slot = interface_slot,
.version = version,
};
GWL_RegisteryAdd_Params params{};
params.name = name;
params.interface_slot = interface_slot;
params.version = version;
handler->add_fn(display, &params);
@ -6762,6 +6762,49 @@ void GHOST_SystemWayland::window_surface_unref(const wl_surface *wl_surface)
#undef SURFACE_CLEAR_PTR
}
void GHOST_SystemWayland::output_scale_update_maybe_leave(GWL_Output *output, bool leave)
{
/* Update scale, optionally leaving the outputs beforehand. */
GHOST_WindowManager *window_manager = output->system->getWindowManager();
if (window_manager) {
for (GHOST_IWindow *iwin : window_manager->getWindows()) {
GHOST_WindowWayland *win = static_cast<GHOST_WindowWayland *>(iwin);
const std::vector<GWL_Output *> &outputs = win->outputs();
bool found = leave ? win->outputs_leave(output) :
!(std::find(outputs.begin(), outputs.end(), output) == outputs.cend());
if (found) {
win->outputs_changed_update_scale();
}
}
}
for (GWL_Seat *seat : display_->seats) {
bool found;
found = leave ? seat->pointer.outputs.erase(output) : seat->pointer.outputs.count(output);
if (found) {
if (seat->cursor.wl_surface_cursor != nullptr) {
update_cursor_scale(
seat->cursor, seat->system->wl_shm(), &seat->pointer, seat->cursor.wl_surface_cursor);
}
}
found = leave ? seat->tablet.outputs.erase(output) : seat->tablet.outputs.count(output);
if (found) {
for (struct zwp_tablet_tool_v2 *zwp_tablet_tool_v2 : seat->tablet_tools) {
GWL_TabletTool *tablet_tool = static_cast<GWL_TabletTool *>(
zwp_tablet_tool_v2_get_user_data(zwp_tablet_tool_v2));
if (tablet_tool->wl_surface_cursor != nullptr) {
update_cursor_scale(seat->cursor,
seat->system->wl_shm(),
&seat->pointer,
tablet_tool->wl_surface_cursor);
}
}
}
}
}
bool GHOST_SystemWayland::window_cursor_grab_set(const GHOST_TGrabCursorMode mode,
const GHOST_TGrabCursorMode mode_current,
int32_t init_grab_xy[2],

View File

@ -194,6 +194,8 @@ class GHOST_SystemWayland : public GHOST_System {
/** Set this seat to be active. */
void seat_active_set(const struct GWL_Seat *seat);
void output_scale_update_maybe_leave(GWL_Output *output, bool leave);
/** Clear all references to this surface to prevent accessing NULL pointers. */
void window_surface_unref(const wl_surface *wl_surface);

View File

@ -1361,9 +1361,6 @@ GHOST_TSuccess GHOST_WindowWayland::notify_size()
* Functionality only used for the WAYLAND implementation.
* \{ */
/**
* Return true when the window's scale or DPI changes.
*/
bool GHOST_WindowWayland::outputs_changed_update_scale()
{
#ifdef USE_EVENT_BACKGROUND_THREAD

View File

@ -156,6 +156,9 @@ class GHOST_WindowWayland : public GHOST_Window {
bool outputs_enter(GWL_Output *output);
bool outputs_leave(GWL_Output *output);
/**
* Return true when the window's scale or DPI changes.
*/
bool outputs_changed_update_scale();
#ifdef USE_EVENT_BACKGROUND_THREAD

View File

@ -20,6 +20,7 @@ set(SRC
./intern/mallocn.c
./intern/mallocn_guarded_impl.c
./intern/mallocn_lockfree_impl.c
./intern/memory_usage.cc
MEM_guardedalloc.h
./intern/mallocn_inline.h

View File

@ -53,6 +53,9 @@ class MemLeakPrinter {
void MEM_init_memleak_detection()
{
/* Calling this ensures that the memory usage counters outlive the memory leak detection. */
memory_usage_init();
/**
* This variable is constructed when this function is first called. This should happen as soon as
* possible when the program starts.

View File

@ -89,6 +89,14 @@ void aligned_free(void *ptr);
extern bool leak_detector_has_run;
extern char free_after_leak_detection_message[];
void memory_usage_init(void);
void memory_usage_block_alloc(size_t size);
void memory_usage_block_free(size_t size);
size_t memory_usage_block_num(void);
size_t memory_usage_current(void);
size_t memory_usage_peak(void);
void memory_usage_peak_reset(void);
/* Prototypes for counted allocator functions */
size_t MEM_lockfree_allocN_len(const void *vmemh) ATTR_WARN_UNUSED_RESULT;
void MEM_lockfree_freeN(void *vmemh);

View File

@ -30,8 +30,6 @@ typedef struct MemHeadAligned {
size_t len;
} MemHeadAligned;
static unsigned int totblock = 0;
static size_t mem_in_use = 0, peak_mem = 0;
static bool malloc_debug_memset = false;
static void (*error_callback)(const char *) = NULL;
@ -46,18 +44,6 @@ enum {
#define MEMHEAD_IS_ALIGNED(memhead) ((memhead)->len & (size_t)MEMHEAD_ALIGN_FLAG)
#define MEMHEAD_LEN(memhead) ((memhead)->len & ~((size_t)(MEMHEAD_ALIGN_FLAG)))
/* Uncomment this to have proper peak counter. */
#define USE_ATOMIC_MAX
MEM_INLINE void update_maximum(size_t *maximum_value, size_t value)
{
#ifdef USE_ATOMIC_MAX
atomic_fetch_and_update_max_z(maximum_value, value);
#else
*maximum_value = value > *maximum_value ? value : *maximum_value;
#endif
}
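For readers unfamiliar with the removed helper, the atomic "update maximum" idiom provided by `atomic_fetch_and_update_max_z` can be sketched with plain `std::atomic`; this is an illustrative sketch only, not code from this commit:

/* Illustrative sketch, not part of this commit: the compare-exchange loop behind an
 * atomic "update maximum", similar in spirit to `atomic_fetch_and_update_max_z`. */
#include <atomic>
#include <cstddef>

static void update_maximum_sketch(std::atomic<size_t> &maximum_value, const size_t value)
{
  size_t prev = maximum_value.load(std::memory_order_relaxed);
  /* Retry until the stored maximum is already >= `value` or the store succeeds. On failure,
   * `compare_exchange_weak` reloads `prev` with the currently stored maximum. */
  while (prev < value &&
         !maximum_value.compare_exchange_weak(prev, value, std::memory_order_relaxed)) {
  }
}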
#ifdef __GNUC__
__attribute__((format(printf, 1, 2)))
#endif
@ -103,8 +89,7 @@ void MEM_lockfree_freeN(void *vmemh)
MemHead *memh = MEMHEAD_FROM_PTR(vmemh);
size_t len = MEMHEAD_LEN(memh);
atomic_sub_and_fetch_u(&totblock, 1);
atomic_sub_and_fetch_z(&mem_in_use, len);
memory_usage_block_free(len);
if (UNLIKELY(malloc_debug_memset && len)) {
memset(memh + 1, 255, len);
@ -224,16 +209,14 @@ void *MEM_lockfree_callocN(size_t len, const char *str)
if (LIKELY(memh)) {
memh->len = len;
atomic_add_and_fetch_u(&totblock, 1);
atomic_add_and_fetch_z(&mem_in_use, len);
update_maximum(&peak_mem, mem_in_use);
memory_usage_block_alloc(len);
return PTR_FROM_MEMHEAD(memh);
}
print_error("Calloc returns null: len=" SIZET_FORMAT " in %s, total %u\n",
SIZET_ARG(len),
str,
(uint)mem_in_use);
(uint)memory_usage_current());
return NULL;
}
@ -247,7 +230,7 @@ void *MEM_lockfree_calloc_arrayN(size_t len, size_t size, const char *str)
SIZET_ARG(len),
SIZET_ARG(size),
str,
(unsigned int)mem_in_use);
(unsigned int)memory_usage_current());
abort();
return NULL;
}
@ -269,16 +252,14 @@ void *MEM_lockfree_mallocN(size_t len, const char *str)
}
memh->len = len;
atomic_add_and_fetch_u(&totblock, 1);
atomic_add_and_fetch_z(&mem_in_use, len);
update_maximum(&peak_mem, mem_in_use);
memory_usage_block_alloc(len);
return PTR_FROM_MEMHEAD(memh);
}
print_error("Malloc returns null: len=" SIZET_FORMAT " in %s, total %u\n",
SIZET_ARG(len),
str,
(uint)mem_in_use);
(uint)memory_usage_current());
return NULL;
}
@ -292,7 +273,7 @@ void *MEM_lockfree_malloc_arrayN(size_t len, size_t size, const char *str)
SIZET_ARG(len),
SIZET_ARG(size),
str,
(uint)mem_in_use);
(uint)memory_usage_current());
abort();
return NULL;
}
@ -340,16 +321,14 @@ void *MEM_lockfree_mallocN_aligned(size_t len, size_t alignment, const char *str
memh->len = len | (size_t)MEMHEAD_ALIGN_FLAG;
memh->alignment = (short)alignment;
atomic_add_and_fetch_u(&totblock, 1);
atomic_add_and_fetch_z(&mem_in_use, len);
update_maximum(&peak_mem, mem_in_use);
memory_usage_block_alloc(len);
return PTR_FROM_MEMHEAD(memh);
}
print_error("Malloc returns null: len=" SIZET_FORMAT " in %s, total %u\n",
SIZET_ARG(len),
str,
(uint)mem_in_use);
(uint)memory_usage_current());
return NULL;
}
@ -369,8 +348,8 @@ void MEM_lockfree_callbackmemlist(void (*func)(void *))
void MEM_lockfree_printmemlist_stats(void)
{
printf("\ntotal memory len: %.3f MB\n", (double)mem_in_use / (double)(1024 * 1024));
printf("peak memory len: %.3f MB\n", (double)peak_mem / (double)(1024 * 1024));
printf("\ntotal memory len: %.3f MB\n", (double)memory_usage_current() / (double)(1024 * 1024));
printf("peak memory len: %.3f MB\n", (double)memory_usage_peak() / (double)(1024 * 1024));
printf(
"\nFor more detailed per-block statistics run Blender with memory debugging command line "
"argument.\n");
@ -398,23 +377,23 @@ void MEM_lockfree_set_memory_debug(void)
size_t MEM_lockfree_get_memory_in_use(void)
{
return mem_in_use;
return memory_usage_current();
}
uint MEM_lockfree_get_memory_blocks_in_use(void)
{
return totblock;
return (uint)memory_usage_block_num();
}
/* dummy */
void MEM_lockfree_reset_peak_memory(void)
{
peak_mem = mem_in_use;
memory_usage_peak_reset();
}
size_t MEM_lockfree_get_peak_memory(void)
{
return peak_mem;
return memory_usage_peak();
}
#ifndef NDEBUG

View File

@ -0,0 +1,258 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include <algorithm>
#include <atomic>
#include <cassert>
#include <iostream>
#include <mutex>
#include <vector>
#include "MEM_guardedalloc.h"
#include "mallocn_intern.h"
#include "../../source/blender/blenlib/BLI_strict_flags.h"
namespace {
/**
* This is stored per thread. Align to cache line size to avoid false sharing.
*/
struct alignas(64) Local {
/** Helps to find bugs during program shutdown. */
bool destructed = false;
/**
* True for the first created #Local, which lives on the main thread. When the main thread's local
* data is destructed, we know that Blender is quitting and can no longer rely on thread locals
* being available.
*/
bool is_main = false;
/**
* Number of bytes. This can be negative when e.g. one thread allocates a lot of memory, and
* another frees it. It has to be an atomic, because it may be accessed by other threads when the
* total memory usage is counted.
*/
std::atomic<int64_t> mem_in_use = 0;
/**
* Number of allocated blocks. Can be negative and is atomic for the same reason as above.
*/
std::atomic<int64_t> blocks_num = 0;
/**
* Amount of memory used when the peak was last updated. This is used so that we don't have to
* update the peak memory usage after every memory allocation. Instead it's only updated when "a
* lot" of new memory has been allocated. This makes the peak memory usage a little bit less
* accurate, but it's still good enough for practical purposes.
*/
std::atomic<int64_t> mem_in_use_during_peak_update = 0;
Local();
~Local();
};
/**
* This is a singleton that stores global data.
*/
struct Global {
/**
* Mutex that protects the vector below.
*/
std::mutex locals_mutex;
/**
* All currently constructed #Local. This must only be accessed when the mutex above is
* locked. Individual threads insert and remove themselves here.
*/
std::vector<Local *> locals;
/**
* Number of bytes that are not tracked by #Local. This is necessary because when a thread exits,
* its #Local data is freed. The memory counts stored there would be lost. The memory counts may
* be non-zero during thread destruction, if the thread did an unequal number of allocations and
* frees (which is perfectly valid behavior as long as other threads have the responsibility to
* free any memory that the thread allocated).
*
* To solve this, the memory counts are added to these global counters when the thread
* exits. The global counters are also used when the entire process starts to exit, because the
* #Local data of the main thread is already destructed when the leak detection happens (during
* destruction of static variables which happens after destruction of threadlocals).
*/
std::atomic<int64_t> mem_in_use_outside_locals = 0;
/**
* Number of blocks that are not tracked by #Local, for the same reason as above.
*/
std::atomic<int64_t> blocks_num_outside_locals = 0;
/**
* Peak memory usage since the last reset.
*/
std::atomic<size_t> peak = 0;
};
} // namespace
/**
* This is true for most of the lifetime of the program. Only when the program starts exiting does
* this become false, indicating that the global counters should be used for correctness.
*/
static std::atomic<bool> use_local_counters = true;
/**
* When a thread has allocated this amount of memory, the peak memory usage is updated. An alternative
* would be to update the global peak memory after every allocation, but that would cause much more
* overhead with little benefit.
*/
static constexpr int64_t peak_update_threshold = 1024 * 1024;
static Global &get_global()
{
static Global global;
return global;
}
static Local &get_local_data()
{
static thread_local Local local;
assert(!local.destructed);
return local;
}
Local::Local()
{
Global &global = get_global();
std::lock_guard lock{global.locals_mutex};
if (global.locals.empty()) {
/* This is the first thread creating #Local; it is therefore the main thread, because it is
* created through #memory_usage_init. */
this->is_main = true;
}
/* Register self in the global list. */
global.locals.push_back(this);
}
Local::~Local()
{
Global &global = get_global();
std::lock_guard lock{global.locals_mutex};
/* Unregister self from the global list. */
global.locals.erase(std::find(global.locals.begin(), global.locals.end(), this));
/* Don't forget the memory counts stored locally. */
global.blocks_num_outside_locals.fetch_add(this->blocks_num, std::memory_order_relaxed);
global.mem_in_use_outside_locals.fetch_add(this->mem_in_use, std::memory_order_relaxed);
if (this->is_main) {
/* The main thread started shutting down. Use global counters from now on to avoid accessing
* threadlocals after they have been destructed. */
use_local_counters.store(false, std::memory_order_relaxed);
}
/* Helps to detect when thread locals are accidentally accessed after destruction. */
this->destructed = true;
}
/** Check if the current memory usage is higher than the peak and update it if yes. */
static void update_global_peak()
{
Global &global = get_global();
/* Update peak. */
global.peak = std::max<size_t>(global.peak, memory_usage_current());
std::lock_guard lock{global.locals_mutex};
for (Local *local : global.locals) {
assert(!local->destructed);
/* Updating this makes sure that the peak is not updated too often, which would degrade
* performance. */
local->mem_in_use_during_peak_update = local->mem_in_use.load(std::memory_order_relaxed);
}
}
void memory_usage_init()
{
/* Makes sure that the static and threadlocal variables on the main thread are initialized. */
get_local_data();
}
void memory_usage_block_alloc(const size_t size)
{
if (LIKELY(use_local_counters.load(std::memory_order_relaxed))) {
Local &local = get_local_data();
/* Increase local memory counts. This does not cause thread synchronization in the majority of
* cases, because each thread has these counters on a separate cache line. It may only cause
* synchronization if another thread is computing the total current memory usage at the same
* time, which is very rare compared to doing allocations. */
local.blocks_num.fetch_add(1, std::memory_order_relaxed);
local.mem_in_use.fetch_add(int64_t(size), std::memory_order_relaxed);
/* If a certain amount of new memory has been allocated, update the peak. */
if (local.mem_in_use - local.mem_in_use_during_peak_update > peak_update_threshold) {
update_global_peak();
}
}
else {
Global &global = get_global();
/* Increase global memory counts. */
global.blocks_num_outside_locals.fetch_add(1, std::memory_order_relaxed);
global.mem_in_use_outside_locals.fetch_add(int64_t(size), std::memory_order_relaxed);
}
}
void memory_usage_block_free(const size_t size)
{
if (LIKELY(use_local_counters)) {
/* Decrease local memory counts. See comment in #memory_usage_block_alloc for details regarding
* thread synchronization. */
Local &local = get_local_data();
local.mem_in_use.fetch_sub(int64_t(size), std::memory_order_relaxed);
local.blocks_num.fetch_sub(1, std::memory_order_relaxed);
}
else {
Global &global = get_global();
/* Decrease global memory counts. */
global.blocks_num_outside_locals.fetch_sub(1, std::memory_order_relaxed);
global.mem_in_use_outside_locals.fetch_sub(int64_t(size), std::memory_order_relaxed);
}
}
size_t memory_usage_block_num()
{
Global &global = get_global();
std::lock_guard lock{global.locals_mutex};
/* Count the number of active blocks. */
int64_t blocks_num = global.blocks_num_outside_locals;
for (Local *local : global.locals) {
blocks_num += local->blocks_num;
}
return size_t(blocks_num);
}
size_t memory_usage_current()
{
Global &global = get_global();
std::lock_guard lock{global.locals_mutex};
/* Count the memory that's currently in use. */
int64_t mem_in_use = global.mem_in_use_outside_locals;
for (Local *local : global.locals) {
mem_in_use += local->mem_in_use;
}
return size_t(mem_in_use);
}
/**
* Get the approximate peak memory usage since the last call to #memory_usage_peak_reset.
* This is approximate, because the peak usage is not updated after every allocation (see
* #peak_update_threshold).
*
* In the worst case, the peak memory usage is underestimated by
* `peak_update_threshold * #threads`. After large allocations (larger than the threshold), the
* peak usage is always updated so those allocations will always be taken into account.
*/
size_t memory_usage_peak()
{
update_global_peak();
Global &global = get_global();
return global.peak;
}
void memory_usage_peak_reset()
{
Global &global = get_global();
global.peak = memory_usage_current();
}
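To make the counter API concrete, the following is an illustrative sketch (not part of this commit) exercising the functions declared in `mallocn_intern.h`. With the 1 MiB `peak_update_threshold` above, the reported peak can trail the true peak by at most `peak_update_threshold` times the number of threads (e.g. at most 8 MiB with 8 threads); per the comment above, allocations larger than the threshold always trigger an immediate peak update.

/* Illustrative sketch, not part of this commit: exercising the counter API directly. */
#include <cstdio>

#include "mallocn_intern.h"

static void memory_usage_smoke_test()
{
  memory_usage_init();
  memory_usage_block_alloc(512);
  memory_usage_block_alloc(2048);
  memory_usage_block_free(512);
  printf("blocks: %zu, current: %zu bytes, peak: %zu bytes\n",
         memory_usage_block_num(),
         memory_usage_current(),
         memory_usage_peak());
  memory_usage_peak_reset(); /* Later peaks are measured relative to the current usage. */
}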

View File

@ -6,6 +6,7 @@
*/
#include <boost/locale.hpp>
#include <iostream>
#include <stdio.h>
#include "boost_locale_wrapper.h"

View File

@ -318,7 +318,9 @@ class NODE_MT_node(Menu):
layout.separator()
layout.operator("node.clipboard_copy", text="Copy")
layout.operator("node.clipboard_paste", text="Paste")
row = layout.row()
row.operator_context = 'EXEC_DEFAULT'
row.operator("node.clipboard_paste", text="Paste")
layout.operator("node.duplicate_move")
layout.operator("node.duplicate_move_linked")
layout.operator("node.delete")

View File

@ -25,7 +25,7 @@ extern "C" {
/* Blender file format version. */
#define BLENDER_FILE_VERSION BLENDER_VERSION
#define BLENDER_FILE_SUBVERSION 6
#define BLENDER_FILE_SUBVERSION 7
/* Minimum Blender version that supports reading file written with the current
* version. Older Blender versions will test this and show a warning if the file

View File

@ -287,11 +287,6 @@ class CurvesGeometry : public ::CurvesGeometry {
Span<float2> surface_uv_coords() const;
MutableSpan<float2> surface_uv_coords_for_write();
VArray<float> selection_point_float() const;
MutableSpan<float> selection_point_float_for_write();
VArray<float> selection_curve_float() const;
MutableSpan<float> selection_curve_float_for_write();
/**
* Calculate the largest and smallest position values, only including control points
* (rather than evaluated points). The existing values of `min` and `max` are taken into account.

View File

@ -38,8 +38,6 @@ static const std::string ATTR_HANDLE_POSITION_RIGHT = "handle_right";
static const std::string ATTR_NURBS_ORDER = "nurbs_order";
static const std::string ATTR_NURBS_WEIGHT = "nurbs_weight";
static const std::string ATTR_NURBS_KNOTS_MODE = "knots_mode";
static const std::string ATTR_SELECTION_POINT_FLOAT = ".selection_point_float";
static const std::string ATTR_SELECTION_CURVE_FLOAT = ".selection_curve_float";
static const std::string ATTR_SURFACE_UV_COORDINATE = "surface_uv_coordinate";
/* -------------------------------------------------------------------- */
@ -433,26 +431,6 @@ MutableSpan<float2> CurvesGeometry::surface_uv_coords_for_write()
return get_mutable_attribute<float2>(*this, ATTR_DOMAIN_CURVE, ATTR_SURFACE_UV_COORDINATE);
}
VArray<float> CurvesGeometry::selection_point_float() const
{
return get_varray_attribute<float>(*this, ATTR_DOMAIN_POINT, ATTR_SELECTION_POINT_FLOAT, 1.0f);
}
MutableSpan<float> CurvesGeometry::selection_point_float_for_write()
{
return get_mutable_attribute<float>(*this, ATTR_DOMAIN_POINT, ATTR_SELECTION_POINT_FLOAT, 1.0f);
}
VArray<float> CurvesGeometry::selection_curve_float() const
{
return get_varray_attribute<float>(*this, ATTR_DOMAIN_CURVE, ATTR_SELECTION_CURVE_FLOAT, 1.0f);
}
MutableSpan<float> CurvesGeometry::selection_curve_float_for_write()
{
return get_mutable_attribute<float>(*this, ATTR_DOMAIN_CURVE, ATTR_SELECTION_CURVE_FLOAT, 1.0f);
}
/** \} */
/* -------------------------------------------------------------------- */

View File

@ -987,9 +987,11 @@ class VArrayImpl_For_VertexWeights final : public VMutableArrayImpl<float> {
void set_all(Span<float> src) override
{
for (const int64_t index : src.index_range()) {
this->set(index, src[index]);
}
threading::parallel_for(src.index_range(), 4096, [&](const IndexRange range) {
for (const int64_t i : range) {
this->set(i, src[i]);
}
});
}
void materialize(IndexMask mask, MutableSpan<float> r_span) const override
@ -997,14 +999,16 @@ class VArrayImpl_For_VertexWeights final : public VMutableArrayImpl<float> {
if (dverts_ == nullptr) {
return r_span.fill_indices(mask, 0.0f);
}
for (const int64_t index : mask) {
if (const MDeformWeight *weight = this->find_weight_at_index(index)) {
r_span[index] = weight->weight;
threading::parallel_for(mask.index_range(), 4096, [&](const IndexRange range) {
for (const int64_t i : mask.slice(range)) {
if (const MDeformWeight *weight = this->find_weight_at_index(i)) {
r_span[i] = weight->weight;
}
else {
r_span[i] = 0.0f;
}
}
else {
r_span[index] = 0.0f;
}
}
});
}
void materialize_to_uninitialized(IndexMask mask, MutableSpan<float> r_span) const override
@ -1107,15 +1111,18 @@ class VertexGroupsAttributeProvider final : public DynamicAttributesProvider {
return true;
}
for (MDeformVert &dvert : mesh->deform_verts_for_write()) {
MDeformWeight *weight = BKE_defvert_find_index(&dvert, index);
BKE_defvert_remove_group(&dvert, weight);
for (MDeformWeight &weight : MutableSpan(dvert.dw, dvert.totweight)) {
if (weight.def_nr > index) {
weight.def_nr--;
MutableSpan<MDeformVert> dverts = mesh->deform_verts_for_write();
threading::parallel_for(dverts.index_range(), 1024, [&](IndexRange range) {
for (MDeformVert &dvert : dverts.slice(range)) {
MDeformWeight *weight = BKE_defvert_find_index(&dvert, index);
BKE_defvert_remove_group(&dvert, weight);
for (MDeformWeight &weight : MutableSpan(dvert.dw, dvert.totweight)) {
if (weight.def_nr > index) {
weight.def_nr--;
}
}
}
}
});
return true;
}

View File

@ -2891,6 +2891,7 @@ void BKE_object_obdata_size_init(struct Object *ob, const float size)
case OB_LAMP: {
Light *lamp = (Light *)ob->data;
lamp->dist *= size;
lamp->radius *= size;
lamp->area_size *= size;
lamp->area_sizey *= size;
lamp->area_sizez *= size;

View File

@ -112,4 +112,6 @@ inline void gather(const VArray<T> &src,
});
}
void invert_booleans(MutableSpan<bool> span);
} // namespace blender::array_utils

View File

@ -33,4 +33,13 @@ void gather(const GSpan src, const IndexMask indices, GMutableSpan dst, const in
gather(GVArray::ForSpan(src), indices, dst, grain_size);
}
void invert_booleans(MutableSpan<bool> span)
{
threading::parallel_for(span.index_range(), 4096, [&](IndexRange range) {
for (const int i : range) {
span[i] = !span[i];
}
});
}
} // namespace blender::array_utils
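A minimal usage sketch for the new helper (not part of this commit; it assumes `BLI_array.hh` and the `BLI_array_utils.hh` header shown above are available):

/* Illustrative sketch, not part of this commit: invert a boolean selection in place. */
#include "BLI_array.hh"
#include "BLI_array_utils.hh"

static void invert_booleans_example()
{
  blender::Array<bool> selection(8, false);
  selection[2] = true;
  blender::array_utils::invert_booleans(selection.as_mutable_span());
  /* Every element is now true except index 2. */
}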

View File

@ -1782,12 +1782,6 @@ void blo_do_versions_280(FileData *fd, Library *UNUSED(lib), Main *bmain)
}
if (!MAIN_VERSION_ATLEAST(bmain, 280, 1)) {
if (!DNA_struct_elem_find(fd->filesdna, "Lamp", "float", "bleedexp")) {
for (Light *la = bmain->lights.first; la; la = la->id.next) {
la->bleedexp = 2.5f;
}
}
if (!DNA_struct_elem_find(fd->filesdna, "GPUDOFSettings", "float", "ratio")) {
for (Camera *ca = bmain->cameras.first; ca; ca = ca->id.next) {
ca->gpu_dof.ratio = 1.0f;
@ -1820,7 +1814,6 @@ void blo_do_versions_280(FileData *fd, Library *UNUSED(lib), Main *bmain)
for (Light *la = bmain->lights.first; la; la = la->id.next) {
la->contact_dist = 0.2f;
la->contact_bias = 0.03f;
la->contact_spread = 0.2f;
la->contact_thickness = 0.2f;
}
}

View File

@ -28,6 +28,7 @@
#include "DNA_curves_types.h"
#include "DNA_genfile.h"
#include "DNA_gpencil_modifier_types.h"
#include "DNA_light_types.h"
#include "DNA_lineart_types.h"
#include "DNA_listBase.h"
#include "DNA_mask_types.h"
@ -50,6 +51,7 @@
#include "BKE_collection.h"
#include "BKE_colortools.h"
#include "BKE_curve.h"
#include "BKE_curves.hh"
#include "BKE_data_transfer.h"
#include "BKE_deform.h"
#include "BKE_fcurve.h"
@ -3835,6 +3837,13 @@ void blo_do_versions_300(FileData *fd, Library * /*lib*/, Main *bmain)
}
}
}
if (!MAIN_VERSION_ATLEAST(bmain, 305, 7)) {
LISTBASE_FOREACH (Light *, light, &bmain->lights) {
light->radius = light->area_size;
}
}
/**
* Versioning code until next subversion bump goes here.
*
@ -3850,5 +3859,9 @@ void blo_do_versions_300(FileData *fd, Library * /*lib*/, Main *bmain)
LISTBASE_FOREACH (Curves *, curves_id, &bmain->hair_curves) {
curves_id->flag &= ~CV_SCULPT_SELECTION_ENABLED;
}
LISTBASE_FOREACH (Curves *, curves_id, &bmain->hair_curves) {
BKE_id_attribute_rename(&curves_id->id, ".selection_point_float", ".selection", nullptr);
BKE_id_attribute_rename(&curves_id->id, ".selection_curve_float", ".selection", nullptr);
}
}
}

View File

@ -104,7 +104,13 @@ void OutputFileNode::convert_to_operations(NodeConverter &converter,
char path[FILE_MAX];
/* combine file path for the input */
BLI_path_join(path, FILE_MAX, storage->base_path, sockdata->path);
if (sockdata->path[0]) {
BLI_path_join(path, FILE_MAX, storage->base_path, sockdata->path);
}
else {
BLI_strncpy(path, storage->base_path, FILE_MAX);
BLI_path_slash_ensure(path, FILE_MAX);
}
NodeOperation *output_operation = nullptr;

View File

@ -45,7 +45,7 @@ static void light_shape_parameters_set(EEVEE_Light *evli, const Light *la, const
evli->sizey = scale[1] / scale[2];
evli->spotsize = cosf(la->spotsize * 0.5f);
evli->spotblend = (1.0f - evli->spotsize) * la->spotblend;
evli->radius = max_ff(0.001f, la->area_size);
evli->radius = max_ff(0.001f, la->radius);
}
else if (la->type == LA_AREA) {
evli->sizex = max_ff(0.003f, la->area_size * scale[0] * 0.5f);
@ -62,7 +62,7 @@ static void light_shape_parameters_set(EEVEE_Light *evli, const Light *la, const
evli->radius = max_ff(0.001f, tanf(min_ff(la->sun_angle, DEG2RADF(179.9f)) / 2.0f));
}
else {
evli->radius = max_ff(0.001f, la->area_size);
evli->radius = max_ff(0.001f, la->radius);
}
}

View File

@ -178,7 +178,7 @@ void Light::shape_parameters_set(const ::Light *la, const float scale[3])
_area_size_x = tanf(min_ff(la->sun_angle, DEG2RADF(179.9f)) / 2.0f);
}
else {
_area_size_x = la->area_size;
_area_size_x = la->radius;
}
_area_size_y = _area_size_x = max_ff(0.001f, _area_size_x);
radius_squared = square_f(_area_size_x);

View File

@ -637,7 +637,7 @@ void OVERLAY_light_cache_populate(OVERLAY_Data *vedata, Object *ob)
DRW_buffer_add_entry(cb->groundline, instdata.pos);
if (la->type == LA_LOCAL) {
instdata.area_size_x = instdata.area_size_y = la->area_size;
instdata.area_size_x = instdata.area_size_y = la->radius;
DRW_buffer_add_entry(cb->light_point, color, &instdata);
}
else if (la->type == LA_SUN) {
@ -661,7 +661,7 @@ void OVERLAY_light_cache_populate(OVERLAY_Data *vedata, Object *ob)
instdata.spot_blend = sqrtf((-a - c * a) / (c - c * a));
instdata.spot_cosine = a;
/* HACK: We pack the area size in alpha color. This is decoded by the shader. */
color[3] = -max_ff(la->area_size, FLT_MIN);
color[3] = -max_ff(la->radius, FLT_MIN);
DRW_buffer_add_entry(cb->light_spot, color, &instdata);
if ((la->mode & LA_SHOW_CONE) && !DRW_state_is_select()) {

View File

@ -10,6 +10,7 @@
#include "draw_cache_impl.h"
#include "overlay_private.hh"
#include "BKE_attribute.hh"
#include "BKE_curves.hh"
void OVERLAY_sculpt_curves_cache_init(OVERLAY_Data *vedata)
@ -31,18 +32,11 @@ void OVERLAY_sculpt_curves_cache_init(OVERLAY_Data *vedata)
static bool everything_selected(const Curves &curves_id)
{
const blender::bke::CurvesGeometry &curves = blender::bke::CurvesGeometry::wrap(
curves_id.geometry);
blender::VArray<float> selection;
switch (curves_id.selection_domain) {
case ATTR_DOMAIN_POINT:
selection = curves.selection_point_float();
break;
case ATTR_DOMAIN_CURVE:
selection = curves.selection_curve_float();
break;
}
return selection.is_single() && selection.get_internal_single() == 1.0f;
using namespace blender;
const bke::CurvesGeometry &curves = bke::CurvesGeometry::wrap(curves_id.geometry);
const VArray<bool> selection = curves.attributes().lookup_or_default<bool>(
".selection", ATTR_DOMAIN_POINT, true);
return selection.is_single() && selection.get_internal_single();
}
void OVERLAY_sculpt_curves_cache_populate(OVERLAY_Data *vedata, Object *object)
@ -56,12 +50,9 @@ void OVERLAY_sculpt_curves_cache_populate(OVERLAY_Data *vedata, Object *object)
}
/* Retrieve the location of the texture. */
const char *name = curves->selection_domain == ATTR_DOMAIN_POINT ? ".selection_point_float" :
".selection_curve_float";
bool is_point_domain;
GPUVertBuf **texture = DRW_curves_texture_for_evaluated_attribute(
curves, name, &is_point_domain);
curves, ".selection", &is_point_domain);
if (texture == nullptr) {
return;
}

View File

@ -11,6 +11,7 @@
#include "MEM_guardedalloc.h"
#include "BLI_devirtualize_parameters.hh"
#include "BLI_listbase.h"
#include "BLI_math_base.h"
#include "BLI_math_vec_types.hh"
@ -334,17 +335,16 @@ static void curves_batch_cache_ensure_edit_points_data(const Curves &curves_id,
GPU_vertbuf_init_with_format(cache.edit_points_data, &format_data);
GPU_vertbuf_data_alloc(cache.edit_points_data, curves.points_num());
VArray<float> selection;
const VArray<bool> selection = curves.attributes().lookup_or_default<bool>(
".selection", eAttrDomain(curves_id.selection_domain), true);
switch (curves_id.selection_domain) {
case ATTR_DOMAIN_POINT:
selection = curves.selection_point_float();
for (const int point_i : selection.index_range()) {
const float point_selection = (selection[point_i] > 0.0f) ? 1.0f : 0.0f;
GPU_vertbuf_attr_set(cache.edit_points_data, color, point_i, &point_selection);
}
break;
case ATTR_DOMAIN_CURVE:
selection = curves.selection_curve_float();
for (const int curve_i : curves.curves_range()) {
const float curve_selection = (selection[curve_i] > 0.0f) ? 1.0f : 0.0f;
const IndexRange points = curves.points_for_curve(curve_i);

View File

@ -22,6 +22,7 @@ set(INC
set(SRC
intern/curves_add.cc
intern/curves_ops.cc
intern/curves_selection.cc
)
set(LIB

View File

@ -6,7 +6,9 @@
#include <atomic>
#include "BLI_array_utils.hh"
#include "BLI_devirtualize_parameters.hh"
#include "BLI_index_mask_ops.hh"
#include "BLI_utildefines.h"
#include "BLI_vector_set.hh"
@ -748,7 +750,6 @@ static int curves_set_selection_domain_exec(bContext *C, wmOperator *op)
continue;
}
const eAttrDomain old_domain = eAttrDomain(curves_id->selection_domain);
curves_id->selection_domain = domain;
CurvesGeometry &curves = CurvesGeometry::wrap(curves_id->geometry);
@ -756,18 +757,21 @@ static int curves_set_selection_domain_exec(bContext *C, wmOperator *op)
if (curves.points_num() == 0) {
continue;
}
if (old_domain == ATTR_DOMAIN_POINT && domain == ATTR_DOMAIN_CURVE) {
VArray<float> curve_selection = curves.adapt_domain(
curves.selection_point_float(), ATTR_DOMAIN_POINT, ATTR_DOMAIN_CURVE);
curve_selection.materialize(curves.selection_curve_float_for_write());
attributes.remove(".selection_point_float");
const GVArray src = attributes.lookup(".selection", domain);
if (src.is_empty()) {
continue;
}
else if (old_domain == ATTR_DOMAIN_CURVE && domain == ATTR_DOMAIN_POINT) {
VArray<float> point_selection = curves.adapt_domain(
curves.selection_curve_float(), ATTR_DOMAIN_CURVE, ATTR_DOMAIN_POINT);
point_selection.materialize(curves.selection_point_float_for_write());
attributes.remove(".selection_curve_float");
const CPPType &type = src.type();
void *dst = MEM_malloc_arrayN(attributes.domain_size(domain), type.size(), __func__);
src.materialize(dst);
attributes.remove(".selection");
if (!attributes.add(".selection",
domain,
bke::cpp_type_to_custom_data_type(type),
bke::AttributeInitMoveArray(dst))) {
MEM_freeN(dst);
}
/* Use #ID_RECALC_GEOMETRY instead of #ID_RECALC_SELECT because it is handled as a generic
@ -801,46 +805,54 @@ static void CURVES_OT_set_selection_domain(wmOperatorType *ot)
RNA_def_property_flag(prop, (PropertyFlag)(PROP_HIDDEN | PROP_SKIP_SAVE));
}
static bool varray_contains_nonzero(const VArray<float> &data)
static bool contains(const VArray<bool> &varray, const bool value)
{
bool contains_nonzero = false;
devirtualize_varray(data, [&](const auto array) {
for (const int i : data.index_range()) {
if (array[i] != 0.0f) {
contains_nonzero = true;
break;
}
}
});
return contains_nonzero;
const CommonVArrayInfo info = varray.common_info();
if (info.type == CommonVArrayInfo::Type::Single) {
return *static_cast<const bool *>(info.data) == value;
}
if (info.type == CommonVArrayInfo::Type::Span) {
const Span<bool> span(static_cast<const bool *>(info.data), varray.size());
return threading::parallel_reduce(
span.index_range(),
4096,
false,
[&](const IndexRange range, const bool init) {
return init || span.slice(range).contains(value);
},
[&](const bool a, const bool b) { return a || b; });
}
return threading::parallel_reduce(
varray.index_range(),
2048,
false,
[&](const IndexRange range, const bool init) {
if (init) {
return init;
}
/* Alternatively, this could use #materialize to retrieve many values at once. */
for (const int64_t i : range) {
if (varray[i] == value) {
return true;
}
}
return false;
},
[&](const bool a, const bool b) { return a || b; });
}
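A small usage sketch for the `contains` helper above (not part of this commit; `contains_example` is a hypothetical function and `BLI_array.hh` is assumed to be available in this file), showing the single-value fast path and the span path:

/* Illustrative sketch, not part of this commit. */
static void contains_example()
{
  /* Single value: answered from the internal value without visiting any elements. */
  BLI_assert(!contains(VArray<bool>::ForSingle(false, 1000), true));

  /* Span: scanned in parallel chunks via threading::parallel_reduce. */
  Array<bool> values(1000, false);
  values[42] = true;
  BLI_assert(contains(VArray<bool>::ForSpan(values), true));
}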
bool has_anything_selected(const Curves &curves_id)
{
const CurvesGeometry &curves = CurvesGeometry::wrap(curves_id.geometry);
switch (curves_id.selection_domain) {
case ATTR_DOMAIN_POINT:
return varray_contains_nonzero(curves.selection_point_float());
case ATTR_DOMAIN_CURVE:
return varray_contains_nonzero(curves.selection_curve_float());
}
BLI_assert_unreachable();
return false;
const VArray<bool> selection = curves.attributes().lookup<bool>(".selection");
return !selection || contains(selection, true);
}
static bool any_point_selected(const CurvesGeometry &curves)
static bool has_anything_selected(const Span<Curves *> curves_ids)
{
return varray_contains_nonzero(curves.selection_point_float());
}
static bool any_point_selected(const Span<Curves *> curves_ids)
{
for (const Curves *curves_id : curves_ids) {
if (any_point_selected(CurvesGeometry::wrap(curves_id->geometry))) {
return true;
}
}
return false;
return std::any_of(curves_ids.begin(), curves_ids.end(), [](const Curves *curves_id) {
return has_anything_selected(*curves_id);
});
}
namespace select_all {
@ -854,6 +866,16 @@ static void invert_selection(MutableSpan<float> selection)
});
}
static void invert_selection(GMutableSpan selection)
{
if (selection.type().is<bool>()) {
array_utils::invert_booleans(selection.typed<bool>());
}
else if (selection.type().is<float>()) {
invert_selection(selection.typed<float>());
}
}
static int select_all_exec(bContext *C, wmOperator *op)
{
int action = RNA_enum_get(op->ptr, "action");
@ -861,27 +883,34 @@ static int select_all_exec(bContext *C, wmOperator *op)
VectorSet<Curves *> unique_curves = get_unique_editable_curves(*C);
if (action == SEL_TOGGLE) {
action = any_point_selected(unique_curves) ? SEL_DESELECT : SEL_SELECT;
action = has_anything_selected(unique_curves) ? SEL_DESELECT : SEL_SELECT;
}
for (Curves *curves_id : unique_curves) {
CurvesGeometry &curves = CurvesGeometry::wrap(curves_id->geometry);
bke::MutableAttributeAccessor attributes = curves.attributes_for_write();
if (action == SEL_SELECT) {
/* As an optimization, just remove the selection attributes when everything is selected. */
bke::MutableAttributeAccessor attributes = curves.attributes_for_write();
attributes.remove(".selection_point_float");
attributes.remove(".selection_curve_float");
attributes.remove(".selection");
}
else if (!attributes.contains(".selection")) {
BLI_assert(ELEM(action, SEL_INVERT, SEL_DESELECT));
/* If the attribute doesn't exist and it's either deleted or inverted, create
* it with nothing selected, since that means everything was selected before. */
attributes.add(".selection",
eAttrDomain(curves_id->selection_domain),
CD_PROP_BOOL,
bke::AttributeInitDefaultValue());
}
else {
MutableSpan<float> selection = curves_id->selection_domain == ATTR_DOMAIN_POINT ?
curves.selection_point_float_for_write() :
curves.selection_curve_float_for_write();
bke::GSpanAttributeWriter selection = attributes.lookup_for_write_span(".selection");
if (action == SEL_DESELECT) {
selection.fill(0.0f);
fill_selection_false(selection.span);
}
else if (action == SEL_INVERT) {
invert_selection(selection);
invert_selection(selection.span);
}
selection.finish();
}
/* Use #ID_RECALC_GEOMETRY instead of #ID_RECALC_SELECT because it is handled as a generic

View File

@ -0,0 +1,117 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/** \file
* \ingroup edcurves
*/
#include "BLI_index_mask_ops.hh"
#include "BKE_attribute.hh"
#include "BKE_curves.hh"
#include "ED_curves.h"
#include "ED_object.h"
namespace blender::ed::curves {
static IndexMask retrieve_selected_curves(const bke::CurvesGeometry &curves,
Vector<int64_t> &r_indices)
{
const IndexRange curves_range = curves.curves_range();
const bke::AttributeAccessor attributes = curves.attributes();
/* Interpolate from points to curves manually as a performance improvement, since we are only
* interested in whether any point in each curve is selected. Retrieve the meta data first, since
* #lookup_or_default from the attribute API doesn't report the domain of the attribute. */
std::optional<bke::AttributeMetaData> meta_data = attributes.lookup_meta_data(".selection");
if (meta_data && meta_data->domain == ATTR_DOMAIN_POINT) {
/* Avoid interpolating the attribute to the curve domain by retrieving the
* point-domain values directly. */
const VArray<bool> selection = attributes.lookup_or_default<bool>(
".selection", ATTR_DOMAIN_POINT, true);
if (selection.is_single()) {
return selection.get_internal_single() ? IndexMask(curves_range) : IndexMask();
}
return index_mask_ops::find_indices_based_on_predicate(
curves_range, 512, r_indices, [&](const int64_t curve_i) {
const IndexRange points = curves.points_for_curve(curve_i);
/* The curve is selected if any of its points are selected. */
Array<bool, 32> point_selection(points.size());
selection.materialize_compressed(points, point_selection);
return point_selection.as_span().contains(true);
});
}
const VArray<bool> selection = attributes.lookup_or_default<bool>(
".selection", ATTR_DOMAIN_CURVE, true);
return index_mask_ops::find_indices_from_virtual_array(curves_range, selection, 2048, r_indices);
}
IndexMask retrieve_selected_curves(const Curves &curves_id, Vector<int64_t> &r_indices)
{
const bke::CurvesGeometry &curves = bke::CurvesGeometry::wrap(curves_id.geometry);
return retrieve_selected_curves(curves, r_indices);
}
static IndexMask retrieve_selected_points(const bke::CurvesGeometry &curves,
Vector<int64_t> &r_indices)
{
return index_mask_ops::find_indices_from_virtual_array(
curves.points_range(),
curves.attributes().lookup_or_default<bool>(".selection", ATTR_DOMAIN_POINT, true),
2048,
r_indices);
}
IndexMask retrieve_selected_points(const Curves &curves_id, Vector<int64_t> &r_indices)
{
const bke::CurvesGeometry &curves = bke::CurvesGeometry::wrap(curves_id.geometry);
return retrieve_selected_points(curves, r_indices);
}
void ensure_selection_attribute(Curves &curves_id, const eCustomDataType create_type)
{
bke::CurvesGeometry &curves = bke::CurvesGeometry::wrap(curves_id.geometry);
bke::MutableAttributeAccessor attributes = curves.attributes_for_write();
if (attributes.contains(".selection")) {
return;
}
const eAttrDomain domain = eAttrDomain(curves_id.selection_domain);
const int domain_size = attributes.domain_size(domain);
switch (create_type) {
case CD_PROP_BOOL:
attributes.add(".selection",
domain,
CD_PROP_BOOL,
bke::AttributeInitVArray(VArray<bool>::ForSingle(true, domain_size)));
break;
case CD_PROP_FLOAT:
attributes.add(".selection",
domain,
CD_PROP_FLOAT,
bke::AttributeInitVArray(VArray<float>::ForSingle(1.0f, domain_size)));
break;
default:
BLI_assert_unreachable();
}
}
void fill_selection_false(GMutableSpan selection)
{
if (selection.type().is<bool>()) {
selection.typed<bool>().fill(false);
}
else if (selection.type().is<float>()) {
selection.typed<float>().fill(0.0f);
}
}
void fill_selection_true(GMutableSpan selection)
{
if (selection.type().is<bool>()) {
selection.typed<bool>().fill(true);
}
else if (selection.type().is<float>()) {
selection.typed<float>().fill(1.0f);
}
}
} // namespace blender::ed::curves

View File

@ -20,20 +20,69 @@ void ED_operatortypes_curves(void);
#ifdef __cplusplus
# include "BKE_curves.hh"
# include "BKE_attribute.hh"
# include "BLI_index_mask.hh"
# include "BLI_vector.hh"
# include "BLI_vector_set.hh"
# include "BKE_curves.hh"
namespace blender::ed::curves {
bke::CurvesGeometry primitive_random_sphere(int curves_size, int points_per_curve);
bool has_anything_selected(const Curves &curves_id);
VectorSet<Curves *> get_unique_editable_curves(const bContext &C);
void ensure_surface_deformation_node_exists(bContext &C, Object &curves_ob);
/* -------------------------------------------------------------------- */
/** \name Poll Functions
* \{ */
bool editable_curves_with_surface_poll(bContext *C);
bool curves_with_surface_poll(bContext *C);
bool editable_curves_poll(bContext *C);
bool curves_poll(bContext *C);
/** \} */
/* -------------------------------------------------------------------- */
/** \name Selection
*
* Selection on curves can be stored on either attribute domain: per-curve or per-point. It can be
* stored with a float or boolean data-type. The boolean data-type is faster, smaller, and
* corresponds better to edit-mode selections, but the float data-type is useful for soft selection
* (like masking) in sculpt mode.
*
* The attribute API does the necessary type and domain conversions and can handle most interaction
* with the selection attribute, but these functions implement some helpful utilities on top of
* that.
* \{ */
void fill_selection_false(GMutableSpan span);
void fill_selection_true(GMutableSpan span);
/**
* Return true if any element is selected, on either domain with either type.
*/
bool has_anything_selected(const Curves &curves_id);
/**
* Find curves that have any point selected (a selection factor greater than zero),
* or curves that have their own selection factor greater than zero.
*/
IndexMask retrieve_selected_curves(const Curves &curves_id, Vector<int64_t> &r_indices);
/**
* Find points that are selected (a selection factor greater than zero),
* or points in curves with a selection factor greater than zero.
*/
IndexMask retrieve_selected_points(const Curves &curves_id, Vector<int64_t> &r_indices);
/**
* If the ".selection" attribute doesn't exist, create it with the requested type (bool or float).
*/
void ensure_selection_attribute(Curves &curves_id, const eCustomDataType create_type);
/** \} */
} // namespace blender::ed::curves
#endif
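As a usage illustration (not part of this commit), the helpers declared above compose roughly as follows in editor code; `selection_utilities_example` and its local variables are hypothetical names:

/* Illustrative sketch, not part of this commit; assumes the includes already pulled in above. */
static void selection_utilities_example(Curves &curves_id)
{
  using namespace blender;

  /* Create the ".selection" attribute as a boolean layer if it doesn't exist yet. */
  ed::curves::ensure_selection_attribute(curves_id, CD_PROP_BOOL);

  /* The index vector must outlive the returned mask, which may reference its memory. */
  Vector<int64_t> indices;
  const IndexMask selected_points = ed::curves::retrieve_selected_points(curves_id, indices);
  (void)selected_points; /* E.g. pass the mask to code that edits only the selected points. */
}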

View File

@ -17,26 +17,3 @@ void ED_operatortypes_sculpt_curves(void);
#ifdef __cplusplus
}
#endif
#ifdef __cplusplus
# include "BLI_index_mask.hh"
# include "BLI_vector.hh"
namespace blender::ed::sculpt_paint {
/**
* Find curves that have any point selected (a selection factor greater than zero),
* or curves that have their own selection factor greater than zero.
*/
IndexMask retrieve_selected_curves(const Curves &curves_id, Vector<int64_t> &r_indices);
/**
* Find points that are selected (a selection factor greater than zero),
* or points in curves with a selection factor greater than zero).
*/
IndexMask retrieve_selected_points(const Curves &curves_id, Vector<int64_t> &r_indices);
} // namespace blender::ed::sculpt_paint
#endif

View File

@ -78,15 +78,6 @@ void ED_node_draw_snap(
/* node_draw.cc */
/**
* Draw a single node socket at default size.
* \note this is only called from external code, internally #node_socket_draw_nested() is used for
* optimized drawing of multiple/all sockets of a node.
*/
void ED_node_socket_draw(struct bNodeSocket *sock,
const struct rcti *rect,
const float color[4],
float scale);
void ED_node_tree_update(const struct bContext *C);
void ED_node_tag_update_id(struct ID *id);

View File

@ -9,6 +9,7 @@ struct SpaceNode;
struct ARegion;
struct Main;
struct bNodeTree;
struct rcti;
namespace blender::ed::space_node {
@ -22,4 +23,11 @@ void node_insert_on_link_flags_set(SpaceNode &snode, const ARegion &region);
void node_insert_on_link_flags(Main &bmain, SpaceNode &snode);
void node_insert_on_link_flags_clear(bNodeTree &node_tree);
/**
* Draw a single node socket at default size.
* \note This is only called from external code; internally, #node_socket_draw_nested() is used
* for optimized drawing of multiple/all sockets of a node.
*/
void node_socket_draw(bNodeSocket *sock, const rcti *rect, const float color[4], float scale);
} // namespace blender::ed::space_node

View File

@ -217,7 +217,8 @@ void ED_area_tag_refresh(ScrArea *area);
void ED_area_do_refresh(struct bContext *C, ScrArea *area);
struct AZone *ED_area_azones_update(ScrArea *area, const int mouse_xy[2]);
/**
* Use NULL to disable it.
* Show the given text in the area's header, instead of its regular contents.
* Use NULL to disable this and show the regular header contents again.
*/
void ED_area_status_text(ScrArea *area, const char *str);
/**

View File

@ -9684,14 +9684,20 @@ static int ui_handle_view_items_hover(const wmEvent *event, const ARegion *regio
static int ui_handle_view_item_event(bContext *C,
const wmEvent *event,
ARegion *region,
uiBut *view_but)
uiBut *active_but,
ARegion *region)
{
BLI_assert(view_but->type == UI_BTYPE_VIEW_ITEM);
if (event->type == LEFTMOUSE) {
/* Only bother finding the active view item button if the active button isn't already a view
* item. */
uiBut *view_but = (active_but && active_but->type == UI_BTYPE_VIEW_ITEM) ?
active_but :
ui_view_item_find_mouse_over(region, event->xy);
/* Will free active button if there already is one. */
ui_handle_button_activate(C, region, view_but, BUTTON_ACTIVATE_OVER);
return ui_do_button(C, view_but->block, view_but, event);
if (view_but) {
ui_handle_button_activate(C, region, view_but, BUTTON_ACTIVATE_OVER);
return ui_do_button(C, view_but->block, view_but, event);
}
}
return WM_UI_HANDLER_CONTINUE;
@ -11302,10 +11308,7 @@ static int ui_region_handler(bContext *C, const wmEvent *event, void * /*userdat
* nested in the item (it's an overlapping layout). */
ui_handle_view_items_hover(event, region);
if (retval == WM_UI_HANDLER_CONTINUE) {
uiBut *view_item = ui_view_item_find_mouse_over(region, event->xy);
if (view_item) {
retval = ui_handle_view_item_event(C, event, region, view_item);
}
retval = ui_handle_view_item_event(C, event, but, region);
}
/* delayed apply callbacks */

View File

@ -26,7 +26,7 @@
#include "BLF_api.h"
#include "ED_node.h"
#include "ED_node.hh"
#include "UI_interface.h"
#include "UI_interface_icons.h"
@ -2213,7 +2213,8 @@ static void widget_draw_node_link_socket(const uiWidgetColors *wcol,
UI_widgetbase_draw_cache_flush();
GPU_blend(GPU_BLEND_NONE);
ED_node_socket_draw(static_cast<bNodeSocket *>(but->custom_data), rect, col, scale);
blender::ed::space_node::node_socket_draw(
static_cast<bNodeSocket *>(but->custom_data), rect, col, scale);
}
else {
widget_draw_icon(but, ICON_LAYER_USED, alpha, rect, wcol->text);

View File

@ -241,6 +241,13 @@ struct AddOperationExecutor {
const geometry::AddCurvesOnMeshOutputs add_outputs = geometry::add_curves_on_mesh(
*curves_orig_, add_inputs);
bke::MutableAttributeAccessor attributes = curves_orig_->attributes_for_write();
if (bke::GSpanAttributeWriter selection = attributes.lookup_for_write_span(".selection")) {
curves::fill_selection_true(selection.span.slice(selection.domain == ATTR_DOMAIN_POINT ?
add_outputs.new_points_range :
add_outputs.new_curves_range));
selection.finish();
}
if (add_outputs.uv_error) {
report_invalid_uv_map(stroke_extension.reports);

View File

@ -132,8 +132,9 @@ struct CombOperationExecutor {
transforms_ = CurvesSurfaceTransforms(*curves_ob_orig_, curves_id_orig_->surface);
point_factors_ = get_point_selection(*curves_id_orig_);
curve_selection_ = retrieve_selected_curves(*curves_id_orig_, selected_curve_indices_);
point_factors_ = curves_orig_->attributes().lookup_or_default<float>(
".selection", ATTR_DOMAIN_POINT, 1.0f);
curve_selection_ = curves::retrieve_selected_curves(*curves_id_orig_, selected_curve_indices_);
brush_pos_prev_re_ = self_->brush_pos_last_re_;
brush_pos_re_ = stroke_extension.mouse_position;

View File

@ -97,7 +97,7 @@ struct DeleteOperationExecutor {
curves_ = &CurvesGeometry::wrap(curves_id_->geometry);
selected_curve_indices_.clear();
curve_selection_ = retrieve_selected_curves(*curves_id_, selected_curve_indices_);
curve_selection_ = curves::retrieve_selected_curves(*curves_id_, selected_curve_indices_);
curves_sculpt_ = ctx_.scene->toolsettings->curves_sculpt;
brush_ = BKE_paint_brush_for_read(&curves_sculpt_->paint);

View File

@ -286,6 +286,13 @@ struct DensityAddOperationExecutor {
const geometry::AddCurvesOnMeshOutputs add_outputs = geometry::add_curves_on_mesh(
*curves_orig_, add_inputs);
bke::MutableAttributeAccessor attributes = curves_orig_->attributes_for_write();
if (bke::GSpanAttributeWriter selection = attributes.lookup_for_write_span(".selection")) {
curves::fill_selection_true(selection.span.slice(selection.domain == ATTR_DOMAIN_POINT ?
add_outputs.new_points_range :
add_outputs.new_curves_range));
selection.finish();
}
if (add_outputs.uv_error) {
report_invalid_uv_map(stroke_extension.reports);
@ -562,7 +569,7 @@ struct DensitySubtractOperationExecutor {
minimum_distance_ = brush_->curves_sculpt_settings->minimum_distance;
curve_selection_ = retrieve_selected_curves(*curves_id_, selected_curve_indices_);
curve_selection_ = curves::retrieve_selected_curves(*curves_id_, selected_curve_indices_);
transforms_ = CurvesSurfaceTransforms(*object_, curves_id_->surface);
const eBrushFalloffShape falloff_shape = static_cast<eBrushFalloffShape>(

View File

@ -280,8 +280,9 @@ struct CurvesEffectOperationExecutor {
return;
}
curve_selection_factors_ = get_curves_selection(*curves_id_);
curve_selection_ = retrieve_selected_curves(*curves_id_, selected_curve_indices_);
curve_selection_factors_ = curves_->attributes().lookup_or_default(
".selection", ATTR_DOMAIN_CURVE, 1.0f);
curve_selection_ = curves::retrieve_selected_curves(*curves_id_, selected_curve_indices_);
const CurvesSculpt &curves_sculpt = *ctx_.scene->toolsettings->curves_sculpt;
brush_ = BKE_paint_brush_for_read(&curves_sculpt.paint);

View File

@ -11,10 +11,11 @@
#include "BLI_vector.hh"
#include "BLI_virtual_array.hh"
#include "BKE_attribute.h"
#include "BKE_attribute.hh"
#include "BKE_crazyspace.hh"
#include "BKE_curves.hh"
#include "ED_curves.h"
#include "ED_curves_sculpt.h"
struct ARegion;
@ -92,15 +93,7 @@ std::optional<CurvesBrush3D> sample_curves_3d_brush(const Depsgraph &depsgraph,
Vector<float4x4> get_symmetry_brush_transforms(eCurvesSymmetryType symmetry);
/**
* Get the floating point selection on the curve domain, averaged from points if necessary.
*/
VArray<float> get_curves_selection(const Curves &curves_id);
/**
* Get the floating point selection on the curve domain, copied from curves if necessary.
*/
VArray<float> get_point_selection(const Curves &curves_id);
bke::SpanAttributeWriter<float> float_selection_ensure(Curves &curves_id);
/** See #move_last_point_and_resample. */
struct MoveAndResampleBuffers {

View File

@ -363,12 +363,14 @@ static int select_random_exec(bContext *C, wmOperator *op)
for (Curves *curves_id : unique_curves) {
CurvesGeometry &curves = CurvesGeometry::wrap(curves_id->geometry);
const bool was_anything_selected = curves::has_anything_selected(*curves_id);
bke::SpanAttributeWriter<float> attribute = float_selection_ensure(*curves_id);
MutableSpan<float> selection = attribute.span;
if (!was_anything_selected) {
selection.fill(1.0f);
}
switch (curves_id->selection_domain) {
case ATTR_DOMAIN_POINT: {
MutableSpan<float> selection = curves.selection_point_float_for_write();
if (!was_anything_selected) {
selection.fill(1.0f);
}
if (partial) {
if (constant_per_curve) {
for (const int curve_i : curves.curves_range()) {
@ -408,10 +410,6 @@ static int select_random_exec(bContext *C, wmOperator *op)
break;
}
case ATTR_DOMAIN_CURVE: {
MutableSpan<float> selection = curves.selection_curve_float_for_write();
if (!was_anything_selected) {
selection.fill(1.0f);
}
if (partial) {
for (const int curve_i : curves.curves_range()) {
const float random_value = next_partial_random_value();
@ -429,9 +427,6 @@ static int select_random_exec(bContext *C, wmOperator *op)
break;
}
}
MutableSpan<float> selection = curves_id->selection_domain == ATTR_DOMAIN_POINT ?
curves.selection_point_float_for_write() :
curves.selection_curve_float_for_write();
const bool was_any_selected = std::any_of(
selection.begin(), selection.end(), [](const float v) { return v > 0.0f; });
if (was_any_selected) {
@ -445,6 +440,8 @@ static int select_random_exec(bContext *C, wmOperator *op)
}
}
attribute.finish();
/* Use #ID_RECALC_GEOMETRY instead of #ID_RECALC_SELECT because it is handled as a generic
* attribute for now. */
DEG_id_tag_update(&curves_id->id, ID_RECALC_GEOMETRY);
@ -541,22 +538,35 @@ static int select_end_exec(bContext *C, wmOperator *op)
for (Curves *curves_id : unique_curves) {
CurvesGeometry &curves = CurvesGeometry::wrap(curves_id->geometry);
bke::MutableAttributeAccessor attributes = curves.attributes_for_write();
const bool was_anything_selected = curves::has_anything_selected(*curves_id);
MutableSpan<float> selection = curves.selection_point_float_for_write();
curves::ensure_selection_attribute(*curves_id, CD_PROP_BOOL);
bke::GSpanAttributeWriter selection = attributes.lookup_for_write_span(".selection");
if (!was_anything_selected) {
selection.fill(1.0f);
curves::fill_selection_true(selection.span);
}
threading::parallel_for(curves.curves_range(), 256, [&](const IndexRange range) {
for (const int curve_i : range) {
const IndexRange points = curves.points_for_curve(curve_i);
if (end_points) {
selection.slice(points.drop_back(amount)).fill(0.0f);
}
else {
selection.slice(points.drop_front(amount)).fill(0.0f);
}
selection.span.type().to_static_type_tag<bool, float>([&](auto type_tag) {
using T = typename decltype(type_tag)::type;
if constexpr (std::is_void_v<T>) {
BLI_assert_unreachable();
}
else {
MutableSpan<T> selection_typed = selection.span.typed<T>();
threading::parallel_for(curves.curves_range(), 256, [&](const IndexRange range) {
for (const int curve_i : range) {
const IndexRange points = curves.points_for_curve(curve_i);
if (end_points) {
selection_typed.slice(points.drop_back(amount)).fill(T(0));
}
else {
selection_typed.slice(points.drop_front(amount)).fill(T(0));
}
}
});
}
});
selection.finish();
/* Use #ID_RECALC_GEOMETRY instead of #ID_RECALC_SELECT because it is handled as a generic
* attribute for now. */
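The `to_static_type_tag` dispatch used above is easier to read in isolation; a minimal sketch (not part of this commit, with `fill_generic_span_with_zero` as a hypothetical helper) that zeroes a generic bool/float span:

/* Illustrative sketch, not part of this commit: static dispatch over the two supported
 * selection types. The `void` case means the span's type is not in the candidate list. */
static void fill_generic_span_with_zero(GMutableSpan span)
{
  span.type().to_static_type_tag<bool, float>([&](auto type_tag) {
    using T = typename decltype(type_tag)::type;
    if constexpr (std::is_void_v<T>) {
      BLI_assert_unreachable();
    }
    else {
      span.typed<T>().fill(T(0));
    }
  });
}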
@ -592,12 +602,14 @@ namespace select_grow {
struct GrowOperatorDataPerCurve : NonCopyable, NonMovable {
Curves *curves_id;
Vector<int> selected_points;
Vector<int> unselected_points;
Vector<int64_t> selected_point_indices;
Vector<int64_t> unselected_point_indices;
IndexMask selected_points;
IndexMask unselected_points;
Array<float> distances_to_selected;
Array<float> distances_to_unselected;
Array<float> original_selection;
GArray<> original_selection;
float pixel_to_distance_factor;
};
@ -621,7 +633,7 @@ static void update_points_selection(const GrowOperatorDataPerCurve &data,
}
});
threading::parallel_for(data.selected_points.index_range(), 512, [&](const IndexRange range) {
for (const int point_i : data.selected_points.as_span().slice(range)) {
for (const int point_i : data.selected_points.slice(range)) {
points_selection[point_i] = 1.0f;
}
});
@ -637,7 +649,7 @@ static void update_points_selection(const GrowOperatorDataPerCurve &data,
});
threading::parallel_for(
data.unselected_points.index_range(), 512, [&](const IndexRange range) {
for (const int point_i : data.unselected_points.as_span().slice(range)) {
for (const int point_i : data.unselected_points.slice(range)) {
points_selection[point_i] = 0.0f;
}
});
@ -653,18 +665,19 @@ static int select_grow_update(bContext *C, wmOperator *op, const float mouse_dif
CurvesGeometry &curves = CurvesGeometry::wrap(curves_id.geometry);
const float distance = curve_op_data->pixel_to_distance_factor * mouse_diff_x;
bke::SpanAttributeWriter<float> selection = float_selection_ensure(curves_id);
/* Grow or shrink selection based on precomputed distances. */
switch (curves_id.selection_domain) {
switch (selection.domain) {
case ATTR_DOMAIN_POINT: {
MutableSpan<float> points_selection = curves.selection_point_float_for_write();
update_points_selection(*curve_op_data, distance, points_selection);
update_points_selection(*curve_op_data, distance, selection.span);
break;
}
case ATTR_DOMAIN_CURVE: {
Array<float> new_points_selection(curves.points_num());
update_points_selection(*curve_op_data, distance, new_points_selection);
/* Propagate grown point selection to the curve selection. */
MutableSpan<float> curves_selection = curves.selection_curve_float_for_write();
MutableSpan<float> curves_selection = selection.span;
for (const int curve_i : curves.curves_range()) {
const IndexRange points = curves.points_for_curve(curve_i);
const Span<float> points_selection = new_points_selection.as_span().slice(points);
@ -674,8 +687,12 @@ static int select_grow_update(bContext *C, wmOperator *op, const float mouse_dif
}
break;
}
default:
BLI_assert_unreachable();
}
selection.finish();
/* Use #ID_RECALC_GEOMETRY instead of #ID_RECALC_SELECT because it is handled as a generic
* attribute for now. */
DEG_id_tag_update(&curves_id.id, ID_RECALC_GEOMETRY);
@ -685,57 +702,28 @@ static int select_grow_update(bContext *C, wmOperator *op, const float mouse_dif
return OPERATOR_FINISHED;
}
static void select_grow_invoke_per_curve(Curves &curves_id,
Object &curves_ob,
static void select_grow_invoke_per_curve(const Curves &curves_id,
const Object &curves_ob,
const ARegion &region,
const View3D &v3d,
const RegionView3D &rv3d,
GrowOperatorDataPerCurve &curve_op_data)
{
curve_op_data.curves_id = &curves_id;
CurvesGeometry &curves = CurvesGeometry::wrap(curves_id.geometry);
const CurvesGeometry &curves = CurvesGeometry::wrap(curves_id.geometry);
const Span<float3> positions = curves.positions();
/* Find indices of selected and unselected points. */
switch (curves_id.selection_domain) {
case ATTR_DOMAIN_POINT: {
const VArray<float> points_selection = curves.selection_point_float();
curve_op_data.original_selection.reinitialize(points_selection.size());
points_selection.materialize(curve_op_data.original_selection);
for (const int point_i : points_selection.index_range()) {
const float point_selection = points_selection[point_i];
if (point_selection > 0.0f) {
curve_op_data.selected_points.append(point_i);
}
else {
curve_op_data.unselected_points.append(point_i);
}
}
break;
}
case ATTR_DOMAIN_CURVE: {
const VArray<float> curves_selection = curves.selection_curve_float();
curve_op_data.original_selection.reinitialize(curves_selection.size());
curves_selection.materialize(curve_op_data.original_selection);
for (const int curve_i : curves_selection.index_range()) {
const float curve_selection = curves_selection[curve_i];
const IndexRange points = curves.points_for_curve(curve_i);
if (curve_selection > 0.0f) {
for (const int point_i : points) {
curve_op_data.selected_points.append(point_i);
}
}
else {
for (const int point_i : points) {
curve_op_data.unselected_points.append(point_i);
}
}
}
break;
}
if (const bke::GAttributeReader original_selection = curves.attributes().lookup(".selection")) {
curve_op_data.original_selection = GArray<>(original_selection.varray.type(),
original_selection.varray.size());
original_selection.varray.materialize(curve_op_data.original_selection.data());
}
/* Find indices of selected and unselected points. */
curve_op_data.selected_points = curves::retrieve_selected_points(
curves_id, curve_op_data.selected_point_indices);
curve_op_data.unselected_points = curve_op_data.selected_points.invert(
curves.points_range(), curve_op_data.unselected_point_indices);
threading::parallel_invoke(
1024 < curve_op_data.selected_points.size() + curve_op_data.unselected_points.size(),
[&]() {
@ -838,6 +826,7 @@ static int select_grow_invoke(bContext *C, wmOperator *op, const wmEvent *event)
Curves &curves_id = *static_cast<Curves *>(active_ob->data);
auto curve_op_data = std::make_unique<GrowOperatorDataPerCurve>();
curve_op_data->curves_id = &curves_id;
select_grow_invoke_per_curve(curves_id, *active_ob, *region, *v3d, *rv3d, *curve_op_data);
op_data->per_curve.append(std::move(curve_op_data));
@ -865,17 +854,15 @@ static int select_grow_modal(bContext *C, wmOperator *op, const wmEvent *event)
for (std::unique_ptr<GrowOperatorDataPerCurve> &curve_op_data : op_data.per_curve) {
Curves &curves_id = *curve_op_data->curves_id;
CurvesGeometry &curves = CurvesGeometry::wrap(curves_id.geometry);
switch (curves_id.selection_domain) {
case ATTR_DOMAIN_POINT: {
MutableSpan<float> points_selection = curves.selection_point_float_for_write();
points_selection.copy_from(curve_op_data->original_selection);
break;
}
case ATTR_DOMAIN_CURVE: {
MutableSpan<float> curves_seletion = curves.selection_curve_float_for_write();
curves_seletion.copy_from(curve_op_data->original_selection);
break;
}
bke::MutableAttributeAccessor attributes = curves.attributes_for_write();
attributes.remove(".selection");
if (!curve_op_data->original_selection.is_empty()) {
attributes.add(
".selection",
eAttrDomain(curves_id.selection_domain),
bke::cpp_type_to_custom_data_type(curve_op_data->original_selection.type()),
bke::AttributeInitVArray(GVArray::ForSpan(curve_op_data->original_selection)));
}
/* Use #ID_RECALC_GEOMETRY instead of #ID_RECALC_SELECT because it is handled as a generic

View File

@ -105,8 +105,9 @@ struct PinchOperationExecutor {
transforms_ = CurvesSurfaceTransforms(*object_, curves_id_->surface);
point_factors_ = get_point_selection(*curves_id_);
curve_selection_ = retrieve_selected_curves(*curves_id_, selected_curve_indices_);
point_factors_ = curves_->attributes().lookup_or_default<float>(
".selection", ATTR_DOMAIN_POINT, 1.0f);
curve_selection_ = curves::retrieve_selected_curves(*curves_id_, selected_curve_indices_);
brush_pos_re_ = stroke_extension.mouse_position;
const eBrushFalloffShape falloff_shape = static_cast<eBrushFalloffShape>(

View File

@ -102,8 +102,9 @@ struct PuffOperationExecutor {
brush_strength_ = brush_strength_get(*ctx_.scene, *brush_, stroke_extension);
brush_pos_re_ = stroke_extension.mouse_position;
point_factors_ = get_point_selection(*curves_id_);
curve_selection_ = retrieve_selected_curves(*curves_id_, selected_curve_indices_);
point_factors_ = curves_->attributes().lookup_or_default<float>(
".selection", ATTR_DOMAIN_POINT, 1.0f);
curve_selection_ = curves::retrieve_selected_curves(*curves_id_, selected_curve_indices_);
falloff_shape_ = static_cast<eBrushFalloffShape>(brush_->falloff_shape);
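Several brush executors in this diff replace the old get_point_selection()/get_curves_selection() helpers with direct reads of the generic ".selection" attribute. A minimal sketch of that lookup pattern, assuming the blender::bke attribute API used in the hunks above (the local names are illustrative):

    /* Illustrative only: per-point factors, defaulting to fully selected when no
     * ".selection" attribute exists yet. */
    const CurvesGeometry &curves = CurvesGeometry::wrap(curves_id.geometry);
    const VArray<float> point_factors = curves.attributes().lookup_or_default<float>(
        ".selection", ATTR_DOMAIN_POINT, 1.0f);
    /* The curve-domain variant only changes the domain argument. */
    const VArray<float> curve_factors = curves.attributes().lookup_or_default<float>(
        ".selection", ATTR_DOMAIN_CURVE, 1.0f);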

View File

@ -10,132 +10,34 @@
namespace blender::ed::sculpt_paint {
static VArray<float> get_curves_selection(const CurvesGeometry &curves, const eAttrDomain domain)
bke::SpanAttributeWriter<float> float_selection_ensure(Curves &curves_id)
{
switch (domain) {
case ATTR_DOMAIN_CURVE:
return curves.selection_curve_float();
case ATTR_DOMAIN_POINT:
return curves.adapt_domain(
curves.selection_point_float(), ATTR_DOMAIN_POINT, ATTR_DOMAIN_CURVE);
default:
BLI_assert_unreachable();
return {};
}
}
/* TODO: Use a generic attribute conversion utility instead of this function. */
bke::CurvesGeometry &curves = bke::CurvesGeometry::wrap(curves_id.geometry);
bke::MutableAttributeAccessor attributes = curves.attributes_for_write();
VArray<float> get_curves_selection(const Curves &curves_id)
{
return get_curves_selection(CurvesGeometry::wrap(curves_id.geometry),
eAttrDomain(curves_id.selection_domain));
}
if (const auto meta_data = attributes.lookup_meta_data(".selection")) {
if (meta_data->data_type == CD_PROP_BOOL) {
const VArray<float> selection = attributes.lookup<float>(".selection");
float *dst = static_cast<float *>(
MEM_malloc_arrayN(selection.size(), sizeof(float), __func__));
selection.materialize({dst, selection.size()});
static VArray<float> get_point_selection(const CurvesGeometry &curves, const eAttrDomain domain)
{
switch (domain) {
case ATTR_DOMAIN_CURVE:
return curves.adapt_domain(
curves.selection_curve_float(), ATTR_DOMAIN_CURVE, ATTR_DOMAIN_POINT);
case ATTR_DOMAIN_POINT:
return curves.selection_point_float();
default:
BLI_assert_unreachable();
return {};
}
}
VArray<float> get_point_selection(const Curves &curves_id)
{
return get_point_selection(CurvesGeometry::wrap(curves_id.geometry),
eAttrDomain(curves_id.selection_domain));
}
static IndexMask retrieve_selected_curves(const CurvesGeometry &curves,
const eAttrDomain domain,
Vector<int64_t> &r_indices)
{
switch (domain) {
case ATTR_DOMAIN_POINT: {
const VArray<float> selection = curves.selection_point_float();
if (selection.is_single()) {
return selection.get_internal_single() <= 0.0f ? IndexMask(0) :
IndexMask(curves.curves_num());
}
const Span<float> point_selection_span = selection.get_internal_span();
return index_mask_ops::find_indices_based_on_predicate(
curves.curves_range(), 512, r_indices, [&](const int curve_i) {
for (const int i : curves.points_for_curve(curve_i)) {
if (point_selection_span[i] > 0.0f) {
return true;
}
}
return false;
});
attributes.remove(".selection");
attributes.add(
".selection", meta_data->domain, CD_PROP_FLOAT, bke::AttributeInitMoveArray(dst));
}
case ATTR_DOMAIN_CURVE: {
const VArray<float> selection = curves.selection_curve_float();
if (selection.is_single()) {
return selection.get_internal_single() <= 0.0f ? IndexMask(0) :
IndexMask(curves.curves_num());
}
return index_mask_ops::find_indices_based_on_predicate(
curves.curves_range(), 2048, r_indices, [&](const int i) {
return selection[i] > 0.0f;
});
}
default:
BLI_assert_unreachable();
return {};
}
}
IndexMask retrieve_selected_curves(const Curves &curves_id, Vector<int64_t> &r_indices)
{
return retrieve_selected_curves(CurvesGeometry::wrap(curves_id.geometry),
eAttrDomain(curves_id.selection_domain),
r_indices);
}
static IndexMask retrieve_selected_points(const CurvesGeometry &curves,
const eAttrDomain domain,
Vector<int64_t> &r_indices)
{
switch (domain) {
case ATTR_DOMAIN_POINT: {
const VArray<float> selection = curves.selection_point_float();
if (selection.is_single()) {
return selection.get_internal_single() <= 0.0f ? IndexMask(0) :
IndexMask(curves.points_num());
}
return index_mask_ops::find_indices_based_on_predicate(
curves.points_range(), 2048, r_indices, [&](const int i) {
return selection[i] > 0.0f;
});
}
case ATTR_DOMAIN_CURVE: {
const VArray<float> selection = curves.selection_curve_float();
if (selection.is_single()) {
return selection.get_internal_single() <= 0.0f ? IndexMask(0) :
IndexMask(curves.points_num());
}
const VArray<float> point_selection = curves.adapt_domain(
selection, ATTR_DOMAIN_CURVE, ATTR_DOMAIN_POINT);
return index_mask_ops::find_indices_based_on_predicate(
curves.points_range(), 2048, r_indices, [&](const int i) {
return point_selection[i] > 0.0f;
});
}
default:
BLI_assert_unreachable();
return {};
else {
const eAttrDomain domain = eAttrDomain(curves_id.selection_domain);
const int64_t size = attributes.domain_size(domain);
attributes.add(".selection",
domain,
CD_PROP_FLOAT,
bke::AttributeInitVArray(VArray<float>::ForSingle(size, 1.0f)));
}
}
IndexMask retrieve_selected_points(const Curves &curves_id, Vector<int64_t> &r_indices)
{
return retrieve_selected_points(CurvesGeometry::wrap(curves_id.geometry),
eAttrDomain(curves_id.selection_domain),
r_indices);
return curves.attributes_for_write().lookup_for_write_span<float>(".selection");
}
} // namespace blender::ed::sculpt_paint
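For callers, a minimal sketch of how float_selection_ensure() is intended to be used, following the grow and selection-paint operators elsewhere in this diff (the surrounding operator code is hypothetical):

    /* Hypothetical caller: ensure a float ".selection" attribute, write through the span,
     * then commit the change. */
    bke::SpanAttributeWriter<float> selection = float_selection_ensure(curves_id);
    if (!selection) {
      return;
    }
    /* selection.domain matches curves_id.selection_domain. */
    selection.span.fill(1.0f);
    selection.finish();
    /* Selection is handled as a generic attribute for now, so tag geometry. */
    DEG_id_tag_update(&curves_id.id, ID_RECALC_GEOMETRY);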

View File

@ -8,6 +8,7 @@
#include "DNA_brush_types.h"
#include "BKE_attribute.hh"
#include "BKE_brush.h"
#include "BKE_context.h"
#include "BKE_curves.hh"
@ -57,6 +58,8 @@ struct SelectionPaintOperationExecutor {
Curves *curves_id_ = nullptr;
CurvesGeometry *curves_ = nullptr;
bke::SpanAttributeWriter<float> selection_;
const Brush *brush_ = nullptr;
float brush_radius_base_re_;
float brush_radius_factor_;
@ -84,6 +87,10 @@ struct SelectionPaintOperationExecutor {
if (curves_->curves_num() == 0) {
return;
}
selection_ = float_selection_ensure(*curves_id_);
if (!selection_) {
return;
}
brush_ = BKE_paint_brush_for_read(&ctx_.scene->toolsettings->curves_sculpt->paint);
brush_radius_base_re_ = BKE_brush_size_get(ctx_.scene, brush_);
@ -94,12 +101,7 @@ struct SelectionPaintOperationExecutor {
if (self.clear_selection_) {
if (stroke_extension.is_first) {
if (curves_id_->selection_domain == ATTR_DOMAIN_POINT) {
curves_->selection_point_float_for_write().fill(0.0f);
}
else if (curves_id_->selection_domain == ATTR_DOMAIN_CURVE) {
curves_->selection_curve_float_for_write().fill(0.0f);
}
curves::fill_selection_false(selection_.span);
}
}
@ -116,25 +118,25 @@ struct SelectionPaintOperationExecutor {
}
}
if (curves_id_->selection_domain == ATTR_DOMAIN_POINT) {
MutableSpan<float> selection = curves_->selection_point_float_for_write();
if (selection_.domain == ATTR_DOMAIN_POINT) {
if (falloff_shape == PAINT_FALLOFF_SHAPE_TUBE) {
this->paint_point_selection_projected_with_symmetry(selection);
this->paint_point_selection_projected_with_symmetry(selection_.span);
}
else if (falloff_shape == PAINT_FALLOFF_SHAPE_SPHERE) {
this->paint_point_selection_spherical_with_symmetry(selection);
this->paint_point_selection_spherical_with_symmetry(selection_.span);
}
}
else {
MutableSpan<float> selection = curves_->selection_curve_float_for_write();
if (falloff_shape == PAINT_FALLOFF_SHAPE_TUBE) {
this->paint_curve_selection_projected_with_symmetry(selection);
this->paint_curve_selection_projected_with_symmetry(selection_.span);
}
else if (falloff_shape == PAINT_FALLOFF_SHAPE_SPHERE) {
this->paint_curve_selection_spherical_with_symmetry(selection);
this->paint_curve_selection_spherical_with_symmetry(selection_.span);
}
}
selection_.finish();
/* Use #ID_RECALC_GEOMETRY instead of #ID_RECALC_SELECT because
* selection is handled as a generic attribute for now. */
DEG_id_tag_update(&curves_id_->id, ID_RECALC_GEOMETRY);

View File

@ -157,8 +157,9 @@ struct SlideOperationExecutor {
brush_radius_factor_ = brush_radius_factor(*brush_, stroke_extension);
brush_strength_ = brush_strength_get(*ctx_.scene, *brush_, stroke_extension);
curve_factors_ = get_curves_selection(*curves_id_orig_);
curve_selection_ = retrieve_selected_curves(*curves_id_orig_, selected_curve_indices_);
curve_factors_ = curves_orig_->attributes().lookup_or_default(
".selection", ATTR_DOMAIN_CURVE, 1.0f);
curve_selection_ = curves::retrieve_selected_curves(*curves_id_orig_, selected_curve_indices_);
brush_pos_re_ = stroke_extension.mouse_position;

View File

@ -78,8 +78,9 @@ struct SmoothOperationExecutor {
brush_strength_ = brush_strength_get(*ctx_.scene, *brush_, stroke_extension);
brush_pos_re_ = stroke_extension.mouse_position;
point_factors_ = get_point_selection(*curves_id_);
curve_selection_ = retrieve_selected_curves(*curves_id_, selected_curve_indices_);
point_factors_ = curves_->attributes().lookup_or_default<float>(
".selection", ATTR_DOMAIN_POINT, 1.0f);
curve_selection_ = curves::retrieve_selected_curves(*curves_id_, selected_curve_indices_);
transforms_ = CurvesSurfaceTransforms(*object_, curves_id_->surface);
const eBrushFalloffShape falloff_shape = static_cast<eBrushFalloffShape>(

View File

@ -125,8 +125,9 @@ struct SnakeHookOperatorExecutor {
transforms_ = CurvesSurfaceTransforms(*object_, curves_id_->surface);
curve_factors_ = get_curves_selection(*curves_id_);
curve_selection_ = retrieve_selected_curves(*curves_id_, selected_curve_indices_);
curve_factors_ = curves_->attributes().lookup_or_default(
".selection", ATTR_DOMAIN_CURVE, 1.0f);
curve_selection_ = curves::retrieve_selected_curves(*curves_id_, selected_curve_indices_);
brush_pos_prev_re_ = self.last_mouse_position_re_;
brush_pos_re_ = stroke_extension.mouse_position;

View File

@ -16,6 +16,9 @@
#include "ED_render.h"
#include "ED_screen.h"
#include "RNA_access.h"
#include "RNA_define.h"
#include "DEG_depsgraph_build.h"
#include "node_intern.hh"
@ -24,6 +27,11 @@ namespace blender::ed::space_node {
struct NodeClipboardItem {
bNode *node;
/**
* The offset and size of the node from when it was drawn. Stored here since it doesn't remain
* valid for the nodes in the clipboard.
*/
rctf draw_rect;
/* Extra info to validate the node on creation. Otherwise we may reference missing data. */
ID *id;
@ -74,15 +82,24 @@ struct NodeClipboard {
return ok;
}
void add_node(bNode *node)
void add_node(const bNode &node,
Map<const bNode *, bNode *> &node_map,
Map<const bNodeSocket *, bNodeSocket *> &socket_map)
{
/* No ID refcounting, this node is virtual,
* detached from any actual Blender data currently. */
bNode *new_node = bke::node_copy_with_mapping(
nullptr, node, LIB_ID_CREATE_NO_USER_REFCOUNT | LIB_ID_CREATE_NO_MAIN, false, socket_map);
node_map.add_new(&node, new_node);
NodeClipboardItem item;
item.node = node;
item.id = node->id;
item.draw_rect = node.runtime->totr;
item.node = new_node;
item.id = new_node->id;
if (item.id) {
item.id_name = node->id->name;
if (ID_IS_LINKED(node->id)) {
item.library_name = node->id->lib->filepath_abs;
item.id_name = new_node->id->name;
if (ID_IS_LINKED(new_node->id)) {
item.library_name = new_node->id->lib->filepath_abs;
}
}
this->nodes.append(std::move(item));
@ -110,22 +127,13 @@ static int node_clipboard_copy_exec(bContext *C, wmOperator * /*op*/)
Map<const bNode *, bNode *> node_map;
Map<const bNodeSocket *, bNodeSocket *> socket_map;
for (bNode *node : tree.all_nodes()) {
for (const bNode *node : tree.all_nodes()) {
if (node->flag & SELECT) {
/* No ID reference counting, this node is virtual, detached from any actual Blender data. */
bNode *new_node = bke::node_copy_with_mapping(nullptr,
*node,
LIB_ID_CREATE_NO_USER_REFCOUNT |
LIB_ID_CREATE_NO_MAIN,
false,
socket_map);
node_map.add_new(node, new_node);
clipboard.add_node(*node, node_map, socket_map);
}
}
for (bNode *new_node : node_map.values()) {
clipboard.add_node(new_node);
/* Parent pointer must be redirected to new node or detached if parent is not copied. */
if (new_node->parent) {
if (node_map.contains(new_node->parent)) {
@ -197,14 +205,6 @@ static int node_clipboard_paste_exec(bContext *C, wmOperator *op)
node_deselect_all(tree);
/* calculate "barycenter" for placing on mouse cursor */
float2 center = {0.0f, 0.0f};
for (const NodeClipboardItem &item : clipboard.nodes) {
center.x += BLI_rctf_cent_x(&item.node->runtime->totr);
center.y += BLI_rctf_cent_y(&item.node->runtime->totr);
}
center /= clipboard.nodes.size();
Map<const bNode *, bNode *> node_map;
Map<const bNodeSocket *, bNodeSocket *> socket_map;
@ -248,6 +248,27 @@ static int node_clipboard_paste_exec(bContext *C, wmOperator *op)
}
}
PropertyRNA *offset_prop = RNA_struct_find_property(op->ptr, "offset");
if (RNA_property_is_set(op->ptr, offset_prop)) {
float2 center(0);
for (NodeClipboardItem &item : clipboard.nodes) {
center.x += BLI_rctf_cent_x(&item.draw_rect);
center.y += BLI_rctf_cent_y(&item.draw_rect);
}
/* DPI factor needs to be removed when computing a View2D offset from drawing rects. */
center /= clipboard.nodes.size();
center /= UI_DPI_FAC;
float2 mouse_location;
RNA_property_float_get_array(op->ptr, offset_prop, mouse_location);
const float2 offset = mouse_location - center;
for (bNode *new_node : node_map.values()) {
new_node->locx += offset.x;
new_node->locy += offset.y;
}
}
/* Add links between existing nodes. */
for (const bNodeLink &link : clipboard.links) {
const bNode *fromnode = link.fromnode;
@ -276,16 +297,40 @@ static int node_clipboard_paste_exec(bContext *C, wmOperator *op)
return OPERATOR_FINISHED;
}
static int node_clipboard_paste_invoke(bContext *C, wmOperator *op, const wmEvent *event)
{
const ARegion *region = CTX_wm_region(C);
float2 cursor;
UI_view2d_region_to_view(&region->v2d, event->mval[0], event->mval[1], &cursor.x, &cursor.y);
RNA_float_set_array(op->ptr, "offset", cursor);
return node_clipboard_paste_exec(C, op);
}
void NODE_OT_clipboard_paste(wmOperatorType *ot)
{
ot->name = "Paste from Clipboard";
ot->description = "Pastes nodes from the clipboard to the active node tree";
ot->idname = "NODE_OT_clipboard_paste";
ot->invoke = node_clipboard_paste_invoke;
ot->exec = node_clipboard_paste_exec;
ot->poll = ED_operator_node_editable;
ot->flag = OPTYPE_REGISTER | OPTYPE_UNDO;
PropertyRNA *prop = RNA_def_float_array(
ot->srna,
"offset",
2,
nullptr,
-FLT_MAX,
FLT_MAX,
"Location",
"The 2D view location for the center of the new nodes, or unchanged if not set",
-FLT_MAX,
FLT_MAX);
RNA_def_property_flag(prop, PROP_SKIP_SAVE);
RNA_def_property_flag(prop, PROP_HIDDEN);
}
/** \} */
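The invoke/exec split above hinges on the hidden "offset" float array property: invoke stores the cursor in view space, and exec only moves the pasted nodes when the property was actually set. A condensed sketch of that gating, restating the hunk rather than adding a new API:

    PropertyRNA *offset_prop = RNA_struct_find_property(op->ptr, "offset");
    if (RNA_property_is_set(op->ptr, offset_prop)) {
      float2 mouse_location;
      RNA_property_float_get_array(op->ptr, offset_prop, mouse_location);
      /* Shift every pasted node by mouse_location minus the draw-rect center / UI_DPI_FAC. */
    }
    /* Otherwise (plain exec, since the property is PROP_SKIP_SAVE and PROP_HIDDEN) the pasted
     * nodes keep their original locations. */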

View File

@ -699,8 +699,7 @@ static int node_add_file_exec(bContext *C, wmOperator *op)
}
/* When adding new image file via drag-drop we need to load imbuf in order
* to get proper image source.
*/
* to get proper image source. */
if (RNA_struct_property_is_set(op->ptr, "filepath")) {
BKE_image_signal(bmain, ima, nullptr, IMA_SIGNAL_RELOAD);
WM_event_add_notifier(C, NC_IMAGE | NA_EDITED, ima);
@ -717,7 +716,7 @@ static int node_add_file_invoke(bContext *C, wmOperator *op, const wmEvent *even
ARegion *region = CTX_wm_region(C);
SpaceNode *snode = CTX_wm_space_node(C);
/* convert mouse coordinates to v2d space */
/* Convert mouse coordinates to `v2d` space. */
UI_view2d_region_to_view(&region->v2d,
event->mval[0],
event->mval[1],
@ -860,13 +859,12 @@ static int new_node_tree_exec(bContext *C, wmOperator *op)
ntree = ntreeAddTree(bmain, treename, idname);
/* hook into UI */
/* Hook into UI. */
UI_context_active_but_prop_get_templateID(C, &ptr, &prop);
if (prop) {
/* RNA_property_pointer_set increases the user count,
* fixed here as the editor is the initial user.
*/
/* #RNA_property_pointer_set increases the user count, fixed here as the editor is the initial
* user. */
id_us_min(&ntree->id);
RNA_id_pointer_create(&ntree->id, &idptr);

View File

@ -59,6 +59,7 @@
#include "ED_gpencil.h"
#include "ED_node.h"
#include "ED_node.hh"
#include "ED_screen.h"
#include "ED_space_api.h"
#include "ED_viewer_path.hh"
@ -84,9 +85,6 @@
namespace geo_log = blender::nodes::geo_eval_log;
using blender::GPointer;
using blender::Vector;
/**
* This is passed to many functions which draw the node editor.
*/
@ -278,7 +276,7 @@ static Array<uiBlock *> node_uiblocks_init(const bContext &C, const Span<bNode *
for (const int i : nodes.index_range()) {
const std::string block_name = "node_" + std::string(nodes[i]->name);
blocks[i] = UI_block_begin(&C, CTX_wm_region(&C), block_name.c_str(), UI_EMBOSS);
/* this cancels events for background nodes */
/* This cancels events for background nodes. */
UI_block_flag_enable(blocks[i], UI_BLOCK_CLIP_EVENTS);
}
@ -1240,12 +1238,8 @@ static void node_socket_draw_nested(const bContext &C,
UI_block_emboss_set(&block, old_emboss);
}
} // namespace blender::ed::space_node
void ED_node_socket_draw(bNodeSocket *sock, const rcti *rect, const float color[4], float scale)
void node_socket_draw(bNodeSocket *sock, const rcti *rect, const float color[4], float scale)
{
using namespace blender::ed::space_node;
const float size = NODE_SOCKSIZE_DRAW_MULIPLIER * NODE_SOCKSIZE * scale;
rcti draw_rect = *rect;
float outline_color[4] = {0};
@ -1292,10 +1286,6 @@ void ED_node_socket_draw(bNodeSocket *sock, const rcti *rect, const float color[
GPU_blend(state);
}
namespace blender::ed::space_node {
/* ************** Socket callbacks *********** */
static void node_draw_preview_background(rctf *rect)
{
GPUVertFormat *format = immVertexFormat();
@ -1526,7 +1516,8 @@ static void node_draw_sockets(const View2D &v2d,
scale,
selected);
if (--selected_input_len == 0) {
break; /* Stop as soon as last one is drawn. */
/* Stop as soon as last one is drawn. */
break;
}
}
}
@ -1552,7 +1543,8 @@ static void node_draw_sockets(const View2D &v2d,
scale,
selected);
if (--selected_output_len == 0) {
break; /* Stop as soon as last one is drawn. */
/* Stop as soon as last one is drawn. */
break;
}
}
}
@ -2163,11 +2155,6 @@ static void node_draw_basis(const bContext &C,
node_toggle_button_cb,
POINTER_FROM_INT(node.identifier),
(void *)"NODE_OT_preview_toggle");
/* XXX this does not work when node is activated and the operator called right afterwards,
* since active ID is not updated yet (needs to process the notifier).
* This can only work as visual indicator! */
// if (!(node.flag & (NODE_ACTIVE_ID|NODE_DO_OUTPUT)))
// UI_but_flag_enable(but, UI_BUT_DISABLED);
UI_block_emboss_set(&block, UI_EMBOSS);
}
/* Group edit. */
@ -2610,6 +2597,18 @@ int node_get_resize_cursor(NodeResizeDirection directions)
return WM_CURSOR_EDIT;
}
static const bNode *find_node_under_cursor(SpaceNode &snode, const float2 &cursor)
{
/* Check nodes front to back. */
const Span<bNode *> nodes = snode.edittree->all_nodes();
for (int i = nodes.index_range().last(); i >= 0; i--) {
if (BLI_rctf_isect_pt(&nodes[i]->runtime->totr, cursor[0], cursor[1])) {
return nodes[i];
}
}
return nullptr;
}
void node_set_cursor(wmWindow &win, SpaceNode &snode, const float2 &cursor)
{
const bNodeTree *ntree = snode.edittree;
@ -2617,36 +2616,28 @@ void node_set_cursor(wmWindow &win, SpaceNode &snode, const float2 &cursor)
WM_cursor_set(&win, WM_CURSOR_DEFAULT);
return;
}
bNode *node;
bNodeSocket *sock;
int wmcursor = WM_CURSOR_DEFAULT;
if (node_find_indicated_socket(
snode, &node, &sock, cursor, (eNodeSocketInOut)(SOCK_IN | SOCK_OUT))) {
if (node_find_indicated_socket(snode, cursor, SOCK_IN | SOCK_OUT)) {
WM_cursor_set(&win, WM_CURSOR_DEFAULT);
return;
}
const bNode *node = find_node_under_cursor(snode, cursor);
if (!node) {
WM_cursor_set(&win, WM_CURSOR_DEFAULT);
return;
}
const NodeResizeDirection dir = node_get_resize_direction(node, cursor[0], cursor[1]);
if (node->is_frame() && dir == NODE_RESIZE_NONE) {
/* Indicate that frame nodes can be moved/selected on their borders. */
const rctf frame_inside = node_frame_rect_inside(*node);
if (!BLI_rctf_isect_pt(&frame_inside, cursor[0], cursor[1])) {
WM_cursor_set(&win, WM_CURSOR_NSEW_SCROLL);
return;
}
WM_cursor_set(&win, WM_CURSOR_DEFAULT);
return;
}
/* Check nodes front to back. */
for (node = (bNode *)ntree->nodes.last; node; node = node->prev) {
if (BLI_rctf_isect_pt(&node->runtime->totr, cursor[0], cursor[1])) {
break; /* First hit on node stops. */
}
}
if (node) {
NodeResizeDirection dir = node_get_resize_direction(node, cursor[0], cursor[1]);
wmcursor = node_get_resize_cursor(dir);
/* We want to indicate that Frame nodes can be moved/selected on their borders. */
if (node->type == NODE_FRAME && dir == NODE_RESIZE_NONE) {
const rctf frame_inside = node_frame_rect_inside(*node);
if (!BLI_rctf_isect_pt(&frame_inside, cursor[0], cursor[1])) {
wmcursor = WM_CURSOR_NSEW_SCROLL;
}
}
}
WM_cursor_set(&win, wmcursor);
WM_cursor_set(&win, node_get_resize_cursor(dir));
}
static void count_multi_input_socket_links(bNodeTree &ntree, SpaceNode &snode)
@ -2676,28 +2667,28 @@ static void frame_node_prepare_for_draw(bNode &node, Span<bNode *> nodes)
const float margin = 1.5f * U.widget_unit;
NodeFrame *data = (NodeFrame *)node.storage;
/* init rect from current frame size */
/* Initialize rect from current frame size. */
rctf rect;
node_to_updated_rect(node, rect);
/* frame can be resized manually only if shrinking is disabled or no children are attached */
/* Frame can be resized manually only if shrinking is disabled or no children are attached. */
data->flag |= NODE_FRAME_RESIZEABLE;
/* for shrinking bbox, initialize the rect from first child node */
/* For shrinking bounding box, initialize the rect from first child node. */
bool bbinit = (data->flag & NODE_FRAME_SHRINK);
/* fit bounding box to all children */
/* Fit bounding box to all children. */
for (const bNode *tnode : nodes) {
if (tnode->parent != &node) {
continue;
}
/* add margin to node rect */
/* Add margin to node rect. */
rctf noderect = tnode->runtime->totr;
noderect.xmin -= margin;
noderect.xmax += margin;
noderect.ymin -= margin;
noderect.ymax += margin;
/* first child initializes frame */
/* First child initializes frame. */
if (bbinit) {
bbinit = false;
rect = noderect;
@ -2708,7 +2699,7 @@ static void frame_node_prepare_for_draw(bNode &node, Span<bNode *> nodes)
}
}
/* now adjust the frame size from view-space bounding box */
/* Now adjust the frame size from view-space bounding box. */
const float2 offset = node_from_view(node, {rect.xmin, rect.ymax});
node.offsetx = offset.x;
node.offsety = offset.y;
@ -2721,10 +2712,9 @@ static void frame_node_prepare_for_draw(bNode &node, Span<bNode *> nodes)
static void reroute_node_prepare_for_draw(bNode &node)
{
/* get "global" coords */
const float2 loc = node_to_view(node, float2(0));
/* reroute node has exactly one input and one output, both in the same place */
/* Reroute node has exactly one input and one output, both in the same place. */
bNodeSocket *socket = (bNodeSocket *)node.outputs.first;
socket->runtime->locx = loc.x;
socket->runtime->locy = loc.y;
@ -2798,10 +2788,10 @@ static void frame_node_draw_label(TreeDrawContext &tree_draw_ctx,
BLF_enable(fontid, BLF_ASPECT);
BLF_aspect(fontid, aspect, aspect, 1.0f);
/* clamp otherwise it can suck up a LOT of memory */
/* Clamp. Otherwise it can suck up a LOT of memory. */
BLF_size(fontid, MIN2(24.0f, font_size) * U.dpi_fac);
/* title color */
/* Title color. */
int color_id = node_get_colorid(tree_draw_ctx, node);
uchar color[3];
UI_GetThemeColorBlendShade3ubv(TH_TEXT, color_id, 0.4f, 10, color);
@ -2814,7 +2804,7 @@ static void frame_node_draw_label(TreeDrawContext &tree_draw_ctx,
/* 'x' doesn't need aspect correction */
const rctf &rct = node.runtime->totr;
/* XXX a bit hacky, should use separate align values for x and y */
/* XXX a bit hacky, should use separate align values for x and y. */
float x = BLI_rctf_cent_x(&rct) - (0.5f * width);
float y = rct.ymax - label_height;
@ -2825,24 +2815,23 @@ static void frame_node_draw_label(TreeDrawContext &tree_draw_ctx,
BLF_draw(fontid, label, sizeof(label));
}
/* draw text body */
/* Draw text body. */
if (node.id) {
const Text *text = (const Text *)node.id;
const int line_height_max = BLF_height_max(fontid);
const float line_spacing = (line_height_max * aspect);
const float line_width = (BLI_rctf_size_x(&rct) - margin) / aspect;
/* 'x' doesn't need aspect correction */
/* 'x' doesn't need aspect correction. */
x = rct.xmin + margin;
y = rct.ymax - label_height - (has_label ? line_spacing : 0);
/* early exit */
int y_min = y + ((margin * 2) - (y - rct.ymin));
BLF_enable(fontid, BLF_CLIPPING | BLF_WORD_WRAP);
BLF_clipping(fontid,
rct.xmin,
/* round to avoid clipping half-way through a line */
/* Round to avoid clipping half-way through a line. */
y - (floorf(((y - rct.ymin) - (margin * 2)) / line_spacing) * line_spacing),
rct.xmin + line_width,
rct.ymax);
@ -2878,7 +2867,7 @@ static void frame_node_draw(const bContext &C,
bNode &node,
uiBlock &block)
{
/* skip if out of view */
/* Skip if out of view. */
if (BLI_rctf_isect(&node.runtime->totr, &region.v2d.cur, nullptr) == false) {
UI_block_end(&C, &block);
return;
@ -2888,10 +2877,8 @@ static void frame_node_draw(const bContext &C,
UI_GetThemeColor4fv(TH_NODE_FRAME, color);
const float alpha = color[3];
/* shadow */
node_draw_shadow(snode, node, BASIS_RAD, alpha);
/* body */
if (node.flag & NODE_CUSTOM_COLOR) {
rgba_float_args_set(color, node.color[0], node.color[1], node.color[2], alpha);
}
@ -2903,7 +2890,7 @@ static void frame_node_draw(const bContext &C,
UI_draw_roundbox_corner_set(UI_CNR_ALL);
UI_draw_roundbox_4fv(&rct, true, BASIS_RAD, color);
/* outline active and selected emphasis */
/* Outline active and selected emphasis. */
if (node.flag & SELECT) {
if (node.flag & NODE_ACTIVE) {
UI_GetThemeColorShadeAlpha4fv(TH_ACTIVE, 0, -40, color);
@ -2915,7 +2902,7 @@ static void frame_node_draw(const bContext &C,
UI_draw_roundbox_aa(&rct, false, BASIS_RAD, color);
}
/* label and text */
/* Label and text. */
frame_node_draw_label(tree_draw_ctx, ntree, node, snode);
node_draw_extra_info_panel(tree_draw_ctx, snode, node, block);
@ -2927,7 +2914,7 @@ static void frame_node_draw(const bContext &C,
static void reroute_node_draw(
const bContext &C, ARegion &region, bNodeTree &ntree, bNode &node, uiBlock &block)
{
/* skip if out of view */
/* Skip if out of view. */
const rctf &rct = node.runtime->totr;
if (rct.xmax < region.v2d.cur.xmin || rct.xmin > region.v2d.cur.xmax ||
rct.ymax < region.v2d.cur.ymin || node.runtime->totr.ymin > region.v2d.cur.ymax) {
@ -2936,7 +2923,7 @@ static void reroute_node_draw(
}
if (node.label[0] != '\0') {
/* draw title (node label) */
/* Draw title (node label). */
char showname[128]; /* 128 used below */
BLI_strncpy(showname, node.label, sizeof(showname));
const short width = 512;
@ -2961,9 +2948,8 @@ static void reroute_node_draw(
UI_but_drawflag_disable(label_but, UI_BUT_TEXT_LEFT);
}
/* only draw input socket. as they all are placed on the same position.
* highlight also if node itself is selected, since we don't display the node body separately!
*/
/* Only draw the input socket, since they are all placed at the same position. Also highlight
* if the node itself is selected, since we don't display the node body separately. */
node_draw_sockets(region.v2d, C, ntree, node, block, false, node.flag & SELECT);
UI_block_end(&C, &block);
@ -3094,9 +3080,7 @@ static void snode_setup_v2d(SpaceNode &snode, ARegion &region, const float2 &cen
UI_view2d_center_set(&v2d, center[0], center[1]);
UI_view2d_view_ortho(&v2d);
/* Aspect + font, set each time. */
snode.runtime->aspect = BLI_rctf_size_x(&v2d.cur) / float(region.winx);
// XXX snode->curfont = uiSetCurFont_ext(snode->aspect);
}
/* Similar to is_compositor_enabled() in draw_manager.c but checks all 3D views. */

View File

@ -168,12 +168,12 @@ static int compo_get_recalc_flags(const bContext *C)
return recalc_flags;
}
/* called by compo, only to check job 'stop' value */
/* Called by compositor, only to check job 'stop' value. */
static bool compo_breakjob(void *cjv)
{
CompoJob *cj = (CompoJob *)cjv;
/* without G.is_break 'ESC' won't quit - which annoys users */
/* Without G.is_break 'ESC' won't quit - which annoys users. */
return (*(cj->stop)
#ifdef USE_ESC_COMPO
|| G.is_break
@ -181,7 +181,7 @@ static bool compo_breakjob(void *cjv)
);
}
/* called by compo, wmJob sends notifier */
/* Called by compositor, #wmJob sends notifier. */
static void compo_statsdrawjob(void *cjv, const char * /*str*/)
{
CompoJob *cj = (CompoJob *)cjv;
@ -189,7 +189,7 @@ static void compo_statsdrawjob(void *cjv, const char * /*str*/)
*(cj->do_update) = true;
}
/* called by compo, wmJob sends notifier */
/* Called by compositor, wmJob sends notifier. */
static void compo_redrawjob(void *cjv)
{
CompoJob *cj = (CompoJob *)cjv;
@ -210,8 +210,8 @@ static void compo_freejob(void *cjv)
MEM_freeN(cj);
}
/* only now we copy the nodetree, so adding many jobs while
* sliding buttons doesn't frustrate */
/* Only now we copy the nodetree, so adding many jobs while
* sliding buttons doesn't frustrate. */
static void compo_initjob(void *cjv)
{
CompoJob *cj = (CompoJob *)cjv;
@ -236,7 +236,7 @@ static void compo_initjob(void *cjv)
}
}
/* called before redraw notifiers, it moves finished previews over */
/* Called before redraw notifiers, it moves finished previews over. */
static void compo_updatejob(void * /*cjv*/)
{
WM_main_add_notifier(NC_SCENE | ND_COMPO_RESULT, nullptr);
@ -249,7 +249,7 @@ static void compo_progressjob(void *cjv, float progress)
*(cj->progress) = progress;
}
/* only this runs inside thread */
/* Only this runs inside thread. */
static void compo_startjob(void *cjv,
/* Cannot be const, this function implements wm_jobs_start_callback.
* NOLINTNEXTLINE: readability-non-const-parameter. */
@ -278,8 +278,6 @@ static void compo_startjob(void *cjv,
ntree->runtime->update_draw = compo_redrawjob;
ntree->runtime->udh = cj;
// XXX BIF_store_spare();
/* 1 is do_previews */
BKE_callback_exec_id(cj->bmain, &scene->id, BKE_CB_EVT_COMPOSITE_PRE);
if ((cj->scene->r.scemode & R_MULTIVIEW) == 0) {
@ -331,7 +329,7 @@ void ED_node_composite_job(const bContext *C, bNodeTree *nodetree, Scene *scene_
Scene *scene = CTX_data_scene(C);
ViewLayer *view_layer = CTX_data_view_layer(C);
/* to fix bug: T32272. */
/* See T32272. */
if (G.is_rendering) {
return;
}
@ -351,14 +349,14 @@ void ED_node_composite_job(const bContext *C, bNodeTree *nodetree, Scene *scene_
WM_JOB_TYPE_COMPOSITE);
CompoJob *cj = MEM_cnew<CompoJob>("compo job");
/* customdata for preview thread */
/* Custom data for preview thread. */
cj->bmain = bmain;
cj->scene = scene;
cj->view_layer = view_layer;
cj->ntree = nodetree;
cj->recalc_flags = compo_get_recalc_flags(C);
/* setup job */
/* Set up job. */
WM_jobs_customdata_set(wm_job, cj, compo_freejob);
WM_jobs_timer(wm_job, 0.1, NC_SCENE | ND_COMPO_RESULT, NC_SCENE | ND_COMPO_RESULT);
WM_jobs_callbacks_ex(wm_job,
@ -582,7 +580,7 @@ void ED_node_composit_default(const bContext *C, Scene *sce)
in->locy = 400.0f;
nodeSetActive(sce->nodetree, in);
/* links from color to color */
/* Links from color to color. */
bNodeSocket *fromsock = (bNodeSocket *)in->outputs.first;
bNodeSocket *tosock = (bNodeSocket *)out->inputs.first;
nodeAddLink(sce->nodetree, in, fromsock, out, tosock);
@ -592,7 +590,6 @@ void ED_node_composit_default(const bContext *C, Scene *sce)
void ED_node_texture_default(const bContext *C, Tex *tex)
{
/* but lets check it anyway */
if (tex->nodetree) {
if (G.debug & G_DEBUG) {
printf("error in texture initialize\n");
@ -631,16 +628,16 @@ void snode_set_context(const bContext &C)
bNodeTree *ntree = snode->nodetree;
ID *id = snode->id, *from = snode->from;
/* check the tree type */
/* Check the tree type. */
if (!treetype || (treetype->poll && !treetype->poll(&C, treetype))) {
/* invalid tree type, skip
/* Invalid tree type, skip.
* NOTE: not resetting the node path here, invalid #bNodeTreeType
* may still be registered at a later point. */
return;
}
if (snode->nodetree && !STREQ(snode->nodetree->idname, snode->tree_idname)) {
/* current tree does not match selected type, clear tree path */
/* Current tree does not match selected type, clear tree path. */
ntree = nullptr;
id = nullptr;
from = nullptr;
@ -648,7 +645,7 @@ void snode_set_context(const bContext &C)
if (!(snode->flag & SNODE_PIN) || ntree == nullptr) {
if (treetype->get_from_context) {
/* reset and update from context */
/* Reset and update from context. */
ntree = nullptr;
id = nullptr;
from = nullptr;
@ -681,7 +678,7 @@ void ED_node_set_active(
const bool was_output = (node->flag & NODE_DO_OUTPUT) != 0;
bool do_update = false;
/* generic node group output: set node as active output */
/* Generic node group output: set node as active output. */
if (node->type == NODE_GROUP_OUTPUT) {
for (bNode *node_iter : ntree->all_nodes()) {
if (node_iter->type == NODE_GROUP_OUTPUT) {
@ -696,7 +693,7 @@ void ED_node_set_active(
}
}
/* tree specific activate calls */
/* Tree specific activate calls. */
if (ntree->type == NTREE_SHADER) {
if (ELEM(node->type,
SH_NODE_OUTPUT_MATERIAL,
@ -760,7 +757,7 @@ void ED_node_set_active(
WM_main_add_notifier(NC_MATERIAL | ND_NODES, node->id);
}
else if (ntree->type == NTREE_COMPOSIT) {
/* make active viewer, currently only 1 supported... */
/* Make active viewer, currently only one is supported. */
if (ELEM(node->type, CMP_NODE_VIEWER, CMP_NODE_SPLITVIEWER)) {
for (bNode *node_iter : ntree->all_nodes()) {
if (ELEM(node_iter->type, CMP_NODE_VIEWER, CMP_NODE_SPLITVIEWER)) {
@ -822,86 +819,6 @@ void ED_node_post_apply_transform(bContext * /*C*/, bNodeTree * /*ntree*/)
namespace blender::ed::space_node {
/* -------------------------------------------------------------------- */
/** \name Generic Operator Functions for Nodes
* \{ */
#if 0 /* UNUSED */
static bool edit_node_poll(bContext *C)
{
return ED_operator_node_active(C);
}
static void edit_node_properties(wmOperatorType *ot)
{
/* XXX could node be a context pointer? */
RNA_def_string(ot->srna, "node", nullptr, MAX_NAME, "Node", "");
RNA_def_int(ot->srna, "socket", 0, 0, MAX_SOCKET, "Socket", "", 0, MAX_SOCKET);
RNA_def_enum(ot->srna, "in_out", rna_enum_node_socket_in_out_items, SOCK_IN, "Socket Side", "");
}
static int edit_node_invoke_properties(bContext *C, wmOperator *op)
{
if (!RNA_struct_property_is_set(op->ptr, "node")) {
bNode *node = CTX_data_pointer_get_type(C, "node", &RNA_Node).data;
if (!node) {
return 0;
}
else {
RNA_string_set(op->ptr, "node", node->name);
}
}
if (!RNA_struct_property_is_set(op->ptr, "in_out")) {
RNA_enum_set(op->ptr, "in_out", SOCK_IN);
}
if (!RNA_struct_property_is_set(op->ptr, "socket")) {
RNA_int_set(op->ptr, "socket", 0);
}
return 1;
}
static void edit_node_properties_get(
wmOperator *op, bNodeTree *ntree, bNode **r_node, bNodeSocket **r_sock, int *r_in_out)
{
bNode *node;
bNodeSocket *sock = nullptr;
char nodename[MAX_NAME];
int sockindex;
int in_out;
RNA_string_get(op->ptr, "node", nodename);
node = nodeFindNodebyName(ntree, nodename);
in_out = RNA_enum_get(op->ptr, "in_out");
sockindex = RNA_int_get(op->ptr, "socket");
switch (in_out) {
case SOCK_IN:
sock = BLI_findlink(&node->inputs, sockindex);
break;
case SOCK_OUT:
sock = BLI_findlink(&node->outputs, sockindex);
break;
}
if (r_node) {
*r_node = node;
}
if (r_sock) {
*r_sock = sock;
}
if (r_in_out) {
*r_in_out = in_out;
}
}
#endif
/** \} */
/* -------------------------------------------------------------------- */
/** \name Node Generic
* \{ */
@ -1022,7 +939,7 @@ static int node_resize_modal(bContext *C, wmOperator *op, const wmEvent *event)
}
}
/* height works the other way round ... */
/* Height works the other way round. */
{
float heightmin = UI_DPI_FAC * node->typeinfo->minheight;
float heightmax = UI_DPI_FAC * node->typeinfo->maxheight;
@ -1039,9 +956,8 @@ static int node_resize_modal(bContext *C, wmOperator *op, const wmEvent *event)
}
}
/* XXX make callback? */
if (node->type == NODE_FRAME) {
/* keep the offset symmetric around center point */
/* Keep the offset symmetric around center point. */
if (nsw->directions & NODE_RESIZE_LEFT) {
node->locx = nsw->oldlocx + 0.5f * dx;
node->offsetx = nsw->oldoffsetx + 0.5f * dx;
@ -1101,7 +1017,7 @@ static int node_resize_invoke(bContext *C, wmOperator *op, const wmEvent *event)
return OPERATOR_CANCELLED | OPERATOR_PASS_THROUGH;
}
/* convert mouse coordinates to v2d space */
/* Convert mouse coordinates to `v2d` space. */
float2 cursor;
int2 mval;
WM_event_drag_start_mval(event, region, mval);
@ -1169,7 +1085,7 @@ void node_set_hidden_sockets(SpaceNode *snode, bNode *node, int set)
}
}
else {
/* hide unused sockets */
/* Hide unused sockets. */
LISTBASE_FOREACH (bNodeSocket *, sock, &node->inputs) {
if (sock->link == nullptr) {
sock->flag |= SOCK_HIDDEN;
@ -1183,7 +1099,6 @@ void node_set_hidden_sockets(SpaceNode *snode, bNode *node, int set)
}
}
/* checks snode->mouse position, and returns found node/socket */
static bool cursor_isect_multi_input_socket(const float2 &cursor, const bNodeSocket &socket)
{
const float node_socket_height = node_socket_calculate_height(socket);
@ -1205,25 +1120,22 @@ static bool cursor_isect_multi_input_socket(const float2 &cursor, const bNodeSoc
return false;
}
bool node_find_indicated_socket(SpaceNode &snode,
bNode **nodep,
bNodeSocket **sockp,
const float2 &cursor,
const eNodeSocketInOut in_out)
bNodeSocket *node_find_indicated_socket(SpaceNode &snode,
const float2 &cursor,
const eNodeSocketInOut in_out)
{
rctf rect;
const float size_sock_padded = NODE_SOCKSIZE + 4;
*nodep = nullptr;
*sockp = nullptr;
snode.edittree->ensure_topology_cache();
const Span<bNode *> nodes = snode.edittree->all_nodes();
for (int i = nodes.index_range().last(); i >= 0; i--) {
bNode &node = *nodes[i];
/* check if we click in a socket */
LISTBASE_FOREACH_BACKWARD (bNode *, node, &snode.edittree->nodes) {
BLI_rctf_init_pt_radius(&rect, cursor, size_sock_padded);
if (!(node->flag & NODE_HIDDEN)) {
/* extra padding inside and out - allow dragging on the text areas too */
if (!(node.flag & NODE_HIDDEN)) {
/* Extra padding inside and out - allow dragging on the text areas too. */
if (in_out == SOCK_IN) {
rect.xmax += NODE_SOCKSIZE;
rect.xmin -= NODE_SOCKSIZE * 4;
@ -1235,37 +1147,31 @@ bool node_find_indicated_socket(SpaceNode &snode,
}
if (in_out & SOCK_IN) {
LISTBASE_FOREACH (bNodeSocket *, sock, &node->inputs) {
for (bNodeSocket *sock : node.input_sockets()) {
if (sock->is_visible()) {
const float2 location(sock->runtime->locx, sock->runtime->locy);
if (sock->flag & SOCK_MULTI_INPUT && !(node->flag & NODE_HIDDEN)) {
if (sock->flag & SOCK_MULTI_INPUT && !(node.flag & NODE_HIDDEN)) {
if (cursor_isect_multi_input_socket(cursor, *sock)) {
if (!socket_is_occluded(location, *node, snode)) {
*nodep = node;
*sockp = sock;
return true;
if (!socket_is_occluded(location, node, snode)) {
return sock;
}
}
}
else if (BLI_rctf_isect_pt(&rect, location.x, location.y)) {
if (!socket_is_occluded(location, *node, snode)) {
*nodep = node;
*sockp = sock;
return true;
if (!socket_is_occluded(location, node, snode)) {
return sock;
}
}
}
}
}
if (in_out & SOCK_OUT) {
LISTBASE_FOREACH (bNodeSocket *, sock, &node->outputs) {
for (bNodeSocket *sock : node.output_sockets()) {
if (sock->is_visible()) {
const float2 location(sock->runtime->locx, sock->runtime->locy);
if (BLI_rctf_isect_pt(&rect, location.x, location.y)) {
if (!socket_is_occluded(location, *node, snode)) {
*nodep = node;
*sockp = sock;
return true;
if (!socket_is_occluded(location, node, snode)) {
return sock;
}
}
}
@ -1273,7 +1179,7 @@ bool node_find_indicated_socket(SpaceNode &snode,
}
}
return false;
return nullptr;
}
/** \} */
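Call sites elsewhere in this diff migrate to the return-value based lookup; a minimal before/after sketch (variable names are illustrative):

    /* Before: bool result plus two output parameters.
     * bNode *node;
     * bNodeSocket *sock;
     * if (node_find_indicated_socket(snode, &node, &sock, cursor, SOCK_IN)) { ... } */

    /* After: the socket is returned directly and the node is reached through the topology cache. */
    if (bNodeSocket *sock = node_find_indicated_socket(snode, cursor, SOCK_IN)) {
      bNode &node = sock->owner_node();
      /* ... use node and sock ... */
    }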
@ -1321,7 +1227,7 @@ static void node_duplicate_reparent_recursive(bNodeTree *ntree,
node->flag |= NODE_TEST;
/* find first selected parent */
/* Find first selected parent. */
for (parent = node->parent; parent; parent = parent->parent) {
if (parent->flag & SELECT) {
if (!(parent->flag & NODE_TEST)) {
@ -1330,7 +1236,7 @@ static void node_duplicate_reparent_recursive(bNodeTree *ntree,
break;
}
}
/* reparent node copy to parent copy */
/* Reparent node copy to parent copy. */
if (parent) {
nodeDetachNode(ntree, node_map.lookup(node));
nodeAttachNode(ntree, node_map.lookup(node), node_map.lookup(parent));
@ -1377,9 +1283,8 @@ static int node_duplicate_exec(bContext *C, wmOperator *op)
/* Copy links between selected nodes. */
bNodeLink *lastlink = (bNodeLink *)ntree->links.last;
LISTBASE_FOREACH (bNodeLink *, link, &ntree->links) {
/* This creates new links between copied nodes.
* If keep_inputs is set, also copies input links from unselected (when fromnode==nullptr)!
*/
/* This creates new links between copied nodes. If keep_inputs is set, also copies input links
* from unselected (when fromnode is null)! */
if (link->tonode && (link->tonode->flag & NODE_SELECT) &&
(keep_inputs || (link->fromnode && (link->fromnode->flag & NODE_SELECT)))) {
bNodeLink *newlink = MEM_cnew<bNodeLink>("bNodeLink");
@ -1396,7 +1301,7 @@ static int node_duplicate_exec(bContext *C, wmOperator *op)
newlink->fromsock = socket_map.lookup(link->fromsock);
}
else {
/* input node not copied, this keeps the original input linked */
/* Input node not copied, this keeps the original input linked. */
newlink->fromnode = link->fromnode;
newlink->fromsock = link->fromsock;
}
@ -1404,24 +1309,24 @@ static int node_duplicate_exec(bContext *C, wmOperator *op)
BLI_addtail(&ntree->links, newlink);
}
/* make sure we don't copy new links again! */
/* Make sure we don't copy new links again. */
if (link == lastlink) {
break;
}
}
/* clear flags for recursive depth-first iteration */
/* Clear flags for recursive depth-first iteration. */
for (bNode *node : ntree->all_nodes()) {
node->flag &= ~NODE_TEST;
}
/* reparent copied nodes */
/* Reparent copied nodes. */
for (bNode *node : node_map.keys()) {
if (!(node->flag & NODE_TEST)) {
node_duplicate_reparent_recursive(ntree, node_map, node);
}
}
/* deselect old nodes, select the copies instead */
/* Deselect old nodes, select the copies instead. */
for (const auto item : node_map.items()) {
bNode *src_node = item.key;
bNode *dst_node = item.value;
@ -1462,9 +1367,7 @@ void NODE_OT_duplicate(wmOperatorType *ot)
RNA_def_property_flag(prop, PROP_SKIP_SAVE);
}
/* XXX: some code needing updating to operators. */
/* goes over all scenes, reads render layers */
/* Goes over all scenes, reads render layers. */
static int node_read_viewlayers_exec(bContext *C, wmOperator * /*op*/)
{
Main *bmain = CTX_data_main(C);
@ -1501,7 +1404,6 @@ static int node_read_viewlayers_exec(bContext *C, wmOperator * /*op*/)
void NODE_OT_read_viewlayers(wmOperatorType *ot)
{
ot->name = "Read View Layers";
ot->idname = "NODE_OT_read_viewlayers";
ot->description = "Read all render layers of all used scenes";
@ -1509,9 +1411,6 @@ void NODE_OT_read_viewlayers(wmOperatorType *ot)
ot->exec = node_read_viewlayers_exec;
ot->poll = composite_node_active;
/* flags */
ot->flag = 0;
}
int node_render_changed_exec(bContext *C, wmOperator * /*op*/)
@ -1572,15 +1471,14 @@ void NODE_OT_render_changed(wmOperatorType *ot)
/** \name Node Hide Operator
* \{ */
/**
* Toggles the flag on all selected nodes. If the flag is set on all nodes it is unset.
* If the flag is not set on all nodes, it is set.
*/
static void node_flag_toggle_exec(SpaceNode *snode, int toggle_flag)
{
int tot_eq = 0, tot_neq = 0;
/* Toggles the flag on all selected nodes.
* If the flag is set on all nodes it is unset.
* If the flag is not set on all nodes, it is set.
*/
for (bNode *node : snode->edittree->all_nodes()) {
if (node->flag & SELECT) {
@ -1625,7 +1523,7 @@ static int node_hide_toggle_exec(bContext *C, wmOperator * /*op*/)
{
SpaceNode *snode = CTX_wm_space_node(C);
/* sanity checking (poll callback checks this already) */
/* Sanity checking (poll callback checks this already). */
if ((snode == nullptr) || (snode->edittree == nullptr)) {
return OPERATOR_CANCELLED;
}
@ -1656,7 +1554,7 @@ static int node_preview_toggle_exec(bContext *C, wmOperator * /*op*/)
{
SpaceNode *snode = CTX_wm_space_node(C);
/* sanity checking (poll callback checks this already) */
/* Sanity checking (poll callback checks this already). */
if ((snode == nullptr) || (snode->edittree == nullptr)) {
return OPERATOR_CANCELLED;
}
@ -1729,7 +1627,7 @@ static int node_options_toggle_exec(bContext *C, wmOperator * /*op*/)
{
SpaceNode *snode = CTX_wm_space_node(C);
/* sanity checking (poll callback checks this already) */
/* Sanity checking (poll callback checks this already). */
if ((snode == nullptr) || (snode->edittree == nullptr)) {
return OPERATOR_CANCELLED;
}
@ -1760,7 +1658,7 @@ static int node_socket_toggle_exec(bContext *C, wmOperator * /*op*/)
{
SpaceNode *snode = CTX_wm_space_node(C);
/* sanity checking (poll callback checks this already) */
/* Sanity checking (poll callback checks this already). */
if ((snode == nullptr) || (snode->edittree == nullptr)) {
return OPERATOR_CANCELLED;
}
@ -1908,7 +1806,7 @@ static int node_switch_view_exec(bContext *C, wmOperator * /*op*/)
LISTBASE_FOREACH_MUTABLE (bNode *, node, &snode->edittree->nodes) {
if (node->flag & SELECT) {
/* call the update function from the Switch View node */
/* Call the update function from the Switch View node. */
node->runtime->update = NODE_UPDATE_OPERATOR;
}
}
@ -2233,7 +2131,7 @@ static int ntree_socket_add_exec(bContext *C, wmOperator *op)
bNodeSocket *sock;
if (active_sock) {
/* insert a copy of the active socket right after it */
/* Insert a copy of the active socket right after it. */
sock = ntreeInsertSocketInterface(
ntree, in_out, active_sock->idname, active_sock->next, active_sock->name);
/* XXX this only works for actual sockets, not interface templates! */
@ -2248,7 +2146,7 @@ static int ntree_socket_add_exec(bContext *C, wmOperator *op)
LISTBASE_FOREACH (bNodeSocket *, socket_iter, sockets) {
socket_iter->flag &= ~SELECT;
}
/* make the new socket active */
/* Make the new socket selected. */
sock->flag |= SELECT;
ED_node_tree_propagate_change(C, CTX_data_main(C), snode->edittree);
@ -2293,11 +2191,11 @@ static int ntree_socket_remove_exec(bContext *C, wmOperator *op)
return OPERATOR_CANCELLED;
}
/* preferably next socket becomes active, otherwise try previous socket */
/* Preferably next socket becomes active, otherwise try previous socket. */
bNodeSocket *active_sock = (iosock->next ? iosock->next : iosock->prev);
ntreeRemoveSocketInterface(ntree, iosock);
/* set active socket */
/* Set active socket. */
if (active_sock) {
active_sock->flag |= SELECT;
}
@ -2519,12 +2417,12 @@ static bool node_shader_script_update_poll(bContext *C)
const RenderEngineType *type = RE_engines_find(scene->r.engine);
SpaceNode *snode = CTX_wm_space_node(C);
/* test if we have a render engine that supports shaders scripts */
/* Test if we have a render engine that supports shaders scripts. */
if (!(type && type->update_script_node)) {
return false;
}
/* see if we have a shader script node in context */
/* See if we have a shader script node in context. */
bNode *node = (bNode *)CTX_data_pointer_get_type(C, "node", &RNA_ShaderNodeScript).data;
if (!node && snode && snode->edittree) {
@ -2539,13 +2437,13 @@ static bool node_shader_script_update_poll(bContext *C)
}
}
/* see if we have a text datablock in context */
/* See if we have a text datablock in context. */
Text *text = (Text *)CTX_data_pointer_get_type(C, "edit_text", &RNA_Text).data;
if (text) {
return true;
}
/* we don't check if text datablock is actually in use, too slow for poll */
/* We don't check if text datablock is actually in use, too slow for poll. */
return false;
}
@ -2561,7 +2459,7 @@ static bool node_shader_script_update_text_recursive(RenderEngine *engine,
done_trees.add_new(ntree);
/* update each script that is using this text datablock */
/* Update each script that is using this text datablock. */
for (bNode *node : ntree->all_nodes()) {
if (node->type == NODE_GROUP) {
bNodeTree *ngroup = (bNodeTree *)node->id;
@ -2591,7 +2489,6 @@ static int node_shader_script_update_exec(bContext *C, wmOperator *op)
RenderEngine *engine = RE_engine_create(type);
engine->reports = op->reports;
/* get node */
bNodeTree *ntree_base = nullptr;
bNode *node = nullptr;
if (nodeptr.data) {
@ -2604,13 +2501,13 @@ static int node_shader_script_update_exec(bContext *C, wmOperator *op)
}
if (node) {
/* update single node */
/* Update single node. */
type->update_script_node(engine, ntree_base, node);
found = true;
}
else {
/* update all nodes using text datablock */
/* Update all nodes using text datablock. */
Text *text = (Text *)CTX_data_pointer_get_type(C, "edit_text", &RNA_Text).data;
if (text) {
@ -2692,17 +2589,17 @@ static int viewer_border_exec(bContext *C, wmOperator *op)
rcti rect;
rctf rectf;
/* get border from operator */
/* Get border from operator. */
WM_operator_properties_border_to_rcti(op, &rect);
/* convert border to unified space within backdrop image */
/* Convert border to unified space within backdrop image. */
viewer_border_corner_to_backdrop(
snode, region, rect.xmin, rect.ymin, ibuf->x, ibuf->y, &rectf.xmin, &rectf.ymin);
viewer_border_corner_to_backdrop(
snode, region, rect.xmax, rect.ymax, ibuf->x, ibuf->y, &rectf.xmax, &rectf.ymax);
/* clamp coordinates */
/* Clamp coordinates. */
rectf.xmin = max_ff(rectf.xmin, 0.0f);
rectf.ymin = max_ff(rectf.ymin, 0.0f);
rectf.xmax = min_ff(rectf.xmax, 1.0f);

View File

@ -311,12 +311,9 @@ bool composite_node_editable(bContext *C);
bool node_has_hidden_sockets(bNode *node);
void node_set_hidden_sockets(SpaceNode *snode, bNode *node, int set);
int node_render_changed_exec(bContext *, wmOperator *);
/** Type is #SOCK_IN and/or #SOCK_OUT. */
bool node_find_indicated_socket(SpaceNode &snode,
bNode **nodep,
bNodeSocket **sockp,
const float2 &cursor,
eNodeSocketInOut in_out);
bNodeSocket *node_find_indicated_socket(SpaceNode &snode,
const float2 &cursor,
eNodeSocketInOut in_out);
float node_link_dim_factor(const View2D &v2d, const bNodeLink &link);
bool node_link_is_hidden_or_dimmed(const View2D &v2d, const bNodeLink &link);

View File

@ -55,13 +55,13 @@
struct NodeInsertOfsData {
bNodeTree *ntree;
bNode *insert; /* inserted node */
bNode *prev, *next; /* prev/next node in the chain */
bNode *insert; /* Inserted node. */
bNode *prev, *next; /* Prev/next node in the chain. */
bNode *insert_parent;
wmTimer *anim_timer;
float offset_x; /* offset to apply to node chain */
float offset_x; /* Offset to apply to node chain. */
};
namespace blender::ed::space_node {
@ -124,27 +124,24 @@ static void pick_input_link_by_link_intersect(const bContext &C,
float2 drag_start;
RNA_float_get_array(op.ptr, "drag_start", drag_start);
bNode *node;
bNodeSocket *socket;
node_find_indicated_socket(*snode, &node, &socket, drag_start, SOCK_IN);
bNodeSocket *socket = node_find_indicated_socket(*snode, drag_start, SOCK_IN);
bNode &node = socket->owner_node();
/* Distance to test overlapping of cursor on link. */
const float cursor_link_touch_distance = 12.5f * UI_DPI_FAC;
bNodeLink *link_to_pick = nullptr;
clear_picking_highlight(&snode->edittree->links);
LISTBASE_FOREACH (bNodeLink *, link, &snode->edittree->links) {
if (link->tosock == socket) {
/* Test if the cursor is near a link. */
std::array<float2, NODE_LINK_RESOL + 1> coords;
node_link_bezier_points_evaluated(*link, coords);
for (bNodeLink *link : socket->directly_linked_links()) {
/* Test if the cursor is near a link. */
std::array<float2, NODE_LINK_RESOL + 1> coords;
node_link_bezier_points_evaluated(*link, coords);
for (const int i : IndexRange(coords.size() - 1)) {
const float distance = dist_squared_to_line_segment_v2(cursor, coords[i], coords[i + 1]);
if (distance < cursor_link_touch_distance) {
link_to_pick = link;
nldrag.last_picked_multi_input_socket_link = link_to_pick;
}
for (const int i : IndexRange(coords.size() - 1)) {
const float distance = dist_squared_to_line_segment_v2(cursor, coords[i], coords[i + 1]);
if (distance < cursor_link_touch_distance) {
link_to_pick = link;
nldrag.last_picked_multi_input_socket_link = link_to_pick;
}
}
}
@ -164,8 +161,8 @@ static void pick_input_link_by_link_intersect(const bContext &C,
link_to_pick->flag |= NODE_LINK_TEMP_HIGHLIGHT;
ED_area_tag_redraw(CTX_wm_area(&C));
if (!node_find_indicated_socket(*snode, &node, &socket, cursor, SOCK_IN)) {
pick_link(nldrag, *snode, node, *link_to_pick);
if (!node_find_indicated_socket(*snode, cursor, SOCK_IN)) {
pick_link(nldrag, *snode, &node, *link_to_pick);
}
}
}
@ -191,7 +188,7 @@ static bNodeSocket *best_socket_output(bNodeTree *ntree,
bNodeSocket *sock_target,
const bool allow_multiple)
{
/* first look for selected output */
/* First look for selected output. */
LISTBASE_FOREACH (bNodeSocket *, sock, &node->outputs) {
if (!socket_is_available(ntree, sock, allow_multiple)) {
continue;
@ -202,13 +199,13 @@ static bNodeSocket *best_socket_output(bNodeTree *ntree,
}
}
/* try to find a socket with a matching name */
/* Try to find a socket with a matching name. */
LISTBASE_FOREACH (bNodeSocket *, sock, &node->outputs) {
if (!socket_is_available(ntree, sock, allow_multiple)) {
continue;
}
/* check for same types */
/* Check for same types. */
if (sock->type == sock_target->type) {
if (STREQ(sock->name, sock_target->name)) {
return sock;
@ -216,13 +213,13 @@ static bNodeSocket *best_socket_output(bNodeTree *ntree,
}
}
/* otherwise settle for the first available socket of the right type */
/* Otherwise settle for the first available socket of the right type. */
LISTBASE_FOREACH (bNodeSocket *, sock, &node->outputs) {
if (!socket_is_available(ntree, sock, allow_multiple)) {
continue;
}
/* check for same types */
/* Check for same types. */
if (sock->type == sock_target->type) {
return sock;
}
@ -237,8 +234,8 @@ static bNodeSocket *best_socket_output(bNodeTree *ntree,
return nullptr;
}
/* this is a bit complicated, but designed to prioritize finding
* sockets of higher types, such as image, first */
/* This is a bit complicated, but designed to prioritize finding
* sockets of higher types, such as image, first. */
static bNodeSocket *best_socket_input(bNodeTree *ntree, bNode *node, int num, int replace)
{
int maxtype = 0;
@ -246,7 +243,7 @@ static bNodeSocket *best_socket_input(bNodeTree *ntree, bNode *node, int num, in
maxtype = max_ii(sock->type, maxtype);
}
/* find sockets of higher 'types' first (i.e. image) */
/* Find sockets of higher 'types' first (i.e. image). */
int a = 0;
for (int socktype = maxtype; socktype >= 0; socktype--) {
LISTBASE_FOREACH (bNodeSocket *, sock, &node->inputs) {
@ -256,8 +253,8 @@ static bNodeSocket *best_socket_input(bNodeTree *ntree, bNode *node, int num, in
}
if (sock->type == socktype) {
/* increment to make sure we don't keep finding
* the same socket on every attempt running this function */
/* Increment to make sure we don't keep finding the same socket on every attempt running
* this function. */
a++;
if (a > num) {
return sock;
@ -278,7 +275,6 @@ static bool snode_autoconnect_input(SpaceNode &snode,
{
bNodeTree *ntree = snode.edittree;
/* then we can connect */
if (replace) {
nodeRemSocketLinks(ntree, sock_to);
}
@ -292,32 +288,27 @@ struct LinkAndPosition {
float2 multi_socket_position;
};
static void sort_multi_input_socket_links_with_drag(bNode &node,
static void sort_multi_input_socket_links_with_drag(bNodeSocket &socket,
bNodeLink &drag_link,
const float2 &cursor)
{
for (bNodeSocket *socket : node.input_sockets()) {
if (!socket->is_multi_input()) {
continue;
}
const float2 &socket_location = {socket->runtime->locx, socket->runtime->locy};
const float2 &socket_location = {socket.runtime->locx, socket.runtime->locy};
Vector<LinkAndPosition, 8> links;
for (bNodeLink *link : socket->directly_linked_links()) {
const float2 location = node_link_calculate_multi_input_position(
socket_location, link->multi_input_socket_index, link->tosock->runtime->total_inputs);
links.append({link, location});
};
Vector<LinkAndPosition, 8> links;
for (bNodeLink *link : socket.directly_linked_links()) {
const float2 location = node_link_calculate_multi_input_position(
socket_location, link->multi_input_socket_index, link->tosock->runtime->total_inputs);
links.append({link, location});
};
links.append({&drag_link, cursor});
links.append({&drag_link, cursor});
std::sort(links.begin(), links.end(), [](const LinkAndPosition a, const LinkAndPosition b) {
return a.multi_socket_position.y < b.multi_socket_position.y;
});
std::sort(links.begin(), links.end(), [](const LinkAndPosition a, const LinkAndPosition b) {
return a.multi_socket_position.y < b.multi_socket_position.y;
});
for (const int i : links.index_range()) {
links[i].link->multi_input_socket_index = i;
}
for (const int i : links.index_range()) {
links[i].link->multi_input_socket_index = i;
}
}
@ -360,12 +351,12 @@ static void snode_autoconnect(SpaceNode &snode, const bool allow_multiple, const
bNode *node_fr = sorted_nodes[i];
bNode *node_to = sorted_nodes[i + 1];
/* corner case: input/output node aligned the wrong way around (T47729) */
/* Corner case: input/output node aligned the wrong way around (T47729). */
if (BLI_listbase_is_empty(&node_to->inputs) || BLI_listbase_is_empty(&node_fr->outputs)) {
SWAP(bNode *, node_fr, node_to);
}
/* if there are selected sockets, connect those */
/* If there are selected sockets, connect those. */
LISTBASE_FOREACH (bNodeSocket *, sock_to, &node_to->inputs) {
if (sock_to->flag & SELECT) {
has_selected_inputs = true;
@ -374,7 +365,7 @@ static void snode_autoconnect(SpaceNode &snode, const bool allow_multiple, const
continue;
}
/* check for an appropriate output socket to connect from */
/* Check for an appropriate output socket to connect from. */
bNodeSocket *sock_fr = best_socket_output(ntree, node_fr, sock_to, allow_multiple);
if (!sock_fr) {
continue;
@ -387,18 +378,18 @@ static void snode_autoconnect(SpaceNode &snode, const bool allow_multiple, const
}
if (!has_selected_inputs) {
/* no selected inputs, connect by finding suitable match */
/* No selected inputs, connect by finding suitable match. */
int num_inputs = BLI_listbase_count(&node_to->inputs);
for (int i = 0; i < num_inputs; i++) {
/* find the best guess input socket */
/* Find the best guess input socket. */
bNodeSocket *sock_to = best_socket_input(ntree, node_to, i, replace);
if (!sock_to) {
continue;
}
/* check for an appropriate output socket to connect from */
/* Check for an appropriate output socket to connect from. */
bNodeSocket *sock_fr = best_socket_output(ntree, node_fr, sock_to, allow_multiple);
if (!sock_fr) {
continue;
@ -905,9 +896,7 @@ static void add_dragged_links_to_tree(bContext &C, bNodeLinkDrag &nldrag)
if (!link.tosock || !link.fromsock) {
continue;
}
/* before actually adding the link,
* let nodes perform special link insertion handling
*/
/* Before actually adding the link, let nodes perform special link insertion handling. */
bNodeLink *new_link = MEM_new<bNodeLink>(__func__, link);
if (link.fromnode->typeinfo->insert_link) {
link.fromnode->typeinfo->insert_link(&ntree, link.fromnode, new_link);
@ -916,11 +905,11 @@ static void add_dragged_links_to_tree(bContext &C, bNodeLinkDrag &nldrag)
link.tonode->typeinfo->insert_link(&ntree, link.tonode, new_link);
}
/* add link to the node tree */
/* Add link to the node tree. */
BLI_addtail(&ntree.links, new_link);
BKE_ntree_update_tag_link_added(&ntree, new_link);
/* we might need to remove a link */
/* We might need to remove a link. */
node_remove_extra_links(snode, *new_link);
}
@ -952,13 +941,11 @@ static void node_link_find_socket(bContext &C, wmOperator &op, const float2 &cur
bNodeLinkDrag &nldrag = *static_cast<bNodeLinkDrag *>(op.customdata);
if (nldrag.in_out == SOCK_OUT) {
bNode *tnode;
bNodeSocket *tsock = nullptr;
snode.edittree->ensure_topology_cache();
if (node_find_indicated_socket(snode, &tnode, &tsock, cursor, SOCK_IN)) {
if (bNodeSocket *tsock = node_find_indicated_socket(snode, cursor, SOCK_IN)) {
bNode &tnode = tsock->owner_node();
for (bNodeLink &link : nldrag.links) {
/* skip if socket is on the same node as the fromsock */
if (tnode && link.fromnode == tnode) {
/* Skip if socket is on the same node as the fromsock. */
if (link.fromnode == &tnode) {
continue;
}
@ -971,17 +958,17 @@ static void node_link_find_socket(bContext &C, wmOperator &op, const float2 &cur
}
}
/* attach links to the socket */
link.tonode = tnode;
/* Attach links to the socket. */
link.tonode = &tnode;
link.tosock = tsock;
nldrag.last_node_hovered_while_dragging_a_link = tnode;
nldrag.last_node_hovered_while_dragging_a_link = &tnode;
if (existing_link_connected_to_fromsock) {
link.multi_input_socket_index =
existing_link_connected_to_fromsock->multi_input_socket_index;
continue;
}
if (link.tosock && link.tosock->flag & SOCK_MULTI_INPUT) {
sort_multi_input_socket_links_with_drag(*tnode, link, cursor);
if (tsock && tsock->is_multi_input()) {
sort_multi_input_socket_links_with_drag(*tsock, link, cursor);
}
}
}
@ -997,21 +984,20 @@ static void node_link_find_socket(bContext &C, wmOperator &op, const float2 &cur
}
}
else {
bNode *tnode;
bNodeSocket *tsock = nullptr;
if (node_find_indicated_socket(snode, &tnode, &tsock, cursor, SOCK_OUT)) {
if (bNodeSocket *tsock = node_find_indicated_socket(snode, cursor, SOCK_OUT)) {
bNode &node = tsock->owner_node();
for (bNodeLink &link : nldrag.links) {
/* skip if this is already the target socket */
/* Skip if this is already the target socket. */
if (link.fromsock == tsock) {
continue;
}
/* skip if socket is on the same node as the fromsock */
if (tnode && link.tonode == tnode) {
/* Skip if socket is on the same node as the `fromsock`. */
if (link.tonode == &node) {
continue;
}
/* attach links to the socket */
link.fromnode = tnode;
/* Attach links to the socket. */
link.fromnode = &node;
link.fromsock = tsock;
}
}
@ -1094,19 +1080,18 @@ static std::unique_ptr<bNodeLinkDrag> node_link_init(SpaceNode &snode,
const float2 cursor,
const bool detach)
{
/* output indicated? */
bNode *node;
bNodeSocket *sock;
if (node_find_indicated_socket(snode, &node, &sock, cursor, SOCK_OUT)) {
if (bNodeSocket *sock = node_find_indicated_socket(snode, cursor, SOCK_OUT)) {
bNode &node = sock->owner_node();
std::unique_ptr<bNodeLinkDrag> nldrag = std::make_unique<bNodeLinkDrag>();
nldrag->start_node = node;
nldrag->start_node = &node;
nldrag->start_socket = sock;
nldrag->start_link_count = nodeCountSocketLinks(snode.edittree, sock);
int link_limit = nodeSocketLinkLimit(sock);
if (nldrag->start_link_count > 0 && (nldrag->start_link_count >= link_limit || detach)) {
/* dragged links are fixed on input side */
/* Dragged links are fixed on input side. */
nldrag->in_out = SOCK_IN;
/* detach current links and store them in the operator data */
/* Detach current links and store them in the operator data. */
LISTBASE_FOREACH_MUTABLE (bNodeLink *, link, &snode.edittree->links) {
if (link->fromsock == sock) {
bNodeLink oplink = *link;
@ -1119,25 +1104,25 @@ static std::unique_ptr<bNodeLinkDrag> node_link_init(SpaceNode &snode,
}
}
else {
/* dragged links are fixed on output side */
/* Dragged links are fixed on output side. */
nldrag->in_out = SOCK_OUT;
nldrag->links.append(create_drag_link(*node, *sock));
nldrag->links.append(create_drag_link(node, *sock));
}
return nldrag;
}
/* or an input? */
if (node_find_indicated_socket(snode, &node, &sock, cursor, SOCK_IN)) {
if (bNodeSocket *sock = node_find_indicated_socket(snode, cursor, SOCK_IN)) {
bNode &node = sock->owner_node();
std::unique_ptr<bNodeLinkDrag> nldrag = std::make_unique<bNodeLinkDrag>();
nldrag->last_node_hovered_while_dragging_a_link = node;
nldrag->start_node = node;
nldrag->last_node_hovered_while_dragging_a_link = &node;
nldrag->start_node = &node;
nldrag->start_socket = sock;
nldrag->start_link_count = nodeCountSocketLinks(snode.edittree, sock);
if (nldrag->start_link_count > 0) {
/* dragged links are fixed on output side */
/* Dragged links are fixed on output side. */
nldrag->in_out = SOCK_OUT;
/* detach current links and store them in the operator data */
/* Detach current links and store them in the operator data. */
bNodeLink *link_to_pick;
LISTBASE_FOREACH_MUTABLE (bNodeLink *, link, &snode.edittree->links) {
if (link->tosock == sock) {
@ -1153,16 +1138,14 @@ static std::unique_ptr<bNodeLinkDrag> node_link_init(SpaceNode &snode,
nldrag->links.append(oplink);
nodeRemLink(snode.edittree, link_to_pick);
/* send changed event to original link->tonode */
if (node) {
BKE_ntree_update_tag_node_property(snode.edittree, node);
}
/* Send changed event to original link->tonode. */
BKE_ntree_update_tag_node_property(snode.edittree, &node);
}
}
else {
/* dragged links are fixed on input side */
/* Dragged links are fixed on input side. */
nldrag->in_out = SOCK_IN;
nldrag->links.append(create_drag_link(*node, *sock));
nldrag->links.append(create_drag_link(node, *sock));
}
return nldrag;
}
@ -1216,7 +1199,6 @@ void NODE_OT_link(wmOperatorType *ot)
/* api callbacks */
ot->invoke = node_link_invoke;
ot->modal = node_link_modal;
// ot->exec = node_link_exec;
ot->poll = ED_operator_node_editable;
ot->cancel = node_link_cancel;
@ -1250,7 +1232,7 @@ void NODE_OT_link(wmOperatorType *ot)
/** \name Make Link Operator
* \{ */
/* makes a link between selected output and input sockets */
/* Makes a link between selected output and input sockets. */
static int node_make_link_exec(bContext *C, wmOperator *op)
{
Main &bmain = *CTX_data_main(C);
@ -1262,7 +1244,7 @@ static int node_make_link_exec(bContext *C, wmOperator *op)
snode_autoconnect(snode, true, replace);
/* deselect sockets after linking */
/* Deselect sockets after linking. */
node_deselect_all_input_sockets(node_tree, false);
node_deselect_all_output_sockets(node_tree, false);
@ -1725,14 +1707,14 @@ static int node_attach_invoke(bContext *C, wmOperator * /*op*/, const wmEvent *e
}
if (node->parent == nullptr) {
/* disallow moving a parent into its child */
/* Disallow moving a parent into its child. */
if (nodeAttachNodeCheck(frame, node) == false) {
/* attach all unparented nodes */
/* Attach all unparented nodes. */
nodeAttachNode(&ntree, node, frame);
}
}
else {
/* attach nodes which share parent with the frame */
/* Attach nodes which share parent with the frame. */
bNode *parent;
for (parent = frame->parent; parent; parent = parent->parent) {
if (parent == node->parent) {
@ -1741,7 +1723,7 @@ static int node_attach_invoke(bContext *C, wmOperator * /*op*/, const wmEvent *e
}
if (parent) {
/* disallow moving a parent into its child */
/* Disallow moving a parent into its child. */
if (nodeAttachNodeCheck(frame, node) == false) {
nodeDetachNode(&ntree, node);
nodeAttachNode(&ntree, node, frame);
@ -1790,17 +1772,17 @@ static void node_detach_recursive(bNodeTree &ntree,
detach_states[node->index()].done = true;
if (node->parent) {
/* call recursively */
/* Call recursively. */
if (!detach_states[node->parent->index()].done) {
node_detach_recursive(ntree, detach_states, node->parent);
}
/* in any case: if the parent is a descendant, so is the child */
/* In any case: if the parent is a descendant, so is the child. */
if (detach_states[node->parent->index()].descendent) {
detach_states[node->index()].descendent = true;
}
else if (node->flag & NODE_SELECT) {
/* if parent is not a descendant of a selected node, detach */
/* If parent is not a descendant of a selected node, detach. */
nodeDetachNode(&ntree, node);
detach_states[node->index()].descendent = true;
}
@ -1810,7 +1792,7 @@ static void node_detach_recursive(bNodeTree &ntree,
}
}
/* detach the root nodes in the current selection */
/* Detach the root nodes in the current selection. */
static int node_detach_exec(bContext *C, wmOperator * /*op*/)
{
SpaceNode &snode = *CTX_wm_space_node(C);
@ -1818,9 +1800,7 @@ static int node_detach_exec(bContext *C, wmOperator * /*op*/)
Array<NodeDetachstate> detach_states(ntree.all_nodes().size(), NodeDetachstate{false, false});
/* detach nodes recursively
* relative order is preserved here!
*/
/* Detach nodes recursively. Relative order is preserved here. */
for (bNode *node : ntree.all_nodes()) {
if (!detach_states[node->index()].done) {
node_detach_recursive(ntree, detach_states, node);
@ -1898,7 +1878,7 @@ void node_insert_on_link_flags_set(SpaceNode &snode, const ARegion &region)
return;
}
/* find link to select/highlight */
/* Find link to select/highlight. */
bNodeLink *selink = nullptr;
float dist_best = FLT_MAX;
LISTBASE_FOREACH (bNodeLink *, link, &node_tree.links) {
@ -1910,23 +1890,22 @@ void node_insert_on_link_flags_set(SpaceNode &snode, const ARegion &region)
node_link_bezier_points_evaluated(*link, coords);
float dist = FLT_MAX;
/* loop over link coords to find shortest dist to
* upper left node edge of a intersected line segment */
/* Loop over link coords to find the shortest distance to the upper left node edge of an
 * intersected line segment. */
for (int i = 0; i < NODE_LINK_RESOL; i++) {
/* Check if the node rectangle intersects the line from this point to next one. */
if (BLI_rctf_isect_segment(&node_to_insert->runtime->totr, coords[i], coords[i + 1])) {
/* store the shortest distance to the upper left edge
* of all intersections found so far */
/* Store the shortest distance to the upper left edge of all intersections found so far. */
const float node_xy[] = {node_to_insert->runtime->totr.xmin,
node_to_insert->runtime->totr.ymax};
/* to be precise coords should be clipped by select->totr,
* but not done since there's no real noticeable difference */
/* To be precise, coords should be clipped by `select->totr`, but this is not done since there's
 * no real noticeable difference. */
dist = min_ff(dist_squared_to_line_segment_v2(node_xy, coords[i], coords[i + 1]), dist);
}
}
/* we want the link with the shortest distance to node center */
/* We want the link with the shortest distance to node center. */
if (dist < dist_best) {
dist_best = dist;
selink = link;
@ -2079,7 +2058,7 @@ bNodeSocket *get_main_socket(bNodeTree &ntree, bNode &node, eNodeSocketInOut in_
}
}
/* find priority range */
/* Find priority range. */
int maxpriority = -1;
LISTBASE_FOREACH (bNodeSocket *, sock, sockets) {
if (sock->flag & SOCK_UNAVAIL) {
@ -2088,7 +2067,7 @@ bNodeSocket *get_main_socket(bNodeTree &ntree, bNode &node, eNodeSocketInOut in_
maxpriority = max_ii(get_main_socket_priority(sock), maxpriority);
}
/* try all priorities, starting from 'highest' */
/* Try all priorities, starting from 'highest'. */
for (int priority = maxpriority; priority >= 0; priority--) {
LISTBASE_FOREACH (bNodeSocket *, sock, sockets) {
if (!!sock->is_visible() && priority == get_main_socket_priority(sock)) {
@ -2097,7 +2076,7 @@ bNodeSocket *get_main_socket(bNodeTree &ntree, bNode &node, eNodeSocketInOut in_
}
}
/* no visible sockets, unhide first of highest priority */
/* No visible sockets, unhide first of highest priority. */
for (int priority = maxpriority; priority >= 0; priority--) {
LISTBASE_FOREACH (bNodeSocket *, sock, sockets) {
if (sock->flag & SOCK_UNAVAIL) {
@ -2245,13 +2224,13 @@ static void node_link_insert_offset_ntree(NodeInsertOfsData *iofsd,
rctf totr_insert;
node_to_updated_rect(insert, totr_insert);
/* frame attachment wasn't handled yet
* so we search the frame that the node will be attached to later */
/* Frame attachment wasn't handled yet so we search the frame that the node will be attached to
* later. */
insert.parent = node_find_frame_to_attach(*region, *ntree, mouse_xy);
/* this makes sure nodes are also correctly offset when inserting a node on top of a frame
/* This makes sure nodes are also correctly offset when inserting a node on top of a frame
* without actually making it a part of the frame (because mouse isn't intersecting it)
* - logic here is similar to node_find_frame_to_attach */
* - logic here is similar to node_find_frame_to_attach. */
if (!insert.parent ||
(prev->parent && (prev->parent == next->parent) && (prev->parent != insert.parent))) {
bNode *frame;

View File

@ -175,14 +175,9 @@ static bool is_position_over_node_or_socket(SpaceNode &snode, const float2 &mous
if (node_under_mouse_tweak(*snode.edittree, mouse)) {
return true;
}
bNode *node;
bNodeSocket *sock;
if (node_find_indicated_socket(
snode, &node, &sock, mouse, (eNodeSocketInOut)(SOCK_IN | SOCK_OUT))) {
if (node_find_indicated_socket(snode, mouse, SOCK_IN | SOCK_OUT)) {
return true;
}
return false;
}
@ -267,7 +262,7 @@ void node_deselect_all_input_sockets(bNodeTree &node_tree, const bool deselect_n
socket->flag &= ~SELECT;
}
/* if no selected sockets remain, also deselect the node */
/* If no selected sockets remain, also deselect the node. */
if (deselect_nodes) {
LISTBASE_FOREACH (bNodeSocket *, socket, &node->outputs) {
if (socket->flag & SELECT) {
@ -532,26 +527,27 @@ static bool node_mouse_select(bContext *C,
const Object *ob = CTX_data_active_object(C);
const Scene *scene = CTX_data_scene(C);
const wmWindowManager *wm = CTX_wm_manager(C);
bNode *node;
bNode *node = nullptr;
bNodeSocket *sock = nullptr;
bNodeSocket *tsock;
/* always do socket_select when extending selection. */
/* Always do socket_select when extending selection. */
const bool socket_select = (params->sel_op == SEL_OP_XOR) ||
RNA_boolean_get(op->ptr, "socket_select");
bool changed = false;
bool found = false;
bool node_was_selected = false;
/* get mouse coordinates in view2d space */
/* Get mouse coordinates in view2d space. */
float2 cursor;
UI_view2d_region_to_view(&region.v2d, mval.x, mval.y, &cursor.x, &cursor.y);
/* first do socket selection, these generally overlap with nodes. */
/* First do socket selection, these generally overlap with nodes. */
if (socket_select) {
/* NOTE: unlike nodes #SelectPick_Params isn't fully supported. */
const bool extend = (params->sel_op == SEL_OP_XOR);
if (node_find_indicated_socket(snode, &node, &sock, cursor, SOCK_IN)) {
sock = node_find_indicated_socket(snode, cursor, SOCK_IN);
if (sock) {
node = &sock->owner_node();
found = true;
node_was_selected = node->flag & SELECT;
@ -560,48 +556,50 @@ static bool node_mouse_select(bContext *C,
node_socket_toggle(node, *sock, true);
changed = true;
}
else if (node_find_indicated_socket(snode, &node, &sock, cursor, SOCK_OUT)) {
found = true;
node_was_selected = node->flag & SELECT;
if (!changed) {
sock = node_find_indicated_socket(snode, cursor, SOCK_OUT);
if (sock) {
node = &sock->owner_node();
found = true;
node_was_selected = node->flag & SELECT;
if (sock->flag & SELECT) {
if (extend) {
node_socket_deselect(node, *sock, true);
changed = true;
if (sock->flag & SELECT) {
if (extend) {
node_socket_deselect(node, *sock, true);
changed = true;
}
}
}
else {
/* Only allow one selected output per node, for sensible linking.
* Allow selecting outputs from different nodes though, if extend is true. */
if (node) {
for (tsock = (bNodeSocket *)node->outputs.first; tsock; tsock = tsock->next) {
else {
/* Only allow one selected output per node, for sensible linking.
* Allow selecting outputs from different nodes though, if extend is true. */
for (bNodeSocket *tsock : node->output_sockets()) {
if (tsock == sock) {
continue;
}
node_socket_deselect(node, *tsock, true);
changed = true;
}
}
if (!extend) {
for (bNode *tnode : node_tree.all_nodes()) {
if (tnode == node) {
continue;
}
for (tsock = (bNodeSocket *)tnode->outputs.first; tsock; tsock = tsock->next) {
node_socket_deselect(tnode, *tsock, true);
changed = true;
if (!extend) {
for (bNode *tnode : node_tree.all_nodes()) {
if (tnode == node) {
continue;
}
for (bNodeSocket *tsock : tnode->output_sockets()) {
node_socket_deselect(tnode, *tsock, true);
changed = true;
}
}
}
node_socket_select(node, *sock);
changed = true;
}
node_socket_select(node, *sock);
changed = true;
}
}
}
if (!sock) {
/* find the closest visible node */
/* Find the closest visible node. */
node = node_under_mouse_select(node_tree, cursor);
found = (node != nullptr);
node_was_selected = node && (node->flag & SELECT);
@ -619,28 +617,25 @@ static bool node_mouse_select(bContext *C,
if (found) {
switch (params->sel_op) {
case SEL_OP_ADD: {
case SEL_OP_ADD:
nodeSetSelected(node, true);
break;
}
case SEL_OP_SUB: {
case SEL_OP_SUB:
nodeSetSelected(node, false);
break;
}
case SEL_OP_XOR: {
/* Check active so clicking on an inactive node activates it. */
bool is_selected = (node->flag & NODE_SELECT) && (node->flag & NODE_ACTIVE);
nodeSetSelected(node, !is_selected);
break;
}
case SEL_OP_SET: {
case SEL_OP_SET:
nodeSetSelected(node, true);
break;
}
case SEL_OP_AND: {
BLI_assert_unreachable(); /* Doesn't make sense for picking. */
case SEL_OP_AND:
/* Doesn't make sense for picking. */
BLI_assert_unreachable();
break;
}
}
changed = true;
@ -683,20 +678,20 @@ static bool node_mouse_select(bContext *C,
static int node_select_exec(bContext *C, wmOperator *op)
{
/* get settings from RNA properties for operator */
/* Get settings from RNA properties for operator. */
int2 mval;
RNA_int_get_array(op->ptr, "location", mval);
SelectPick_Params params = {};
ED_select_pick_params_from_operator(op->ptr, &params);
/* perform the select */
/* Perform the selection. */
const bool changed = node_mouse_select(C, op, mval, &params);
if (changed) {
return OPERATOR_PASS_THROUGH | OPERATOR_FINISHED;
}
/* Nothing selected, just passthrough. */
/* Nothing selected, just pass through. */
return OPERATOR_PASS_THROUGH | OPERATOR_CANCELLED;
}
@ -964,10 +959,9 @@ static bool do_lasso_select_node(bContext *C,
changed = true;
}
/* get rectangle from operator */
/* Get rectangle from operator. */
BLI_lasso_boundbox(&rect, mcoords, mcoords_len);
/* do actual selection */
for (bNode *node : node_tree.all_nodes()) {
if (select && (node->flag & NODE_SELECT)) {
continue;
@ -1391,7 +1385,7 @@ static uiBlock *node_find_menu(bContext *C, ARegion *region, void *arg_op)
but, nullptr, node_find_update_fn, op->type, false, nullptr, node_find_exec_fn, nullptr);
UI_but_flag_enable(but, UI_BUT_ACTIVATE_ON_INIT);
/* fake button, it holds space for search items */
/* Fake button holds space for search items. */
uiDefBut(block,
UI_BTYPE_LABEL,
0,

View File

@ -25,7 +25,7 @@
#include "DEG_depsgraph_query.h"
#include "ED_curves_sculpt.h"
#include "ED_curves.h"
#include "ED_spreadsheet.h"
#include "NOD_geometry_nodes_lazy_function.hh"
@ -265,7 +265,7 @@ bool GeometryDataSource::has_selection_filter() const
if (object_orig->type != OB_CURVES) {
return false;
}
if (object_orig->mode != OB_MODE_SCULPT_CURVES) {
if (!ELEM(object_orig->mode, OB_MODE_SCULPT_CURVES, OB_MODE_EDIT)) {
return false;
}
return true;
@ -339,9 +339,9 @@ IndexMask GeometryDataSource::apply_selection_filter(Vector<int64_t> &indices) c
const Curves &curves_id = *component.get_for_read();
switch (domain_) {
case ATTR_DOMAIN_POINT:
return sculpt_paint::retrieve_selected_points(curves_id, indices);
return curves::retrieve_selected_points(curves_id, indices);
case ATTR_DOMAIN_CURVE:
return sculpt_paint::retrieve_selected_curves(curves_id, indices);
return curves::retrieve_selected_curves(curves_id, indices);
default:
BLI_assert_unreachable();
}

View File

@ -397,6 +397,9 @@ tSlider *ED_slider_create(struct bContext *C)
}
}
/* Hide the area menu bar contents, as the slider will be drawn on top. */
ED_area_status_text(slider->area, "");
return slider;
}

View File

@ -95,7 +95,7 @@ inline void execute_lazy_function_eagerly_impl(
fn.destruct_storage(context.storage);
/* Make sure all outputs have been computed. */
BLI_assert(!Span(set_outputs).contains(false));
BLI_assert(!Span<bool>(set_outputs).contains(false));
}
} // namespace detail

View File

@ -151,9 +151,15 @@ struct NodeState {
*/
bool node_has_finished = false;
/**
* Set to true once the node is done running for the first time.
* Set to true once the always required inputs have been requested.
* This happens the first time the node is run.
*/
bool had_initialization = false;
bool always_used_inputs_requested = false;
/**
* Set to true when the storage and defaults have been initialized.
* This happens the first time the node function is executed.
*/
bool storage_and_defaults_initialized = false;
/**
* Nodes with side effects should always be executed when their required inputs have been
* computed.
@ -729,39 +735,20 @@ class Executor {
return;
}
if (!node_state.had_initialization) {
/* Initialize storage. */
node_state.storage = fn.init_storage(allocator);
/* Load unlinked inputs. */
for (const int input_index : node.inputs().index_range()) {
const InputSocket &input_socket = node.input(input_index);
if (input_socket.origin() != nullptr) {
continue;
}
InputState &input_state = node_state.inputs[input_index];
const CPPType &type = input_socket.type();
const void *default_value = input_socket.default_value();
BLI_assert(default_value != nullptr);
if (self_.logger_ != nullptr) {
self_.logger_->log_socket_value(input_socket, {type, default_value}, *context_);
}
void *buffer = allocator.allocate(type.size(), type.alignment());
type.copy_construct(default_value, buffer);
this->forward_value_to_input(locked_node, input_state, {type, buffer}, current_task);
}
if (!node_state.always_used_inputs_requested) {
/* Request linked inputs that are always needed. */
const Span<Input> fn_inputs = fn.inputs();
for (const int input_index : fn_inputs.index_range()) {
const Input &fn_input = fn_inputs[input_index];
if (fn_input.usage == ValueUsage::Used) {
const InputSocket &input_socket = node.input(input_index);
this->set_input_required(locked_node, input_socket);
if (input_socket.origin() != nullptr) {
this->set_input_required(locked_node, input_socket);
}
}
}
node_state.had_initialization = true;
node_state.always_used_inputs_requested = true;
}
for (const int input_index : node_state.inputs.index_range()) {
@ -784,6 +771,32 @@ class Executor {
});
if (node_needs_execution) {
if (!node_state.storage_and_defaults_initialized) {
/* Initialize storage. */
node_state.storage = fn.init_storage(allocator);
/* Load unlinked inputs. */
for (const int input_index : node.inputs().index_range()) {
const InputSocket &input_socket = node.input(input_index);
if (input_socket.origin() != nullptr) {
continue;
}
InputState &input_state = node_state.inputs[input_index];
const CPPType &type = input_socket.type();
const void *default_value = input_socket.default_value();
BLI_assert(default_value != nullptr);
if (self_.logger_ != nullptr) {
self_.logger_->log_socket_value(input_socket, {type, default_value}, *context_);
}
BLI_assert(input_state.value == nullptr);
input_state.value = allocator.allocate(type.size(), type.alignment());
type.copy_construct(default_value, input_state.value);
input_state.was_ready_for_execution = true;
}
node_state.storage_and_defaults_initialized = true;
}
/* Importantly, the node must not be locked when it is executed. That would result in locks
 * being held for a long time in some cases, and in multiple locks being held by the same
 * thread in the same graph, which can lead to deadlocks. */
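In outline, node setup is now split into two independent one-time steps: always-used linked inputs are requested as soon as the node is scheduled, while storage allocation and copying of unlinked input defaults are deferred until the node is actually going to execute. A condensed sketch of that control flow (commentary on the hunks above, not code from the commit):

/* Inside the node scheduling path, simplified: */
if (!node_state.always_used_inputs_requested) {
  /* Request linked inputs that are always needed, as early as possible. */
  node_state.always_used_inputs_requested = true;
}
/* ... determine node_needs_execution from the input states ... */
if (node_needs_execution && !node_state.storage_and_defaults_initialized) {
  /* Allocate storage and copy unlinked input defaults only when the node will really run. */
  node_state.storage_and_defaults_initialized = true;
}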

View File

@ -47,6 +47,8 @@ struct AddCurvesOnMeshInputs {
struct AddCurvesOnMeshOutputs {
bool uv_error = false;
IndexRange new_curves_range;
IndexRange new_points_range;
};
/**

View File

@ -303,6 +303,10 @@ AddCurvesOnMeshOutputs add_curves_on_mesh(CurvesGeometry &curves,
curves.resize(new_points_num, new_curves_num);
MutableSpan<float3> positions_cu = curves.positions_for_write();
/* The new elements are added at the end of the arrays. */
outputs.new_points_range = curves.points_range().drop_front(old_points_num);
outputs.new_curves_range = curves.curves_range().drop_front(old_curves_num);
/* Initialize attachment information. */
MutableSpan<float2> surface_uv_coords = curves.surface_uv_coords_for_write();
surface_uv_coords.take_back(added_curves_num).copy_from(used_uvs);
@ -338,18 +342,6 @@ AddCurvesOnMeshOutputs add_curves_on_mesh(CurvesGeometry &curves,
}
});
/* Update selection arrays when available. */
const VArray<float> points_selection = curves.selection_point_float();
if (points_selection.is_span()) {
MutableSpan<float> points_selection_span = curves.selection_point_float_for_write();
points_selection_span.drop_front(old_points_num).fill(1.0f);
}
const VArray<float> curves_selection = curves.selection_curve_float();
if (curves_selection.is_span()) {
MutableSpan<float> curves_selection_span = curves.selection_curve_float_for_write();
curves_selection_span.slice(new_curves_range).fill(1.0f);
}
/* Initialize position attribute. */
if (inputs.interpolate_shape) {
interpolate_position_with_interpolation(curves,
@ -374,24 +366,20 @@ AddCurvesOnMeshOutputs add_curves_on_mesh(CurvesGeometry &curves,
curves.fill_curve_types(new_curves_range, CURVE_TYPE_CATMULL_ROM);
/* Explicitly set all other attributes besides those processed above to default values. */
bke::MutableAttributeAccessor attributes = curves.attributes_for_write();
Set<std::string> attributes_to_skip{{"position",
"curve_type",
"surface_uv_coordinate",
".selection_point_float",
".selection_curve_float"}};
/* Explicitly set all other attributes besides those processed above to default values. */
Set<std::string> attributes_to_skip{{"position", "curve_type", "surface_uv_coordinate"}};
attributes.for_all(
[&](const bke::AttributeIDRef &id, const bke::AttributeMetaData /*meta_data*/) {
if (id.is_named() && attributes_to_skip.contains(id.name())) {
return true;
}
bke::GSpanAttributeWriter attribute = attributes.lookup_for_write_span(id);
/* The new elements are added at the end of the array. */
const int old_elements_num = attribute.domain == ATTR_DOMAIN_POINT ? old_points_num :
old_curves_num;
const CPPType &type = attribute.span.type();
GMutableSpan new_data = attribute.span.drop_front(old_elements_num);
GMutableSpan new_data = attribute.span.slice(attribute.domain == ATTR_DOMAIN_POINT ?
outputs.new_points_range :
outputs.new_curves_range);
type.fill_assign_n(type.default_value(), new_data.data(), new_data.size());
attribute.finish();
return true;

View File

@ -491,18 +491,28 @@ inline bool validate_data_format(eGPUTextureFormat tex_format, eGPUDataFormat da
case GPU_DEPTH24_STENCIL8:
case GPU_DEPTH32F_STENCIL8:
return ELEM(data_format, GPU_DATA_UINT_24_8, GPU_DATA_UINT);
case GPU_R8UI:
case GPU_R16UI:
case GPU_RG16UI:
case GPU_RGBA16UI:
case GPU_R32UI:
case GPU_RG32UI:
case GPU_RGBA32UI:
return data_format == GPU_DATA_UINT;
case GPU_R32I:
case GPU_RG16I:
case GPU_R8I:
case GPU_RG8I:
case GPU_RGBA8I:
case GPU_R16I:
case GPU_RG16I:
case GPU_RGBA16I:
case GPU_R32I:
case GPU_RG32I:
case GPU_RGBA32I:
return data_format == GPU_DATA_INT;
case GPU_R8:
case GPU_RG8:
case GPU_RGBA8:
case GPU_R8UI:
case GPU_RG8UI:
case GPU_RGBA8UI:
case GPU_SRGB8_A8:
return ELEM(data_format, GPU_DATA_UBYTE, GPU_DATA_FLOAT);
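To illustrate the regrouping with formats that appear only once in this hunk (a small sketch, assuming the eGPUTextureFormat / eGPUDataFormat enums are in scope):

/* Newly covered integer formats validate only against the matching integer data formats. */
BLI_assert(validate_data_format(GPU_RG32UI, GPU_DATA_UINT));
BLI_assert(validate_data_format(GPU_RGBA16I, GPU_DATA_INT));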

View File

@ -519,8 +519,8 @@ char *MSLGeneratorInterface::msl_patch_default_get()
}
std::stringstream ss_patch;
ss_patch << datatoc_mtl_shader_shared_h << std::endl;
ss_patch << datatoc_mtl_shader_defines_msl << std::endl;
ss_patch << datatoc_mtl_shader_shared_h << std::endl;
size_t len = strlen(ss_patch.str().c_str());
msl_patch_default = (char *)malloc(len * sizeof(char));
@ -607,7 +607,7 @@ bool MTLShader::generate_msl_from_glsl(const shader::ShaderCreateInfo *info)
/*** Regex Commands ***/
/* Source cleanup and syntax replacement. */
static std::regex remove_excess_newlines("\\n+");
static std::regex replace_mat3("mat3\\s*\\(");
static std::regex replace_matrix_construct("mat([234](x[234])?)\\s*\\(");
/* Special condition - mat3 and array constructor replacement.
* Also replace excessive new lines to ensure cases are not missed.
@ -615,14 +615,14 @@ bool MTLShader::generate_msl_from_glsl(const shader::ShaderCreateInfo *info)
shd_builder_->glsl_vertex_source_ = std::regex_replace(
shd_builder_->glsl_vertex_source_, remove_excess_newlines, "\n");
shd_builder_->glsl_vertex_source_ = std::regex_replace(
shd_builder_->glsl_vertex_source_, replace_mat3, "MAT3(");
shd_builder_->glsl_vertex_source_, replace_matrix_construct, "MAT$1(");
replace_array_initializers_func(shd_builder_->glsl_vertex_source_);
if (!msl_iface.uses_transform_feedback) {
shd_builder_->glsl_fragment_source_ = std::regex_replace(
shd_builder_->glsl_fragment_source_, remove_excess_newlines, "\n");
shd_builder_->glsl_fragment_source_ = std::regex_replace(
shd_builder_->glsl_fragment_source_, replace_mat3, "MAT3(");
shd_builder_->glsl_fragment_source_, replace_matrix_construct, "MAT$1(");
replace_array_initializers_func(shd_builder_->glsl_fragment_source_);
}
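As an illustration of what the broadened pattern rewrites (a self-contained sketch; needs <regex> and <string>):

static const std::regex replace_matrix_construct("mat([234](x[234])?)\\s*\\(");
std::string src = "mat3(a, b, c) * mat2x4 (v0, v1)";
src = std::regex_replace(src, replace_matrix_construct, "MAT$1(");
/* src is now "MAT3(a, b, c) * MAT2x4(v0, v1)": non-square constructors are handled as well. */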

View File

@ -3,7 +3,7 @@
/** Special header for mapping commonly defined tokens to API-specific variations.
* Where possible, this will adhere closely to base GLSL, where semantics are the same.
* However, host code shader code may need modifying to support types where necessary variations
* exist between APIs but are not expressed through the source. (e.g. distinctio between depth2d
* exist between APIs but are not expressed through the source. (e.g. distinction between depth2d
* and texture2d types in metal).
*/
@ -16,19 +16,27 @@
#define DFDY_SIGN 1.0
/* Type definitions. */
#define vec2 float2
#define vec3 float3
#define vec4 float4
#define mat2 float2x2
#define mat2x2 float2x2
#define mat3 float3x3
#define mat4 float4x4
#define ivec2 int2
#define ivec3 int3
#define ivec4 int4
#define uvec2 uint2
#define uvec3 uint3
#define uvec4 uint4
using vec2 = float2;
using vec3 = float3;
using vec4 = float4;
using mat2x2 = float2x2;
using mat2x3 = float2x3;
using mat2x4 = float2x4;
using mat3x2 = float3x2;
using mat3x3 = float3x3;
using mat3x4 = float3x4;
using mat4x2 = float4x2;
using mat4x3 = float4x3;
using mat4x4 = float4x4;
using mat2 = float2x2;
using mat3 = float3x3;
using mat4 = float4x4;
using ivec2 = int2;
using ivec3 = int3;
using ivec4 = int4;
using uvec2 = uint2;
using uvec3 = uint3;
using uvec4 = uint4;
/* MTLBOOL is used for native booleans generated by the Metal backend, to avoid type-emulation
* for GLSL bools, which are treated as integers. */
#define MTLBOOL bool
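A brief note on switching from #define to using (the rationale given here is an inference, not stated in the diff): a type alias is a real type name that takes part in parsing and overload resolution, whereas a macro rewrites tokens before the compiler sees them.

using mat3 = float3x3;  /* mat3 is an actual type name ... */
mat3 MAT3x3(mat4 m);    /* ... so it can appear in declarations such as the MAT3x3 overloads below. */
/* With `#define mat3 float3x3`, every `mat3` token would instead be substituted textually. */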
@ -687,6 +695,76 @@ inline void _texture_write_internal(thread _mtl_combined_image_sampler_3d<S, A>
}
}
/* Matrix compare operators. */
/** TODO(fclem): Template. */
inline bool operator==(float4x4 a, float4x4 b)
{
for (int i = 0; i < 4; i++) {
if (any(a[i] != b[i])) {
return false;
}
}
return true;
}
inline bool operator==(float3x3 a, float3x3 b)
{
for (int i = 0; i < 3; i++) {
if (any(a[i] != b[i])) {
return false;
}
}
return true;
}
inline bool operator==(float2x2 a, float2x2 b)
{
for (int i = 0; i < 2; i++) {
if (any(a[i] != b[i])) {
return false;
}
}
return true;
}
inline bool operator!=(float4x4 a, float4x4 b)
{
return !(a == b);
}
inline bool operator!=(float3x3 a, float3x3 b)
{
return !(a == b);
}
inline bool operator!=(float2x2 a, float2x2 b)
{
return !(a == b);
}
/* Matrix unary minus operator. */
inline float4x4 operator-(float4x4 a)
{
float4x4 b;
for (int i = 0; i < 4; i++) {
b[i] = -a[i];
}
return b;
}
inline float3x3 operator-(float3x3 a)
{
float3x3 b;
for (int i = 0; i < 3; i++) {
b[i] = -a[i];
}
return b;
}
inline float2x2 operator-(float2x2 a)
{
float2x2 b;
for (int i = 0; i < 2; i++) {
b[i] = -a[i];
}
return b;
}
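As the TODO above suggests, these per-size overloads could be folded into a single template; a possible sketch using the MSL matrix&lt;T, Cols, Rows&gt; template (an assumption about a future cleanup, not part of this commit):

template<typename T, int Cols, int Rows>
inline bool operator==(matrix<T, Cols, Rows> a, matrix<T, Cols, Rows> b)
{
  for (int i = 0; i < Cols; i++) {
    if (any(a[i] != b[i])) {
      return false;
    }
  }
  return true;
}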
/* SSBO Vertex Fetch Mode. */
#ifdef MTL_SSBO_VERTEX_FETCH
/* Enabled when geometry is passed via raw buffer bindings, rather than using
@ -997,47 +1075,59 @@ float4x4 inverse(float4x4 a)
float b10 = a[2][1] * a[3][3] - a[2][3] * a[3][1];
float b11 = a[2][2] * a[3][3] - a[2][3] * a[3][2];
float invdet = 1.0 / (b00 * b11 - b01 * b10 + b02 * b09 + b03 * b08 - b04 * b07 + b05 * b06);
float inv_det = 1.0 / (b00 * b11 - b01 * b10 + b02 * b09 + b03 * b08 - b04 * b07 + b05 * b06);
return float4x4(a[1][1] * b11 - a[1][2] * b10 + a[1][3] * b09,
a[0][2] * b10 - a[0][1] * b11 - a[0][3] * b09,
a[3][1] * b05 - a[3][2] * b04 + a[3][3] * b03,
a[2][2] * b04 - a[2][1] * b05 - a[2][3] * b03,
a[1][2] * b08 - a[1][0] * b11 - a[1][3] * b07,
a[0][0] * b11 - a[0][2] * b08 + a[0][3] * b07,
a[3][2] * b02 - a[3][0] * b05 - a[3][3] * b01,
a[2][0] * b05 - a[2][2] * b02 + a[2][3] * b01,
a[1][0] * b10 - a[1][1] * b08 + a[1][3] * b06,
a[0][1] * b08 - a[0][0] * b10 - a[0][3] * b06,
a[3][0] * b04 - a[3][1] * b02 + a[3][3] * b00,
a[2][1] * b02 - a[2][0] * b04 - a[2][3] * b00,
a[1][1] * b07 - a[1][0] * b09 - a[1][2] * b06,
a[0][0] * b09 - a[0][1] * b07 + a[0][2] * b06,
a[3][1] * b01 - a[3][0] * b03 - a[3][2] * b00,
a[2][0] * b03 - a[2][1] * b01 + a[2][2] * b00) *
invdet;
float4x4 adjoint{};
adjoint[0][0] = a[1][1] * b11 - a[1][2] * b10 + a[1][3] * b09;
adjoint[0][1] = a[0][2] * b10 - a[0][1] * b11 - a[0][3] * b09;
adjoint[0][2] = a[3][1] * b05 - a[3][2] * b04 + a[3][3] * b03;
adjoint[0][3] = a[2][2] * b04 - a[2][1] * b05 - a[2][3] * b03;
adjoint[1][0] = a[1][2] * b08 - a[1][0] * b11 - a[1][3] * b07;
adjoint[1][1] = a[0][0] * b11 - a[0][2] * b08 + a[0][3] * b07;
adjoint[1][2] = a[3][2] * b02 - a[3][0] * b05 - a[3][3] * b01;
adjoint[1][3] = a[2][0] * b05 - a[2][2] * b02 + a[2][3] * b01;
adjoint[2][0] = a[1][0] * b10 - a[1][1] * b08 + a[1][3] * b06;
adjoint[2][1] = a[0][1] * b08 - a[0][0] * b10 - a[0][3] * b06;
adjoint[2][2] = a[3][0] * b04 - a[3][1] * b02 + a[3][3] * b00;
adjoint[2][3] = a[2][1] * b02 - a[2][0] * b04 - a[2][3] * b00;
adjoint[3][0] = a[1][1] * b07 - a[1][0] * b09 - a[1][2] * b06;
adjoint[3][1] = a[0][0] * b09 - a[0][1] * b07 + a[0][2] * b06;
adjoint[3][2] = a[3][1] * b01 - a[3][0] * b03 - a[3][2] * b00;
adjoint[3][3] = a[2][0] * b03 - a[2][1] * b01 + a[2][2] * b00;
return adjoint * inv_det;
}
float3x3 inverse(float3x3 m)
{
float b00 = m[1][1] * m[2][2] - m[2][1] * m[1][2];
float b01 = m[0][1] * m[2][2] - m[2][1] * m[0][2];
float b02 = m[0][1] * m[1][2] - m[1][1] * m[0][2];
float invdet = 1.0 / (m[0][0] * (m[1][1] * m[2][2] - m[2][1] * m[1][2]) -
m[1][0] * (m[0][1] * m[2][2] - m[2][1] * m[0][2]) +
m[2][0] * (m[0][1] * m[1][2] - m[1][1] * m[0][2]));
float inv_det = 1.0 / (m[0][0] * b00 - m[1][0] * b01 + m[2][0] * b02);
float3x3 inverse(0);
inverse[0][0] = +(m[1][1] * m[2][2] - m[2][1] * m[1][2]);
inverse[1][0] = -(m[1][0] * m[2][2] - m[2][0] * m[1][2]);
inverse[2][0] = +(m[1][0] * m[2][1] - m[2][0] * m[1][1]);
inverse[0][1] = -(m[0][1] * m[2][2] - m[2][1] * m[0][2]);
inverse[1][1] = +(m[0][0] * m[2][2] - m[2][0] * m[0][2]);
inverse[2][1] = -(m[0][0] * m[2][1] - m[2][0] * m[0][1]);
inverse[0][2] = +(m[0][1] * m[1][2] - m[1][1] * m[0][2]);
inverse[1][2] = -(m[0][0] * m[1][2] - m[1][0] * m[0][2]);
inverse[2][2] = +(m[0][0] * m[1][1] - m[1][0] * m[0][1]);
inverse = inverse * invdet;
float3x3 adjoint{};
adjoint[0][0] = +b00;
adjoint[0][1] = -b01;
adjoint[0][2] = +b02;
adjoint[1][0] = -(m[1][0] * m[2][2] - m[2][0] * m[1][2]);
adjoint[1][1] = +(m[0][0] * m[2][2] - m[2][0] * m[0][2]);
adjoint[1][2] = -(m[0][0] * m[1][2] - m[1][0] * m[0][2]);
adjoint[2][0] = +(m[1][0] * m[2][1] - m[2][0] * m[1][1]);
adjoint[2][1] = -(m[0][0] * m[2][1] - m[2][0] * m[0][1]);
adjoint[2][2] = +(m[0][0] * m[1][1] - m[1][0] * m[0][1]);
return adjoint * inv_det;
}
return inverse;
float2x2 inverse(float2x2 m)
{
float inv_det = 1.0 / (m[0][0] * m[1][1] - m[1][0] * m[0][1]);
float2x2 adjoint{};
adjoint[0][0] = +m[1][1];
adjoint[1][0] = -m[1][0];
adjoint[0][1] = -m[0][1];
adjoint[1][1] = +m[0][0];
return adjoint * inv_det;
}
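All of the rewritten routines follow the same adjugate formulation; in standard notation (a general identity, not specific to this commit):

A^{-1} = \frac{1}{\det A}\,\operatorname{adj}(A), \qquad \text{e.g.} \quad \begin{pmatrix} a & b \\ c & d \end{pmatrix}^{-1} = \frac{1}{ad - bc} \begin{pmatrix} d & -b \\ -c & a \end{pmatrix}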
/* Additional overloads for builtin functions. */
@ -1110,44 +1200,96 @@ template<typename T, unsigned int Size> bool is_zero(vec<T, Size> a)
return true;
}
/* Matrix conversion fallback. */
mat3 MAT3(vec3 a, vec3 b, vec3 c)
/**
* Matrix conversion fallback for functional style casting & constructors.
* To avoid name collision with the types, they are replaced with their uppercase versions
* before compilation.
*/
mat2 MAT2x2(vec2 a, vec2 b)
{
return mat2(a, b);
}
mat2 MAT2x2(float a1, float a2, float b1, float b2)
{
return mat2(vec2(a1, a2), vec2(b1, b2));
}
mat2 MAT2x2(float f)
{
return mat2(f);
}
mat2 MAT2x2(mat3 m)
{
return mat2(m[0].xy, m[1].xy);
}
mat2 MAT2x2(mat4 m)
{
return mat2(m[0].xy, m[1].xy);
}
mat3 MAT3x3(vec3 a, vec3 b, vec3 c)
{
return mat3(a, b, c);
}
mat3 MAT3(vec3 a, vec3 b, float c1, float c2, float c3)
{
return mat3(a, b, vec3(c1, c2, c3));
}
mat3 MAT3(vec3 a, float b1, float b2, float b3, vec3 c)
{
return mat3(a, vec3(b1, b2, b3), c);
}
mat3 MAT3(vec3 a, float b1, float b2, float b3, float c1, float c2, float c3)
{
return mat3(a, vec3(b1, b2, b3), vec3(c1, c2, c3));
}
mat3 MAT3(float a1, float a2, float a3, float b1, float b2, float b3, float c1, float c2, float c3)
mat3 MAT3x3(
float a1, float a2, float a3, float b1, float b2, float b3, float c1, float c2, float c3)
{
return mat3(vec3(a1, a2, a3), vec3(b1, b2, b3), vec3(c1, c2, c3));
}
mat3 MAT3(float a1, float a2, float a3, vec3 b, vec3 c)
{
return mat3(vec3(a1, a2, a3), b, c);
}
mat3 MAT3(float a1, float a2, float a3, vec3 b, float c1, float c2, float c3)
{
return mat3(vec3(a1, a2, a3), b, vec3(c1, c2, c3));
}
mat3 MAT3(float a1, float a2, float a3, float b1, float b2, float b3, vec3 c)
{
return mat3(vec3(a1, a2, a3), vec3(b1, b2, b3), c);
}
mat3 MAT3(float f)
mat3 MAT3x3(float f)
{
return mat3(f);
}
mat3 MAT3(mat4 m)
mat3 MAT3x3(mat4 m)
{
return mat4_to_mat3(m);
}
return mat3(m[0].xyz, m[1].xyz, m[2].xyz);
}
mat3 MAT3x3(mat2 m)
{
return mat3(vec3(m[0].xy, 0.0), vec3(m[1].xy, 0.0), vec3(0.0, 0.0, 1.0));
}
mat4 MAT4x4(vec4 a, vec4 b, vec4 c, vec4 d)
{
return mat4(a, b, c, d);
}
mat4 MAT4x4(float a1,
float a2,
float a3,
float a4,
float b1,
float b2,
float b3,
float b4,
float c1,
float c2,
float c3,
float c4,
float d1,
float d2,
float d3,
float d4)
{
return mat4(
vec4(a1, a2, a3, a4), vec4(b1, b2, b3, b4), vec4(c1, c2, c3, c4), vec4(d1, d2, d3, d4));
}
mat4 MAT4x4(float f)
{
return mat4(f);
}
mat4 MAT4x4(mat3 m)
{
return mat4(
vec4(m[0].xyz, 0.0), vec4(m[1].xyz, 0.0), vec4(m[2].xyz, 0.0), vec4(0.0, 0.0, 0.0, 1.0));
}
mat4 MAT4x4(mat2 m)
{
return mat4(vec4(m[0].xy, 0.0, 0.0),
vec4(m[1].xy, 0.0, 0.0),
vec4(0.0, 0.0, 1.0, 0.0),
vec4(0.0, 0.0, 0.0, 1.0));
}
#define MAT2 MAT2x2
#define MAT3 MAT3x3
#define MAT4 MAT4x4
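Putting the earlier regex pass and these fallbacks together (a sketch of the intended flow, using only constructs shown in this diff):

/* GLSL source:       mat3 n = mat3(some_mat4);  */
/* After regex pass:  mat3 n = MAT3(some_mat4);  */
/* MAT3 is #defined to MAT3x3, and the mat4 overload above returns mat3(m[0].xyz, m[1].xyz, m[2].xyz). */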

View File

@ -73,7 +73,11 @@ static ImBuf *imb_oiio_load_image(
ibuf = IMB_allocImBuf(width, height, is_alpha ? 32 : 24, flags | IB_rect);
try {
if (!in->read_image(TypeDesc::UINT8,
if (!in->read_image(0,
0,
0,
components,
TypeDesc::UINT8,
(uchar *)ibuf->rect + (height - 1) * scanlinesize,
AutoStride,
-scanlinesize,
@ -113,7 +117,11 @@ static ImBuf *imb_oiio_load_image_float(
ibuf = IMB_allocImBuf(width, height, is_alpha ? 32 : 24, flags | IB_rectfloat);
try {
if (!in->read_image(TypeDesc::FLOAT,
if (!in->read_image(0,
0,
0,
components,
TypeDesc::FLOAT,
(uchar *)ibuf->rect_float + (height - 1) * scanlinesize,
AutoStride,
-scanlinesize,
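For context (hedged: the parameter names below are inferred from the arguments visible above and OpenImageIO's documented ImageInput::read_image() overload), the newer API takes the subimage, miplevel and channel range explicitly before the pixel data type; `pixels` is a placeholder for the destination buffer:

/* in->read_image(subimage, miplevel, chbegin, chend, format, data, xstride, ystride, zstride); */
in->read_image(0, 0, 0, components, TypeDesc::FLOAT, pixels, AutoStride, -scanlinesize, AutoStride);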

View File

@ -1017,9 +1017,9 @@ bool DocumentImporter::writeLight(const COLLADAFW::Light *light)
et->setData("clipsta", &(lamp->clipsta));
et->setData("clipend", &(lamp->clipend));
et->setData("bias", &(lamp->bias));
et->setData("soft", &(lamp->soft));
et->setData("bufsize", &(lamp->bufsize));
et->setData("buffers", &(lamp->buffers));
et->setData("radius", &(lamp->radius));
et->setData("area_shape", &(lamp->area_shape));
et->setData("area_size", &(lamp->area_size));
et->setData("area_sizey", &(lamp->area_sizey));

View File

@ -129,10 +129,10 @@ bool LightsExporter::exportBlenderProfile(COLLADASW::Light &cla, Light *la)
cla.addExtraTechniqueParameter("blender", "clipsta", la->clipsta);
cla.addExtraTechniqueParameter("blender", "clipend", la->clipend);
cla.addExtraTechniqueParameter("blender", "bias", la->bias);
cla.addExtraTechniqueParameter("blender", "soft", la->soft);
cla.addExtraTechniqueParameter("blender", "bufsize", la->bufsize);
cla.addExtraTechniqueParameter("blender", "samp", la->samp);
cla.addExtraTechniqueParameter("blender", "buffers", la->buffers);
cla.addExtraTechniqueParameter("blender", "radius", la->radius);
cla.addExtraTechniqueParameter("blender", "area_shape", la->area_shape);
cla.addExtraTechniqueParameter("blender", "area_size", la->area_size);
cla.addExtraTechniqueParameter("blender", "area_sizey", la->area_sizey);

View File

@ -175,7 +175,7 @@ void USDLightReader::read_object_data(Main *bmain, const double motionSampleTime
if (pxr::UsdAttribute radius_attr = sphere_light.GetRadiusAttr()) {
float radius = 0.0f;
if (radius_attr.Get(&radius, motionSampleTime)) {
blight->area_size = radius;
blight->radius = radius;
}
}
}
@ -192,7 +192,7 @@ void USDLightReader::read_object_data(Main *bmain, const double motionSampleTime
if (pxr::UsdAttribute radius_attr = sphere_light.GetRadiusAttr()) {
float radius = 0.0f;
if (radius_attr.Get(&radius, motionSampleTime)) {
blight->area_size = radius;
blight->radius = radius;
}
}

View File

@ -80,7 +80,7 @@ void USDLightWriter::do_write(HierarchyContext &context)
break;
case LA_LOCAL: {
pxr::UsdLuxSphereLight sphere_light = pxr::UsdLuxSphereLight::Define(stage, usd_path);
sphere_light.CreateRadiusAttr().Set(light->area_size, timecode);
sphere_light.CreateRadiusAttr().Set(light->radius, timecode);
#if PXR_VERSION >= 2111
usd_light_api = sphere_light.LightAPI();
#else

View File

@ -28,10 +28,8 @@
.bufsize = 512, \
.clipsta = 0.05f, \
.clipend = 40.0f, \
.bleedexp = 2.5f, \
.samp = 3, \
.bias = 1.0f, \
.soft = 3.0f, \
.area_size = 0.25f, \
.area_sizey = 0.25f, \
.area_sizez = 0.25f, \
@ -47,7 +45,6 @@
.cascade_fade = 0.1f, \
.contact_dist = 0.2f, \
.contact_bias = 0.03f, \
.contact_spread = 0.2f, \
.contact_thickness = 0.2f, \
.diff_fac = 1.0f, \
.spec_fac = 1.0f, \

View File

@ -48,9 +48,7 @@ typedef struct Light {
float clipsta, clipend;
float bias;
float soft; /* DEPRECATED kept for compatibility. */
float bleedbias; /* DEPRECATED kept for compatibility. */
float bleedexp; /* DEPRECATED kept for compatibility. */
float radius;
short bufsize, samp, buffers, filtertype;
char bufflag, buftype;
@ -66,7 +64,6 @@ typedef struct Light {
/** Old animation system, deprecated for 2.5. */
struct Ipo *ipo DNA_DEPRECATED;
short pr_texture, use_nodes;
char _pad6[4];
/* Eevee */
float cascade_max_dist;
@ -76,7 +73,6 @@ typedef struct Light {
float contact_dist;
float contact_bias;
float contact_spread; /* DEPRECATED kept for compatibility. */
float contact_thickness;
float diff_fac, volume_fac;

View File

@ -254,6 +254,7 @@ typedef enum eNodeSocketInOut {
SOCK_IN = 1 << 0,
SOCK_OUT = 1 << 1,
} eNodeSocketInOut;
ENUM_OPERATORS(eNodeSocketInOut, SOCK_OUT);
/** #bNodeSocket.flag, first bit is selection. */
typedef enum eNodeSocketFlag {

View File

@ -55,6 +55,7 @@ set(SRC
../../../../intern/guardedalloc/intern/mallocn.c
../../../../intern/guardedalloc/intern/mallocn_guarded_impl.c
../../../../intern/guardedalloc/intern/mallocn_lockfree_impl.c
../../../../intern/guardedalloc/intern/memory_usage.cc
${dna_header_include_file}
${dna_header_string_file}
)

Some files were not shown because too many files have changed in this diff.