Merge branch 'master' into asset-browser-grid-view

Julian Eisel 2022-02-14 17:57:16 +01:00
commit d46357dd25
39 changed files with 346 additions and 400 deletions

View File

@ -44,8 +44,6 @@ Project Files
Package Targets
* package_debian: Build a debian package.
* package_pacman: Build an arch linux pacman package.
* package_archive: Build an archive package.
Testing Targets
@ -204,7 +202,7 @@ endif
# in libraries, or python 2 for running make update to get it.
ifeq ($(OS_NCASE),darwin)
ifeq (, $(shell command -v $(PYTHON)))
PYTHON:=$(DEPS_INSTALL_DIR)/python/bin/python3.7m
PYTHON:=$(DEPS_INSTALL_DIR)/python/bin/python3.10
ifeq (, $(shell command -v $(PYTHON)))
PYTHON:=python
endif
@ -313,7 +311,7 @@ CMAKE_CONFIG = cmake $(CMAKE_CONFIG_ARGS) \
# -----------------------------------------------------------------------------
# Tool for 'make config'
# X11 spesific
# X11 specific.
ifdef DISPLAY
CMAKE_CONFIG_TOOL = cmake-gui
else
@ -390,11 +388,6 @@ help: .FORCE
# -----------------------------------------------------------------------------
# Packages
#
package_debian: .FORCE
cd build_files/package_spec ; DEB_BUILD_OPTIONS="parallel=$(NPROCS)" sh ./build_debian.sh
package_pacman: .FORCE
cd build_files/package_spec/pacman ; MAKEFLAGS="-j$(NPROCS)" makepkg
package_archive: .FORCE
make -C "$(BUILD_DIR)" -s package_archive
@ -405,21 +398,21 @@ package_archive: .FORCE
# Tests
#
test: .FORCE
$(PYTHON) ./build_files/utils/make_test.py "$(BUILD_DIR)"
@$(PYTHON) ./build_files/utils/make_test.py "$(BUILD_DIR)"
# run pep8 checks on scripts we distribute.
test_pep8: .FORCE
$(PYTHON) tests/python/pep8.py > test_pep8.log 2>&1
@$(PYTHON) tests/python/pep8.py > test_pep8.log 2>&1
@echo "written: test_pep8.log"
# run some checks on our cmakefiles.
# run some checks on our CMAKE files.
test_cmake: .FORCE
$(PYTHON) build_files/cmake/cmake_consistency_check.py > test_cmake_consistency.log 2>&1
@$(PYTHON) build_files/cmake/cmake_consistency_check.py > test_cmake_consistency.log 2>&1
@echo "written: test_cmake_consistency.log"
# run deprecation tests, see if we have anything to remove.
test_deprecated: .FORCE
$(PYTHON) tests/check_deprecated.py
@$(PYTHON) tests/check_deprecated.py
# -----------------------------------------------------------------------------
@ -441,44 +434,44 @@ project_eclipse: .FORCE
#
check_cppcheck: .FORCE
$(CMAKE_CONFIG)
cd "$(BUILD_DIR)" ; \
@$(CMAKE_CONFIG)
@cd "$(BUILD_DIR)" ; \
$(PYTHON) \
"$(BLENDER_DIR)/build_files/cmake/cmake_static_check_cppcheck.py" 2> \
"$(BLENDER_DIR)/check_cppcheck.txt"
@echo "written: check_cppcheck.txt"
check_clang_array: .FORCE
$(CMAKE_CONFIG)
cd "$(BUILD_DIR)" ; \
@$(CMAKE_CONFIG)
@cd "$(BUILD_DIR)" ; \
$(PYTHON) "$(BLENDER_DIR)/build_files/cmake/cmake_static_check_clang_array.py"
check_splint: .FORCE
$(CMAKE_CONFIG)
cd "$(BUILD_DIR)" ; \
@$(CMAKE_CONFIG)
@cd "$(BUILD_DIR)" ; \
$(PYTHON) "$(BLENDER_DIR)/build_files/cmake/cmake_static_check_splint.py"
check_sparse: .FORCE
$(CMAKE_CONFIG)
cd "$(BUILD_DIR)" ; \
@$(CMAKE_CONFIG)
@cd "$(BUILD_DIR)" ; \
$(PYTHON) "$(BLENDER_DIR)/build_files/cmake/cmake_static_check_sparse.py"
check_smatch: .FORCE
$(CMAKE_CONFIG)
cd "$(BUILD_DIR)" ; \
@$(CMAKE_CONFIG)
@cd "$(BUILD_DIR)" ; \
$(PYTHON) "$(BLENDER_DIR)/build_files/cmake/cmake_static_check_smatch.py"
check_mypy: .FORCE
$(PYTHON) "$(BLENDER_DIR)/source/tools/check_source/check_mypy.py"
@$(PYTHON) "$(BLENDER_DIR)/source/tools/check_source/check_mypy.py"
check_spelling_py: .FORCE
cd "$(BUILD_DIR)" ; \
@cd "$(BUILD_DIR)" ; \
PYTHONIOENCODING=utf_8 $(PYTHON) \
"$(BLENDER_DIR)/source/tools/check_source/check_spelling.py" \
"$(BLENDER_DIR)/release/scripts"
check_spelling_c: .FORCE
cd "$(BUILD_DIR)" ; \
@cd "$(BUILD_DIR)" ; \
PYTHONIOENCODING=utf_8 $(PYTHON) \
"$(BLENDER_DIR)/source/tools/check_source/check_spelling.py" \
--cache-file=$(CHECK_SPELLING_CACHE) \
@ -488,18 +481,18 @@ check_spelling_c: .FORCE
"$(BLENDER_DIR)/intern/ghost" \
check_spelling_osl: .FORCE
cd "$(BUILD_DIR)" ;\
@cd "$(BUILD_DIR)" ; \
PYTHONIOENCODING=utf_8 $(PYTHON) \
"$(BLENDER_DIR)/source/tools/check_source/check_spelling.py" \
--cache-file=$(CHECK_SPELLING_CACHE) \
"$(BLENDER_DIR)/intern/cycles/kernel/shaders"
check_descriptions: .FORCE
$(BLENDER_BIN) --background -noaudio --factory-startup --python \
@$(BLENDER_BIN) --background -noaudio --factory-startup --python \
"$(BLENDER_DIR)/source/tools/check_source/check_descriptions.py"
check_licenses: .FORCE
PYTHONIOENCODING=utf_8 $(PYTHON) \
@PYTHONIOENCODING=utf_8 $(PYTHON) \
"$(BLENDER_DIR)/source/tools/check_source/check_licenses.py" \
"--show-headers=$(SHOW_HEADERS)"
@ -509,37 +502,37 @@ check_licenses: .FORCE
#
source_archive: .FORCE
python3 ./build_files/utils/make_source_archive.py
@$(PYTHON) ./build_files/utils/make_source_archive.py
source_archive_complete: .FORCE
cmake \
@cmake \
-S "$(BLENDER_DIR)/build_files/build_environment" -B"$(BUILD_DIR)/source_archive" \
-DCMAKE_BUILD_TYPE_INIT:STRING=$(BUILD_TYPE) -DPACKAGE_USE_UPSTREAM_SOURCES=OFF
# This assumes CMake is still using a default `PACKAGE_DIR` variable:
python3 ./build_files/utils/make_source_archive.py --include-packages "$(BUILD_DIR)/source_archive/packages"
@$(PYTHON) ./build_files/utils/make_source_archive.py --include-packages "$(BUILD_DIR)/source_archive/packages"
INKSCAPE_BIN?="inkscape"
icons: .FORCE
BLENDER_BIN=$(BLENDER_BIN) INKSCAPE_BIN=$(INKSCAPE_BIN) \
@BLENDER_BIN=$(BLENDER_BIN) INKSCAPE_BIN=$(INKSCAPE_BIN) \
"$(BLENDER_DIR)/release/datafiles/blender_icons_update.py"
INKSCAPE_BIN=$(INKSCAPE_BIN) \
@INKSCAPE_BIN=$(INKSCAPE_BIN) \
"$(BLENDER_DIR)/release/datafiles/prvicons_update.py"
INKSCAPE_BIN=$(INKSCAPE_BIN) \
@INKSCAPE_BIN=$(INKSCAPE_BIN) \
"$(BLENDER_DIR)/release/datafiles/alert_icons_update.py"
icons_geom: .FORCE
BLENDER_BIN=$(BLENDER_BIN) \
@BLENDER_BIN=$(BLENDER_BIN) \
"$(BLENDER_DIR)/release/datafiles/blender_icons_geom_update.py"
update: .FORCE
$(PYTHON) ./build_files/utils/make_update.py
@$(PYTHON) ./build_files/utils/make_update.py
update_code: .FORCE
$(PYTHON) ./build_files/utils/make_update.py --no-libraries
@$(PYTHON) ./build_files/utils/make_update.py --no-libraries
format: .FORCE
PATH="../lib/${OS_NCASE}_${CPU}/llvm/bin/:../lib/${OS_NCASE}_centos7_${CPU}/llvm/bin/:../lib/${OS_NCASE}/llvm/bin/:$(PATH)" \
@PATH="../lib/${OS_NCASE}_${CPU}/llvm/bin/:../lib/${OS_NCASE}_centos7_${CPU}/llvm/bin/:../lib/${OS_NCASE}/llvm/bin/:$(PATH)" \
$(PYTHON) source/tools/utils_maintenance/clang_format_paths.py $(PATHS)
@ -549,25 +542,25 @@ format: .FORCE
# Simple version of ./doc/python_api/sphinx_doc_gen.sh with no PDF generation.
doc_py: .FORCE
ASAN_OPTIONS=halt_on_error=0:${ASAN_OPTIONS} \
@ASAN_OPTIONS=halt_on_error=0:${ASAN_OPTIONS} \
$(BLENDER_BIN) \
--background -noaudio --factory-startup \
--python doc/python_api/sphinx_doc_gen.py
sphinx-build -b html -j $(NPROCS) doc/python_api/sphinx-in doc/python_api/sphinx-out
@sphinx-build -b html -j $(NPROCS) doc/python_api/sphinx-in doc/python_api/sphinx-out
@echo "docs written into: '$(BLENDER_DIR)/doc/python_api/sphinx-out/index.html'"
doc_doxy: .FORCE
cd doc/doxygen; doxygen Doxyfile
@cd doc/doxygen; doxygen Doxyfile
@echo "docs written into: '$(BLENDER_DIR)/doc/doxygen/html/index.html'"
doc_dna: .FORCE
$(BLENDER_BIN) \
@$(BLENDER_BIN) \
--background -noaudio --factory-startup \
--python doc/blender_file_format/BlendFileDnaExporter_25.py
@echo "docs written into: '$(BLENDER_DIR)/doc/blender_file_format/dna.html'"
doc_man: .FORCE
$(PYTHON) doc/manpage/blender.1.py $(BLENDER_BIN) blender.1
@$(PYTHON) doc/manpage/blender.1.py --blender="$(BLENDER_BIN)" --output=blender.1 --verbose
help_features: .FORCE
@$(PYTHON) "$(BLENDER_DIR)/build_files/cmake/cmake_print_build_options.py" $(BLENDER_DIR)"/CMakeLists.txt"

View File

@ -79,7 +79,9 @@ def blender_extract_info(blender_bin: str) -> Dict[str, str]:
}
def man_page_from_blender_help(fh: TextIO, blender_bin: str) -> None:
def man_page_from_blender_help(fh: TextIO, blender_bin: str, verbose: bool) -> None:
if verbose:
print("Extracting help text:", blender_bin)
blender_info = blender_extract_info(blender_bin)
# Header Content.
@ -178,6 +180,13 @@ def create_argparse() -> argparse.ArgumentParser:
required=True,
help="Path to the blender binary."
)
parser.add_argument(
"--verbose",
default=False,
required=False,
action='store_true',
help="Print additional progress."
)
return parser
@ -188,9 +197,12 @@ def main() -> None:
blender_bin = args.blender
output_filename = args.output
verbose = args.verbose
with open(output_filename, "w", encoding="utf-8") as fh:
man_page_from_blender_help(fh, blender_bin)
man_page_from_blender_help(fh, blender_bin, verbose)
if verbose:
print("Written:", output_filename)
if __name__ == "__main__":

View File

@ -26,33 +26,20 @@ bool device_metal_init()
return true;
}
static int device_metal_get_num_devices_safe(uint32_t *num_devices)
{
*num_devices = MTLCopyAllDevices().count;
return 0;
}
void device_metal_info(vector<DeviceInfo> &devices)
{
uint32_t num_devices = 0;
device_metal_get_num_devices_safe(&num_devices);
if (num_devices == 0) {
return;
}
vector<MetalPlatformDevice> usable_devices;
MetalInfo::get_usable_devices(&usable_devices);
auto usable_devices = MetalInfo::get_usable_devices();
/* Devices are numbered consecutively across platforms. */
set<string> unique_ids;
int device_index = 0;
for (MetalPlatformDevice &device : usable_devices) {
for (id<MTLDevice> &device : usable_devices) {
/* Compute unique ID for persistent user preferences. */
const string &device_name = device.device_name;
string device_name = [device.name UTF8String];
string id = string("METAL_") + device_name;
/* Hardware ID might not be unique, add device number in that case. */
if (unique_ids.find(id) != unique_ids.end()) {
id += string_printf("_ID_%d", num_devices);
id += string_printf("_ID_%d", device_index);
}
unique_ids.insert(id);
@ -81,15 +68,13 @@ void device_metal_info(vector<DeviceInfo> &devices)
string device_metal_capabilities()
{
string result = "";
string error_msg = "";
uint32_t num_devices = 0;
assert(device_metal_get_num_devices_safe(&num_devices));
auto allDevices = MTLCopyAllDevices();
uint32_t num_devices = allDevices.count;
if (num_devices == 0) {
return "No Metal devices found\n";
}
result += string_printf("Number of devices: %u\n", num_devices);
NSArray<id<MTLDevice>> *allDevices = MTLCopyAllDevices();
for (id<MTLDevice> device in allDevices) {
result += string_printf("\t\tDevice: %s\n", [device.name UTF8String]);
}

View File

@ -40,16 +40,10 @@ MetalDevice::MetalDevice(const DeviceInfo &info, Stats &stats, Profiler &profile
mtlDevId = info.num;
/* select chosen device */
vector<MetalPlatformDevice> usable_devices;
MetalInfo::get_usable_devices(&usable_devices);
if (usable_devices.size() == 0) {
set_error("Metal: no devices found.");
return;
}
auto usable_devices = MetalInfo::get_usable_devices();
assert(mtlDevId < usable_devices.size());
MetalPlatformDevice &platform_device = usable_devices[mtlDevId];
mtlDevice = platform_device.device_id;
device_name = platform_device.device_name;
mtlDevice = usable_devices[mtlDevId];
device_name = [mtlDevice.name UTF8String];
device_vendor = MetalInfo::get_vendor_from_device_name(device_name);
assert(device_vendor != METAL_GPU_UNKNOWN);
metal_printf("Creating new Cycles device for Metal: %s\n", device_name.c_str());

View File

@ -23,33 +23,10 @@ enum MetalGPUVendor {
METAL_GPU_INTEL = 3,
};
/* Retains a named MTLDevice for device enumeration. */
struct MetalPlatformDevice {
MetalPlatformDevice(id<MTLDevice> device, const string &device_name)
: device_id(device), device_name(device_name)
{
[device_id retain];
}
~MetalPlatformDevice()
{
[device_id release];
}
id<MTLDevice> device_id;
string device_name;
};
/* Contains static Metal helper functions. */
struct MetalInfo {
static bool device_version_check(id<MTLDevice> device);
static void get_usable_devices(vector<MetalPlatformDevice> *usable_devices);
static vector<id<MTLDevice>> const &get_usable_devices();
static MetalGPUVendor get_vendor_from_device_name(string const &device_name);
/* Platform information. */
static bool get_num_devices(uint32_t *num_platforms);
static uint32_t get_num_devices();
static bool get_device_name(id<MTLDevice> device_id, string *device_name);
static string get_device_name(id<MTLDevice> device_id);
};
/* Pool of MTLBuffers whose lifetime is linked to a single MTLCommandBuffer */

View File

@ -30,83 +30,45 @@ MetalGPUVendor MetalInfo::get_vendor_from_device_name(string const &device_name)
return METAL_GPU_UNKNOWN;
}
bool MetalInfo::device_version_check(id<MTLDevice> device)
vector<id<MTLDevice>> const &MetalInfo::get_usable_devices()
{
/* Metal Cycles doesn't work correctly on macOS versions older than 12.0 */
if (@available(macos 12.0, *)) {
MetalGPUVendor vendor = get_vendor_from_device_name([[device name] UTF8String]);
static vector<id<MTLDevice>> usable_devices;
static bool already_enumerated = false;
/* Metal Cycles works on Apple Silicon GPUs at present */
return (vendor == METAL_GPU_APPLE);
if (already_enumerated) {
return usable_devices;
}
return false;
}
metal_printf("Usable Metal devices:\n");
for (id<MTLDevice> device in MTLCopyAllDevices()) {
const char *device_name = [device.name UTF8String];
void MetalInfo::get_usable_devices(vector<MetalPlatformDevice> *usable_devices)
{
static bool first_time = true;
# define FIRST_VLOG(severity) \
if (first_time) \
VLOG(severity)
MetalGPUVendor vendor = get_vendor_from_device_name(device_name);
bool usable = false;
usable_devices->clear();
NSArray<id<MTLDevice>> *allDevices = MTLCopyAllDevices();
for (id<MTLDevice> device in allDevices) {
string device_name;
if (!get_device_name(device, &device_name)) {
FIRST_VLOG(2) << "Failed to get device name, ignoring.";
continue;
if (@available(macos 12.2, *)) {
usable |= (vendor == METAL_GPU_APPLE);
}
static const char *forceIntelStr = getenv("CYCLES_METAL_FORCE_INTEL");
bool forceIntel = forceIntelStr ? (atoi(forceIntelStr) != 0) : false;
if (forceIntel && device_name.find("Intel") == string::npos) {
FIRST_VLOG(2) << "CYCLES_METAL_FORCE_INTEL causing non-Intel device " << device_name
<< " to be ignored.";
continue;
if (@available(macos 12.3, *)) {
usable |= (vendor == METAL_GPU_AMD);
}
if (!device_version_check(device)) {
FIRST_VLOG(2) << "Ignoring device " << device_name << " due to too old compiler version.";
continue;
if (usable) {
metal_printf("- %s\n", device_name);
[device retain];
usable_devices.push_back(device);
}
else {
metal_printf(" (skipping \"%s\")\n", device_name);
}
FIRST_VLOG(2) << "Adding new device " << device_name << ".";
string hardware_id;
usable_devices->push_back(MetalPlatformDevice(device, device_name));
}
first_time = false;
}
bool MetalInfo::get_num_devices(uint32_t *num_devices)
{
*num_devices = MTLCopyAllDevices().count;
return true;
}
uint32_t MetalInfo::get_num_devices()
{
uint32_t num_devices;
if (!get_num_devices(&num_devices)) {
return 0;
if (usable_devices.empty()) {
metal_printf(" No usable Metal devices found\n");
}
return num_devices;
}
already_enumerated = true;
bool MetalInfo::get_device_name(id<MTLDevice> device, string *platform_name)
{
*platform_name = [device.name UTF8String];
return true;
}
string MetalInfo::get_device_name(id<MTLDevice> device)
{
string platform_name;
if (!get_device_name(device, &platform_name)) {
return "";
}
return platform_name;
return usable_devices;
}
id<MTLBuffer> MetalBufferPool::get_buffer(id<MTLDevice> device,
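For reference, the shape of the new enumerate-once API above as a minimal, self-contained C++ sketch: a function-local static holds the device list, a guard flag ensures enumeration runs only once, and every caller shares the same const reference. `DeviceHandle` is a stand-in for id<MTLDevice>; the vendor/OS-version filtering and the [device retain] that keeps the cached handles alive are elided here.

    #include <vector>

    /* Stand-in for id<MTLDevice>; the real code caches Objective-C handles. */
    using DeviceHandle = const void *;

    static const std::vector<DeviceHandle> &get_usable_devices_sketch()
    {
      static std::vector<DeviceHandle> usable_devices;
      static bool already_enumerated = false;

      if (!already_enumerated) {
        /* Platform enumeration would happen here (MTLCopyAllDevices() in the
         * real code), filtering by vendor and minimum macOS version, and
         * retaining each accepted device. */
        already_enumerated = true;
      }
      return usable_devices;
    }

    int main()
    {
      /* Both calls return the same cached vector; enumeration ran once.
       * Like the real code, this is not guarded against a concurrent first call. */
      const auto &a = get_usable_devices_sketch();
      const auto &b = get_usable_devices_sketch();
      return &a == &b ? 0 : 1;
    }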

View File

@ -193,9 +193,6 @@ ccl_device_forceinline float3 MF_FUNCTION_FULL_NAME(mf_sample)(float3 wi,
bool outside = true;
float F0 = fresnel_dielectric_cos(1.0f, eta);
if (use_fresnel) {
throughput = interpolate_fresnel_color(wi, normalize(wi + wr), eta, F0, cspec0);
}
int order;
for (order = 0; order < 10; order++) {

View File

@ -282,7 +282,7 @@ ccl_gpu_kernel_threads(GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE)
int kernel_index);
ccl_gpu_kernel_lambda_pass.kernel_index = kernel_index;
gpu_parallel_active_index_array<GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE>(
gpu_parallel_active_index_array(GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE,
num_states, indices, num_indices, ccl_gpu_kernel_lambda_pass);
}
@ -297,7 +297,7 @@ ccl_gpu_kernel_threads(GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE)
int kernel_index);
ccl_gpu_kernel_lambda_pass.kernel_index = kernel_index;
gpu_parallel_active_index_array<GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE>(
gpu_parallel_active_index_array(GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE,
num_states, indices, num_indices, ccl_gpu_kernel_lambda_pass);
}
@ -309,7 +309,7 @@ ccl_gpu_kernel_threads(GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE)
{
ccl_gpu_kernel_lambda(INTEGRATOR_STATE(state, path, queued_kernel) != 0);
gpu_parallel_active_index_array<GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE>(
gpu_parallel_active_index_array(GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE,
num_states, indices, num_indices, ccl_gpu_kernel_lambda_pass);
}
@ -322,7 +322,7 @@ ccl_gpu_kernel_threads(GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE)
{
ccl_gpu_kernel_lambda(INTEGRATOR_STATE(state, path, queued_kernel) == 0);
gpu_parallel_active_index_array<GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE>(
gpu_parallel_active_index_array(GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE,
num_states, indices + indices_offset, num_indices, ccl_gpu_kernel_lambda_pass);
}
@ -335,7 +335,7 @@ ccl_gpu_kernel_threads(GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE)
{
ccl_gpu_kernel_lambda(INTEGRATOR_STATE(state, shadow_path, queued_kernel) == 0);
gpu_parallel_active_index_array<GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE>(
gpu_parallel_active_index_array(GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE,
num_states, indices + indices_offset, num_indices, ccl_gpu_kernel_lambda_pass);
}
@ -378,7 +378,7 @@ ccl_gpu_kernel_threads(GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE)
int num_active_paths);
ccl_gpu_kernel_lambda_pass.num_active_paths = num_active_paths;
gpu_parallel_active_index_array<GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE>(
gpu_parallel_active_index_array(GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE,
num_states, indices, num_indices, ccl_gpu_kernel_lambda_pass);
}
@ -411,7 +411,7 @@ ccl_gpu_kernel_threads(GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE)
int num_active_paths);
ccl_gpu_kernel_lambda_pass.num_active_paths = num_active_paths;
gpu_parallel_active_index_array<GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE>(
gpu_parallel_active_index_array(GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE,
num_states, indices, num_indices, ccl_gpu_kernel_lambda_pass);
}

View File

@ -18,44 +18,26 @@ CCL_NAMESPACE_BEGIN
# define GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE 512
#endif
#ifdef __KERNEL_METAL__
struct ActiveIndexContext {
ActiveIndexContext(int _thread_index,
int _global_index,
int _threadgroup_size,
int _simdgroup_size,
int _simd_lane_index,
int _simd_group_index,
int _num_simd_groups,
threadgroup int *_simdgroup_offset)
: thread_index(_thread_index),
global_index(_global_index),
blocksize(_threadgroup_size),
ccl_gpu_warp_size(_simdgroup_size),
thread_warp(_simd_lane_index),
warp_index(_simd_group_index),
num_warps(_num_simd_groups),
warp_offset(_simdgroup_offset)
{
}
const int thread_index, global_index, blocksize, ccl_gpu_warp_size, thread_warp, warp_index,
num_warps;
threadgroup int *warp_offset;
template<uint blocksizeDummy, typename IsActiveOp>
void active_index_array(const uint num_states,
ccl_global int *indices,
ccl_global int *num_indices,
IsActiveOp is_active_op)
{
const uint state_index = global_index;
#else
#ifndef __KERNEL_METAL__
template<uint blocksize, typename IsActiveOp>
__device__ void gpu_parallel_active_index_array(const uint num_states,
ccl_global int *indices,
ccl_global int *num_indices,
IsActiveOp is_active_op)
__device__
#endif
void gpu_parallel_active_index_array_impl(const uint num_states,
ccl_global int *indices,
ccl_global int *num_indices,
#ifdef __KERNEL_METAL__
const uint is_active,
const uint blocksize,
const int thread_index,
const uint state_index,
const int ccl_gpu_warp_size,
const int thread_warp,
const int warp_index,
const int num_warps,
threadgroup int *warp_offset)
{
#else
IsActiveOp is_active_op)
{
extern ccl_gpu_shared int warp_offset[];
@ -66,61 +48,59 @@ __device__ void gpu_parallel_active_index_array(const uint num_states,
const uint num_warps = blocksize / ccl_gpu_warp_size;
const uint state_index = ccl_gpu_block_idx_x * blocksize + thread_index;
/* Test if state corresponding to this thread is active. */
const uint is_active = (state_index < num_states) ? is_active_op(state_index) : 0;
#endif
/* Test if state corresponding to this thread is active. */
const uint is_active = (state_index < num_states) ? is_active_op(state_index) : 0;
/* For each thread within a warp compute how many other active states precede it. */
const uint thread_offset = popcount(ccl_gpu_ballot(is_active) &
ccl_gpu_thread_mask(thread_warp));
/* For each thread within a warp compute how many other active states precede it. */
const uint thread_offset = popcount(ccl_gpu_ballot(is_active) &
ccl_gpu_thread_mask(thread_warp));
/* Last thread in warp stores number of active states for each warp. */
if (thread_warp == ccl_gpu_warp_size - 1) {
warp_offset[warp_index] = thread_offset + is_active;
}
ccl_gpu_syncthreads();
/* Last thread in block converts per-warp sizes to offsets, increments global size of
* index array and gets offset to write to. */
if (thread_index == blocksize - 1) {
/* TODO: parallelize this. */
int offset = 0;
for (int i = 0; i < num_warps; i++) {
int num_active = warp_offset[i];
warp_offset[i] = offset;
offset += num_active;
}
const uint block_num_active = warp_offset[warp_index] + thread_offset + is_active;
warp_offset[num_warps] = atomic_fetch_and_add_uint32(num_indices, block_num_active);
}
ccl_gpu_syncthreads();
/* Write to index array. */
if (is_active) {
const uint block_offset = warp_offset[num_warps];
indices[block_offset + warp_offset[warp_index] + thread_offset] = state_index;
}
/* Last thread in warp stores number of active states for each warp. */
if (thread_warp == ccl_gpu_warp_size - 1) {
warp_offset[warp_index] = thread_offset + is_active;
}
#ifdef __KERNEL_METAL__
}; /* end class ActiveIndexContext */
ccl_gpu_syncthreads();
/* Last thread in block converts per-warp sizes to offsets, increments global size of
* index array and gets offset to write to. */
if (thread_index == blocksize - 1) {
/* TODO: parallelize this. */
int offset = 0;
for (int i = 0; i < num_warps; i++) {
int num_active = warp_offset[i];
warp_offset[i] = offset;
offset += num_active;
}
const uint block_num_active = warp_offset[warp_index] + thread_offset + is_active;
warp_offset[num_warps] = atomic_fetch_and_add_uint32(num_indices, block_num_active);
}
ccl_gpu_syncthreads();
/* Write to index array. */
if (is_active) {
const uint block_offset = warp_offset[num_warps];
indices[block_offset + warp_offset[warp_index] + thread_offset] = state_index;
}
}
#ifdef __KERNEL_METAL__
# define gpu_parallel_active_index_array(dummy, num_states, indices, num_indices, is_active_op) \
const uint is_active = (ccl_gpu_global_id_x() < num_states) ? is_active_op(ccl_gpu_global_id_x()) : 0; \
gpu_parallel_active_index_array_impl(num_states, indices, num_indices, is_active, \
metal_local_size, metal_local_id, metal_global_id, simdgroup_size, simd_lane_index, \
simd_group_index, num_simd_groups, simdgroup_offset)
#else
# define gpu_parallel_active_index_array(blocksize, num_states, indices, num_indices, is_active_op) \
gpu_parallel_active_index_array_impl<blocksize>(num_states, indices, num_indices, is_active_op)
/* inject the required thread params into a struct, and redirect to its templated member function
*/
# define gpu_parallel_active_index_array \
ActiveIndexContext(metal_local_id, \
metal_global_id, \
metal_local_size, \
simdgroup_size, \
simd_lane_index, \
simd_group_index, \
num_simd_groups, \
simdgroup_offset) \
.active_index_array
#endif
CCL_NAMESPACE_END
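The refactor above replaces the Metal-only ActiveIndexContext wrapper with a shared gpu_parallel_active_index_array_impl(); the compaction algorithm itself is unchanged. A warp-level ballot plus popcount gives each active thread its rank among the active lanes below it, the last thread in the block turns per-warp counts into prefix offsets, and a single atomic reserves the block's slice of the output array. A CPU sketch of that compaction for one block, with a toy warp size of 8 (not the real SIMD width) and the atomic block offset taken as 0 since only one block runs:

    #include <bit>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main()
    {
      const int warp_size = 8, num_warps = 4, blocksize = warp_size * num_warps;
      const int num_states = 26; /* the last few threads fall outside the state array */

      std::vector<uint32_t> is_active(blocksize);
      for (int t = 0; t < blocksize; t++) {
        is_active[t] = (t < num_states) && (t % 3 == 0); /* arbitrary predicate */
      }

      /* Per-warp ballot: one bit per lane, as ccl_gpu_ballot() would return. */
      std::vector<uint32_t> ballot(num_warps, 0);
      for (int t = 0; t < blocksize; t++) {
        ballot[t / warp_size] |= is_active[t] << (t % warp_size);
      }

      /* warp_offset[w] first holds the active count of warp w... */
      std::vector<int> warp_offset(num_warps + 1, 0);
      for (int w = 0; w < num_warps; w++) {
        warp_offset[w] = std::popcount(ballot[w]);
      }
      /* ...then the last thread in the block converts counts to prefix offsets. */
      int block_num_active = 0;
      for (int w = 0; w < num_warps; w++) {
        const int count = warp_offset[w];
        warp_offset[w] = block_num_active;
        block_num_active += count;
      }
      /* In the kernel this slot gets atomic_fetch_and_add_uint32(num_indices, ...);
       * with a single block the base offset is simply 0. */
      warp_offset[num_warps] = 0;

      /* Each active thread writes its state index to a unique compacted slot. */
      std::vector<int> indices(block_num_active);
      for (int t = 0; t < blocksize; t++) {
        if (!is_active[t]) {
          continue;
        }
        const int thread_warp = t % warp_size, warp_index = t / warp_size;
        /* Active lanes strictly below this one in the same warp (the
         * ccl_gpu_thread_mask() + popcount step). */
        const int thread_offset = std::popcount(ballot[warp_index] & ((1u << thread_warp) - 1u));
        indices[warp_offset[num_warps] + warp_offset[warp_index] + thread_offset] = t;
      }

      for (int i : indices) {
        printf("%d ", i); /* prints 0 3 6 ... 24, densely packed */
      }
      printf("\n");
      return 0;
    }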

View File

@ -58,6 +58,12 @@ class PlayRenderedAnim(Operator):
# file_path = bpy.path.abspath(rd.filepath) # UNUSED
is_movie = rd.is_movie_format
views_format = rd.image_settings.views_format
if rd.use_multiview and views_format == 'INDIVIDUAL':
view_suffix = rd.views.active.file_suffix
else:
view_suffix = ""
# try and guess a command line if it doesn't exist
if preset == 'CUSTOM':
player_path = prefs.filepaths.animation_player
@ -66,16 +72,16 @@ class PlayRenderedAnim(Operator):
if is_movie is False and preset in {'FRAMECYCLER', 'RV', 'MPLAYER'}:
# replace the number with '#'
file_a = rd.frame_path(frame=0)
file_a = rd.frame_path(frame=0, view=view_suffix)
# TODO, make an api call for this
frame_tmp = 9
file_b = rd.frame_path(frame=frame_tmp)
file_b = rd.frame_path(frame=frame_tmp, view=view_suffix)
while len(file_a) == len(file_b):
frame_tmp = (frame_tmp * 10) + 9
file_b = rd.frame_path(frame=frame_tmp)
file_b = rd.frame_path(frame=int(frame_tmp / 10))
file_b = rd.frame_path(frame=frame_tmp, view=view_suffix)
file_b = rd.frame_path(frame=int(frame_tmp / 10), view=view_suffix)
file = ("".join((c if file_b[i] == c else "#")
for i, c in enumerate(file_a)))
@ -84,7 +90,7 @@ class PlayRenderedAnim(Operator):
else:
path_valid = True
# works for movies and images
file = rd.frame_path(frame=scene.frame_start, preview=scene.use_preview_range)
file = rd.frame_path(frame=scene.frame_start, preview=scene.use_preview_range, view=view_suffix)
file = bpy.path.abspath(file) # expand '//'
if not os.path.exists(file):
err_msg = tip_("File %r not found") % file
@ -93,7 +99,7 @@ class PlayRenderedAnim(Operator):
# one last try for full range if we used preview range
if scene.use_preview_range and not path_valid:
file = rd.frame_path(frame=scene.frame_start, preview=False)
file = rd.frame_path(frame=scene.frame_start, preview=False, view=view_suffix)
file = bpy.path.abspath(file) # expand '//'
err_msg = tip_("File %r not found") % file
if not os.path.exists(file):

View File

@ -578,14 +578,6 @@ class USERPREF_PT_system_sound(SystemPanel, CenterAlignMixIn, Panel):
class USERPREF_PT_system_cycles_devices(SystemPanel, CenterAlignMixIn, Panel):
bl_label = "Cycles Render Devices"
@classmethod
def poll(cls, _context):
# No GPU rendering on macOS x86_64 currently.
import platform
import sys
return bpy.app.build_options.cycles and \
(sys.platform != "darwin" or platform.machine() == "arm64")
def draw_centered(self, context, layout):
prefs = context.preferences

View File

@ -23,6 +23,7 @@ struct FCurve;
struct ID;
struct KS_Path;
struct KeyingSet;
struct LibraryForeachIDData;
struct ListBase;
struct Main;
struct NlaKeyframingContext;
@ -86,6 +87,9 @@ struct KS_Path *BKE_keyingset_find_path(struct KeyingSet *ks,
/* Copy all KeyingSets in the given list */
void BKE_keyingsets_copy(struct ListBase *newlist, const struct ListBase *list);
/** Process the ID pointers inside a scene's keyingsets, see `BKE_lib_query.h` for details. */
void BKE_keyingsets_foreach_id(struct LibraryForeachIDData *data, const struct ListBase *keyingsets);
/* Free the given Keying Set path */
void BKE_keyingset_free_path(struct KeyingSet *ks, struct KS_Path *ksp);

View File

@ -41,6 +41,7 @@
#include "BKE_fcurve.h"
#include "BKE_global.h"
#include "BKE_lib_id.h"
#include "BKE_lib_query.h"
#include "BKE_main.h"
#include "BKE_material.h"
#include "BKE_nla.h"
@ -249,6 +250,15 @@ void BKE_keyingsets_copy(ListBase *newlist, const ListBase *list)
}
}
void BKE_keyingsets_foreach_id(LibraryForeachIDData *data, const ListBase *keyingsets)
{
for (KeyingSet *ksn = keyingsets->first; ksn; ksn = ksn->next) {
for (KS_Path *kspn = ksn->paths.first; kspn; kspn = kspn->next) {
BKE_LIB_FOREACHID_PROCESS_ID(data, kspn->id, IDWALK_CB_NOP);
}
}
}
/* Freeing Tools --------------------------- */
void BKE_keyingset_free(KeyingSet *ks)

View File

@ -4361,7 +4361,7 @@ RenderResult *BKE_image_acquire_renderresult(Scene *scene, Image *ima)
}
else {
rr = BKE_image_get_renderslot(ima, ima->render_slot)->render;
ima->gpuflag |= IMA_GPU_REFRESH;
BKE_image_partial_update_mark_full_update(ima);
}
/* set proper views */
@ -5729,7 +5729,7 @@ void BKE_image_user_frame_calc(Image *ima, ImageUser *iuser, int cfra)
/* NOTE: a single texture and refresh doesn't really work when
* multiple image users may use different frames, this is to
* be improved with perhaps a GPU texture cache. */
ima->gpuflag |= IMA_GPU_REFRESH;
BKE_image_partial_update_mark_full_update(ima);
ima->gpuframenr = iuser->framenr;
}

View File

@ -389,18 +389,9 @@ static GPUTexture *image_get_gpu_texture(Image *ima,
ima->gpu_pass = requested_pass;
ima->gpu_layer = requested_layer;
ima->gpu_view = requested_view;
ima->gpuflag |= IMA_GPU_REFRESH;
}
#undef GPU_FLAGS_TO_CHECK
/* TODO(jbakker): We should replace the IMA_GPU_REFRESH flag with a call to
* BKE_image-partial_update_mark_full_update. Although the flag is quicker it leads to double
* administration. */
if ((ima->gpuflag & IMA_GPU_REFRESH) != 0) {
BKE_image_partial_update_mark_full_update(ima);
ima->gpuflag &= ~IMA_GPU_REFRESH;
}
if (ima->runtime.partial_update_user == nullptr) {
ima->runtime.partial_update_user = BKE_image_partial_update_create(ima);
}
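Several hunks in this commit replace raising IMA_GPU_REFRESH with a call to BKE_image_partial_update_mark_full_update(). A toy model (not Blender's actual partial-update API) of why that removes the "double administration" the old TODO mentions: instead of one shared dirty flag that a single consumer clears, the image keeps a change record that every registered user compares against the version it last processed, so each consumer sees the full update exactly once.

    #include <cstdio>

    /* Hypothetical model of the change-tracking direction; names are stand-ins. */
    struct ImageModel {
      unsigned changeset = 0;
      void mark_full_update() { changeset++; }
    };

    struct PartialUpdateUserModel {
      unsigned last_seen = 0;
      bool check_full_update(const ImageModel &ima)
      {
        const bool changed = (ima.changeset != last_seen);
        last_seen = ima.changeset;
        return changed;
      }
    };

    int main()
    {
      ImageModel ima;
      PartialUpdateUserModel gpu_user, compositor_user;
      ima.mark_full_update();
      printf("gpu: %d\n", gpu_user.check_full_update(ima));               /* 1: sees the update */
      printf("compositor: %d\n", compositor_user.check_full_update(ima)); /* 1: also sees it */
      printf("gpu again: %d\n", gpu_user.check_full_update(ima));         /* 0: already handled */
      return 0;
    }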

View File

@ -1468,7 +1468,8 @@ class NodeTreeMainUpdater {
while (!sockets_to_check.is_empty()) {
const SocketRef &in_out_socket = *sockets_to_check.pop();
const bNode &bnode = *in_out_socket.node().bnode();
const NodeRef &node = in_out_socket.node();
const bNode &bnode = *node.bnode();
const bNodeSocket &bsocket = *in_out_socket.bsocket();
if (bsocket.changed_flag != NTREE_CHANGED_NOTHING) {
return true;
@ -1493,7 +1494,7 @@ class NodeTreeMainUpdater {
}
else {
const OutputSocketRef &socket = in_out_socket.as_output();
for (const InputSocketRef *input_socket : socket.node().inputs()) {
for (const InputSocketRef *input_socket : node.inputs()) {
if (input_socket->is_available()) {
bool &pushed = pushed_by_socket_id[input_socket->id()];
if (!pushed) {
@ -1502,6 +1503,18 @@ class NodeTreeMainUpdater {
}
}
}
/* The Normal node has a special case, because the value stored in the first output socket
* is used as input in the node. */
if (bnode.type == SH_NODE_NORMAL && socket.index() == 1) {
BLI_assert(socket.name() == "Dot");
const OutputSocketRef &normal_output = node.output(0);
BLI_assert(normal_output.name() == "Normal");
bool &pushed = pushed_by_socket_id[normal_output.id()];
if (!pushed) {
sockets_to_check.push(&normal_output);
pushed = true;
}
}
}
}
return false;

View File

@ -800,6 +800,9 @@ static void scene_foreach_id(ID *id, LibraryForeachIDData *data)
data, SEQ_for_each_callback(&scene->ed->seqbase, seq_foreach_member_id_cb, data));
}
BKE_LIB_FOREACHID_PROCESS_FUNCTION_CALL(data,
BKE_keyingsets_foreach_id(data, &scene->keyingsets));
/* This pointer can be NULL when reading old files, better be safe than sorry. */
if (scene->master_collection != NULL) {
BKE_LIB_FOREACHID_PROCESS_FUNCTION_CALL(

View File

@ -200,7 +200,10 @@ void ViewerOperation::update_image(const rcti *rect)
rect->ymin,
rect->xmax,
rect->ymax);
image_->gpuflag |= IMA_GPU_REFRESH;
/* This could be improved to use partial updates. For now disabled as the full frame compositor
 * would no longer use partial frames and the image engine requires more testing. */
BKE_image_partial_update_mark_full_update(image_);
this->update_draw();
}

View File

@ -35,7 +35,7 @@ void GPencilBackup::restore_to_gpencil(bGPdata *gpd)
}
/* Doing a copy-on-write copies the update cache pointer. Make sure to reset it
* to NULL as we should never use the update cache from eval data. */
gpd->runtime.update_cache = NULL;
gpd->runtime.update_cache = nullptr;
/* Make sure to update the original runtime pointers in the eval data. */
BKE_gpencil_data_update_orig_pointers(gpd_orig, gpd);
}

View File

@ -47,20 +47,6 @@ struct OneTextureMethod {
}
}
void update_region_uv_bounds(const ARegion *region)
{
TextureInfo &info = instance_data->texture_infos[0];
if (!BLI_rctf_compare(&info.region_uv_bounds, &region->v2d.cur, EPSILON_UV_BOUNDS)) {
info.region_uv_bounds = region->v2d.cur;
info.dirty = true;
}
/* Mark the other textures as invalid. */
for (int i = 1; i < SCREEN_SPACE_DRAWING_MODE_TEXTURE_LEN; i++) {
BLI_rctf_init_minmax(&instance_data->texture_infos[i].clipping_bounds);
}
}
void update_screen_uv_bounds()
{
for (int i = 0; i < SCREEN_SPACE_DRAWING_MODE_TEXTURE_LEN; i++) {
@ -77,7 +63,13 @@ struct OneTextureMethod {
float4x4 mat_inv = mat.inverted();
float3 min_uv = mat_inv * float3(0.0f, 0.0f, 0.0f);
float3 max_uv = mat_inv * float3(1.0f, 1.0f, 0.0f);
BLI_rctf_init(&info.clipping_uv_bounds, min_uv[0], max_uv[0], min_uv[1], max_uv[1]);
rctf new_clipping_bounds;
BLI_rctf_init(&new_clipping_bounds, min_uv[0], max_uv[0], min_uv[1], max_uv[1]);
if (!BLI_rctf_compare(&info.clipping_uv_bounds, &new_clipping_bounds, EPSILON_UV_BOUNDS)) {
info.clipping_uv_bounds = new_clipping_bounds;
info.dirty = true;
}
}
};
@ -252,7 +244,7 @@ template<typename TextureMethod> class ScreenSpaceDrawingMode : public AbstractD
static_cast<float>(iterator.tile_data.tile_buffer->y) +
tile_offset_y);
rctf changed_overlapping_region_in_uv_space;
const bool region_overlap = BLI_rctf_isect(&info.region_uv_bounds,
const bool region_overlap = BLI_rctf_isect(&info.clipping_uv_bounds,
&changed_region_in_uv_space,
&changed_overlapping_region_in_uv_space);
if (!region_overlap) {
@ -264,14 +256,14 @@ template<typename TextureMethod> class ScreenSpaceDrawingMode : public AbstractD
rcti gpu_texture_region_to_update;
BLI_rcti_init(
&gpu_texture_region_to_update,
floor((changed_overlapping_region_in_uv_space.xmin - info.region_uv_bounds.xmin) *
texture_width / BLI_rctf_size_x(&info.region_uv_bounds)),
floor((changed_overlapping_region_in_uv_space.xmax - info.region_uv_bounds.xmin) *
texture_width / BLI_rctf_size_x(&info.region_uv_bounds)),
ceil((changed_overlapping_region_in_uv_space.ymin - info.region_uv_bounds.ymin) *
texture_height / BLI_rctf_size_y(&info.region_uv_bounds)),
ceil((changed_overlapping_region_in_uv_space.ymax - info.region_uv_bounds.ymin) *
texture_height / BLI_rctf_size_y(&info.region_uv_bounds)));
floor((changed_overlapping_region_in_uv_space.xmin - info.clipping_uv_bounds.xmin) *
texture_width / BLI_rctf_size_x(&info.clipping_uv_bounds)),
floor((changed_overlapping_region_in_uv_space.xmax - info.clipping_uv_bounds.xmin) *
texture_width / BLI_rctf_size_x(&info.clipping_uv_bounds)),
ceil((changed_overlapping_region_in_uv_space.ymin - info.clipping_uv_bounds.ymin) *
texture_height / BLI_rctf_size_y(&info.clipping_uv_bounds)),
ceil((changed_overlapping_region_in_uv_space.ymax - info.clipping_uv_bounds.ymin) *
texture_height / BLI_rctf_size_y(&info.clipping_uv_bounds)));
rcti tile_region_to_extract;
BLI_rcti_init(
@ -295,13 +287,13 @@ template<typename TextureMethod> class ScreenSpaceDrawingMode : public AbstractD
for (int y = gpu_texture_region_to_update.ymin; y < gpu_texture_region_to_update.ymax;
y++) {
float yf = y / (float)texture_height;
float v = info.region_uv_bounds.ymax * yf + info.region_uv_bounds.ymin * (1.0 - yf) -
float v = info.clipping_uv_bounds.ymax * yf + info.clipping_uv_bounds.ymin * (1.0 - yf) -
tile_offset_y;
for (int x = gpu_texture_region_to_update.xmin; x < gpu_texture_region_to_update.xmax;
x++) {
float xf = x / (float)texture_width;
float u = info.region_uv_bounds.xmax * xf + info.region_uv_bounds.xmin * (1.0 - xf) -
tile_offset_x;
float u = info.clipping_uv_bounds.xmax * xf +
info.clipping_uv_bounds.xmin * (1.0 - xf) - tile_offset_x;
nearest_interpolation_color(tile_buffer,
nullptr,
&extracted_buffer.rect_float[offset * 4],
@ -344,7 +336,6 @@ template<typename TextureMethod> class ScreenSpaceDrawingMode : public AbstractD
IMAGE_InstanceData &instance_data,
const ImageUser *image_user) const
{
ImBuf texture_buffer;
const int texture_width = GPU_texture_width(info.texture);
const int texture_height = GPU_texture_height(info.texture);
@ -413,9 +404,9 @@ template<typename TextureMethod> class ScreenSpaceDrawingMode : public AbstractD
1.0f};
rescale_m4(uv_to_texel, scale);
uv_to_texel[3][0] += image_tile.get_tile_x_offset() /
BLI_rctf_size_x(&texture_info.region_uv_bounds);
BLI_rctf_size_x(&texture_info.clipping_uv_bounds);
uv_to_texel[3][1] += image_tile.get_tile_y_offset() /
BLI_rctf_size_y(&texture_info.region_uv_bounds);
BLI_rctf_size_y(&texture_info.clipping_uv_bounds);
uv_to_texel[3][0] *= texture_width;
uv_to_texel[3][1] *= texture_height;
invert_m4(uv_to_texel);
@ -465,9 +456,11 @@ template<typename TextureMethod> class ScreenSpaceDrawingMode : public AbstractD
* screen space textures that aren't needed. */
const ARegion *region = draw_ctx->region;
method.update_screen_space_bounds(region);
method.update_region_uv_bounds(region);
method.update_screen_uv_bounds();
/* Check for changes in the image user compared to the last time. */
instance_data->update_image_user(iuser);
/* Step: Update the GPU textures based on the changes in the image. */
instance_data->update_gpu_texture_allocations();
update_textures(*instance_data, image, iuser);
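The image-engine hunks above fold the old region_uv_bounds into clipping_uv_bounds and move the dirty check into update_screen_uv_bounds(): recompute the bounds every draw, but only copy them and flag the texture when they actually moved. A minimal sketch of that compare-before-assign pattern, where `Rctf` and the epsilon stand in for Blender's rctf and EPSILON_UV_BOUNDS:

    #include <cmath>
    #include <cstdio>

    struct Rctf {
      float xmin, xmax, ymin, ymax;
    };

    /* Stand-in for BLI_rctf_compare(): equal within a tolerance. */
    static bool rctf_compare(const Rctf &a, const Rctf &b, const float limit)
    {
      return std::fabs(a.xmin - b.xmin) <= limit && std::fabs(a.xmax - b.xmax) <= limit &&
             std::fabs(a.ymin - b.ymin) <= limit && std::fabs(a.ymax - b.ymax) <= limit;
    }

    struct TextureInfoModel {
      Rctf clipping_uv_bounds{};
      bool dirty = false;

      void update_uv_bounds(const Rctf &new_bounds)
      {
        if (!rctf_compare(clipping_uv_bounds, new_bounds, 1e-5f)) {
          clipping_uv_bounds = new_bounds;
          dirty = true; /* Forces a re-upload of this texture slot. */
        }
      }
    };

    int main()
    {
      TextureInfoModel info;
      info.update_uv_bounds({0.0f, 1.0f, 0.0f, 1.0f});
      printf("first update dirty: %d\n", info.dirty); /* 1 */
      info.dirty = false;
      info.update_uv_bounds({0.0f, 1.0f, 0.0f, 1.0f});
      printf("same bounds dirty: %d\n", info.dirty); /* 0: nothing to re-upload */
      return 0;
    }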

View File

@ -21,10 +21,12 @@
*
* 4 textures are used to reduce uploading screen space textures when translating the image.
*/
constexpr int SCREEN_SPACE_DRAWING_MODE_TEXTURE_LEN = 4;
constexpr int SCREEN_SPACE_DRAWING_MODE_TEXTURE_LEN = 1;
struct IMAGE_InstanceData {
struct Image *image;
/** Copy of the last image user to detect iuser differences that require a full update. */
struct ImageUser last_image_user;
PartialImageUpdater partial_update;
@ -93,6 +95,27 @@ struct IMAGE_InstanceData {
}
}
void update_image_user(const ImageUser *image_user)
{
short requested_pass = image_user ? image_user->pass : 0;
short requested_layer = image_user ? image_user->layer : 0;
short requested_view = image_user ? image_user->multi_index : 0;
/* There is room for 2 multiview textures. When a higher number is requested we should always
* target the first view slot. This is fine as multi view images aren't used together. */
if (requested_view < 2) {
requested_view = 0;
}
if (last_image_user.pass != requested_pass || last_image_user.layer != requested_layer ||
last_image_user.multi_index != requested_view) {
last_image_user.pass = requested_pass;
last_image_user.layer = requested_layer;
last_image_user.multi_index = requested_view;
reset_dirty_flag(true);
}
}
private:
/** \brief Set dirty flag of all texture slots to the given value. */
void reset_dirty_flag(bool new_value)

View File

@ -29,8 +29,6 @@ struct TextureInfo {
/** \brief area of the texture in screen space. */
rctf clipping_bounds;
/** \brief uv area of the texture (copy from ARegion). */
rctf region_uv_bounds;
/** \brief uv area of the texture in screen space. */
rctf clipping_uv_bounds;

View File

@ -1810,6 +1810,12 @@ static void gpencil_sculpt_brush_apply(bContext *C, wmOperator *op, PointerRNA *
gso->mval[0] = mouse[0] = (int)(mousef[0]);
gso->mval[1] = mouse[1] = (int)(mousef[1]);
/* If the mouse/pen has not moved, there is no reason to continue. This also avoids a small
 * drift due to precision accumulation errors. */
if ((gso->mval[0] == gso->mval_prev[0]) && (gso->mval[1] == gso->mval_prev[1])) {
return;
}
gso->pressure = RNA_float_get(itemptr, "pressure");
if (RNA_boolean_get(itemptr, "pen_flip")) {

View File

@ -2293,54 +2293,29 @@ void UI_draw_box_shadow(const rctf *rect, uchar alpha)
void ui_draw_dropshadow(
const rctf *rct, float radius, float aspect, float alpha, int UNUSED(select))
{
float rad;
const float max_radius = (BLI_rctf_size_y(rct) - 10.0f) * 0.5f;
const float rad = min_ff(radius, max_radius);
if (radius > (BLI_rctf_size_y(rct) - 10.0f) * 0.5f) {
rad = (BLI_rctf_size_y(rct) - 10.0f) * 0.5f;
}
else {
rad = radius;
}
/* This undoes the scale of the view for higher zoom factors to clamp the shadow size. */
const float clamped_aspect = smoothminf(aspect, 1.0f, 0.5f);
int a, i = 12;
#if 0
if (select) {
a = i * aspect; /* same as below */
}
else
#endif
{
a = i * aspect;
}
const float shadow_softness = 0.6f * U.widget_unit * clamped_aspect;
const float shadow_offset = 0.5f * U.widget_unit * clamped_aspect;
const float shadow_alpha = 0.5f * alpha;
GPU_blend(GPU_BLEND_ALPHA);
const float dalpha = alpha * 2.0f / 255.0f;
float calpha = dalpha;
float visibility = 1.0f;
for (; i--;) {
/* alpha ranges from 2 to 20 or so */
#if 0 /* Old Method (pre 2.8) */
float color[4] = {0.0f, 0.0f, 0.0f, calpha};
UI_draw_roundbox_4fv(
true, rct->xmin - a, rct->ymin - a, rct->xmax + a, rct->ymax - 10.0f + a, rad + a, color);
#endif
/* Compute final visibility to match old method result. */
/* TODO: we could just find a better fit function inside the shader instead of this. */
visibility = visibility * (1.0f - calpha);
calpha += dalpha;
}
uiWidgetBaseParameters widget_params = {
.recti.xmin = rct->xmin,
.recti.ymin = rct->ymin,
.recti.xmax = rct->xmax,
.recti.ymax = rct->ymax - 10.0f,
.rect.xmin = rct->xmin - a,
.rect.ymin = rct->ymin - a,
.rect.xmax = rct->xmax + a,
.rect.ymax = rct->ymax - 10.0f + a,
.recti.ymax = rct->ymax - shadow_offset,
.rect.xmin = rct->xmin - shadow_softness,
.rect.ymin = rct->ymin - shadow_softness,
.rect.xmax = rct->xmax + shadow_softness,
.rect.ymax = rct->ymax - shadow_offset + shadow_softness,
.radi = rad,
.rad = rad + a,
.rad = rad + shadow_softness,
.round_corners[0] = (roundboxtype & UI_CNR_BOTTOM_LEFT) ? 1.0f : 0.0f,
.round_corners[1] = (roundboxtype & UI_CNR_BOTTOM_RIGHT) ? 1.0f : 0.0f,
.round_corners[2] = (roundboxtype & UI_CNR_TOP_RIGHT) ? 1.0f : 0.0f,
@ -2351,7 +2326,7 @@ void ui_draw_dropshadow(
GPUBatch *batch = ui_batch_roundbox_shadow_get();
GPU_batch_program_set_builtin(batch, GPU_SHADER_2D_WIDGET_SHADOW);
GPU_batch_uniform_4fv_array(batch, "parameters", 4, (const float(*)[4]) & widget_params);
GPU_batch_uniform_1f(batch, "alpha", 1.0f - visibility);
GPU_batch_uniform_1f(batch, "alpha", shadow_alpha);
GPU_batch_draw(batch);
/* outline emphasis */
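The rewritten ui_draw_dropshadow() derives the shadow geometry from a clamped aspect: smoothminf(aspect, 1.0f, 0.5f) lets small aspects pass through while softly levelling off near 1, which is what "undoes the scale of the view for higher zoom factors" means. A hedged sketch, assuming BLI's smoothminf() is the cubic smooth-minimum (h = max(k - |a - b|, 0) / k; result = min(a, b) - h^3 * k / 6); verify against BLI_math before relying on exact values:

    #include <algorithm>
    #include <cmath>
    #include <cstdio>

    /* Assumed form of smoothminf(); a stand-in, not copied from BLI_math. */
    static float smoothminf_sketch(float a, float b, float k)
    {
      if (k != 0.0f) {
        const float h = std::max(k - std::fabs(a - b), 0.0f) / k;
        return std::min(a, b) - h * h * h * k * (1.0f / 6.0f);
      }
      return std::min(a, b);
    }

    int main()
    {
      for (float aspect : {0.25f, 0.5f, 1.0f, 2.0f, 4.0f}) {
        /* Below ~1 the aspect passes through; above 1 it levels off near 1,
         * so shadow_softness/shadow_offset stop growing as the view zooms in. */
        printf("aspect %.2f -> clamped %.3f\n", aspect, smoothminf_sketch(aspect, 1.0f, 0.5f));
      }
      return 0;
    }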

View File

@ -512,7 +512,7 @@ static void screen_opengl_render_apply(const bContext *C, OGLRender *oglrender)
ibuf->userflags |= IB_DISPLAY_BUFFER_INVALID;
}
BKE_image_release_ibuf(oglrender->ima, ibuf, lock);
oglrender->ima->gpuflag |= IMA_GPU_REFRESH;
BKE_image_partial_update_mark_full_update(oglrender->ima);
if (oglrender->write_still) {
screen_opengl_render_write(oglrender);

View File

@ -388,7 +388,7 @@ bool ED_image_slot_cycle(struct Image *image, int direction)
}
if ((cur != image->render_slot)) {
image->gpuflag |= IMA_GPU_REFRESH;
BKE_image_partial_update_mark_full_update(image);
}
return (cur != image->render_slot);
}

View File

@ -730,7 +730,7 @@ static int node_box_select_exec(bContext *C, wmOperator *op)
/* Frame nodes are selectable by their borders only (using their whole rect - as for other
 * nodes - would prevent selection of other nodes inside that frame). */
const rctf frame_inside = node_frame_rect_inside(*node);
if (BLI_rctf_isect(&rectf, &node->totr, NULL) &&
if (BLI_rctf_isect(&rectf, &node->totr, nullptr) &&
!BLI_rctf_inside_rctf(&frame_inside, &rectf)) {
nodeSetSelected(node, select);
is_inside = true;
@ -932,7 +932,7 @@ static bool do_lasso_select_node(bContext *C,
BLI_rctf_rcti_copy(&rectf, &rect);
UI_view2d_region_to_view_rctf(&region->v2d, &rectf, &rectf);
const rctf frame_inside = node_frame_rect_inside(*node);
if (BLI_rctf_isect(&rectf, &node->totr, NULL) &&
if (BLI_rctf_isect(&rectf, &node->totr, nullptr) &&
!BLI_rctf_inside_rctf(&frame_inside, &rectf)) {
nodeSetSelected(node, select);
changed = true;

View File

@ -628,13 +628,16 @@ static short snap_select_type_get(TransInfo *t)
else if (!t->tsnap.snap_self) {
r_snap_select = SNAP_NOT_ACTIVE;
}
else {
r_snap_select = SNAP_NOT_SELECTED;
}
}
else if ((obedit_type == -1) && base_act && base_act->object &&
(base_act->object->mode & OB_MODE_PARTICLE_EDIT)) {
/* Particles edit mode. */
}
else if (obedit_type == -1) {
/* Object mode */
/* Object or pose mode. */
r_snap_select = SNAP_NOT_SELECTED;
}
}

View File

@ -477,7 +477,10 @@ static void iter_snap_objects(SnapObjectContext *sctx,
}
}
else if (snap_select == SNAP_NOT_SELECTED) {
if ((base->flag & BASE_SELECTED) || (base->flag_legacy & BA_WAS_SEL)) {
if (is_object_active && !(base->object->mode & OB_MODE_OBJECT)) {
/* Pass. Consider the selection of elements being edited. */
}
else if ((base->flag & BASE_SELECTED) || (base->flag_legacy & BA_WAS_SEL)) {
continue;
}
}
@ -1818,6 +1821,7 @@ static short snapArmature(SnapObjectContext *sctx,
const struct SnapObjectParams *params,
Object *ob_eval,
const float obmat[4][4],
bool is_object_active,
/* read/write args */
float *dist_px,
/* return args */
@ -1838,9 +1842,10 @@ static short snapArmature(SnapObjectContext *sctx,
dist_squared_to_projected_aabb_precalc(
&neasrest_precalc, lpmat, sctx->runtime.win_size, sctx->runtime.mval);
bool use_obedit = ((bArmature *)ob_eval->data)->edbo != NULL;
bArmature *arm = ob_eval->data;
const bool is_editmode = arm->edbo != NULL;
if (use_obedit == false) {
if (is_editmode == false) {
/* Test BoundBox */
BoundBox *bb = BKE_armature_boundbox_get(ob_eval);
if (bb && !snap_bound_box_check_dist(bb->vec[0],
@ -1859,10 +1864,11 @@ static short snapArmature(SnapObjectContext *sctx,
mul_v4_m4v4(clip_planes_local[i], tobmat, sctx->runtime.clip_plane[i]);
}
const eSnapSelect snap_select = params->snap_select;
bool is_persp = sctx->runtime.view_proj == VIEW_PROJ_PERSP;
const bool is_posemode = is_object_active && (ob_eval->mode & OB_MODE_POSE);
const bool skip_selected = (is_editmode || is_posemode) &&
(params->snap_select == SNAP_NOT_SELECTED);
const bool is_persp = sctx->runtime.view_proj == VIEW_PROJ_PERSP;
bArmature *arm = ob_eval->data;
if (arm->edbo) {
LISTBASE_FOREACH (EditBone *, eBone, arm->edbo) {
if (eBone->layer & arm->layer) {
@ -1872,7 +1878,7 @@ static short snapArmature(SnapObjectContext *sctx,
}
const bool is_selected = (eBone->flag & (BONE_ROOTSEL | BONE_TIPSEL)) != 0;
if (is_selected && snap_select == SNAP_NOT_SELECTED) {
if (is_selected && skip_selected) {
continue;
}
bool has_vert_snap = false;
@ -1916,10 +1922,16 @@ static short snapArmature(SnapObjectContext *sctx,
else if (ob_eval->pose && ob_eval->pose->chanbase.first) {
LISTBASE_FOREACH (bPoseChannel *, pchan, &ob_eval->pose->chanbase) {
Bone *bone = pchan->bone;
/* skip hidden bones */
if (!bone || (bone->flag & (BONE_HIDDEN_P | BONE_HIDDEN_PG))) {
/* Skip hidden bones. */
continue;
}
const bool is_selected = (bone->flag & (BONE_SELECTED | BONE_ROOTSEL | BONE_TIPSEL)) != 0;
if (is_selected && skip_selected) {
continue;
}
bool has_vert_snap = false;
const float *head_vec = pchan->pose_head;
const float *tail_vec = pchan->pose_tail;
@ -2685,7 +2697,7 @@ static void snap_obj_fn(SnapObjectContext *sctx,
const struct SnapObjectParams *params,
Object *ob_eval,
float obmat[4][4],
bool UNUSED(is_object_active),
bool is_object_active,
void *data)
{
struct SnapObjUserData *dt = data;
@ -2721,8 +2733,15 @@ static void snap_obj_fn(SnapObjectContext *sctx,
break;
}
case OB_ARMATURE:
retval = snapArmature(
sctx, params, ob_eval, obmat, dt->dist_px, dt->r_loc, dt->r_no, dt->r_index);
retval = snapArmature(sctx,
params,
ob_eval,
obmat,
is_object_active,
dt->dist_px,
dt->r_loc,
dt->r_no,
dt->r_index);
break;
case OB_CURVE:
retval = snapCurve(

View File

@ -11,6 +11,8 @@
namespace blender::gpu {
#define QUERY_MIN_LEN 16
typedef enum GPUQueryType {
GPU_QUERY_OCCLUSION = 0,
} GPUQueryType;

View File

@ -37,7 +37,7 @@ struct GPUSelectQueryState {
/** GPU queries abstraction. Contains an array of queries. */
QueryPool *queries;
/** Array holding the id corresponding id to each query. */
Vector<uint> *ids;
Vector<uint, QUERY_MIN_LEN> *ids;
/** Cache on initialization. */
GPUSelectResult *buffer;
/** The capacity of the `buffer` array. */
@ -71,7 +71,7 @@ void gpu_select_query_begin(GPUSelectResult *buffer,
g_query_state.index = 0;
g_query_state.oldhits = oldhits;
g_query_state.ids = new Vector<uint>();
g_query_state.ids = new Vector<uint, QUERY_MIN_LEN>();
g_query_state.queries = GPUBackend::get()->querypool_alloc();
g_query_state.queries->init(GPU_QUERY_OCCLUSION);
@ -149,7 +149,7 @@ uint gpu_select_query_end()
}
Span<uint> ids = *g_query_state.ids;
Vector<uint32_t> result(ids.size());
Vector<uint32_t, QUERY_MIN_LEN> result(ids.size());
g_query_state.queries->get_occlusion_result(result);
for (int i = 0; i < result.size(); i++) {

View File

@ -37,8 +37,9 @@ void GLQueryPool::begin_query()
/* TODO: add assert about expected usage. */
while (query_issued_ >= query_ids_.size()) {
int64_t prev_size = query_ids_.size();
query_ids_.resize(prev_size + QUERY_CHUNCK_LEN);
glGenQueries(QUERY_CHUNCK_LEN, &query_ids_[prev_size]);
int64_t chunk_size = prev_size == 0 ? query_ids_.capacity() : QUERY_CHUNCK_LEN;
query_ids_.resize(prev_size + chunk_size);
glGenQueries(chunk_size, &query_ids_[prev_size]);
}
glBeginQuery(gl_type_, query_ids_[query_issued_++]);
}
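The fix above changes the growth rule in GLQueryPool::begin_query(): the very first growth fills the vector's existing (inline) capacity instead of a fixed chunk, so glGenQueries creates exactly as many ids as the inline storage holds, and only later growth proceeds QUERY_CHUNCK_LEN at a time. A sketch of that rule, with gen_queries() standing in for glGenQueries() and std::vector::reserve() modelling the inline capacity:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    constexpr int64_t QUERY_MIN_LEN = 16;    /* inline capacity of query_ids_ */
    constexpr int64_t QUERY_CHUNCK_LEN = 256;

    /* Stand-in for glGenQueries(): hands out sequential ids. */
    static void gen_queries(int64_t count, uint32_t *ids)
    {
      static uint32_t next = 1;
      for (int64_t i = 0; i < count; i++) {
        ids[i] = next++;
      }
    }

    int main()
    {
      std::vector<uint32_t> query_ids;
      query_ids.reserve(QUERY_MIN_LEN); /* models the inline capacity */

      const int64_t query_issued = 20; /* pretend 20 queries were begun */
      while (query_issued >= int64_t(query_ids.size())) {
        const int64_t prev_size = query_ids.size();
        const int64_t chunk_size = prev_size == 0 ? int64_t(query_ids.capacity()) :
                                                    QUERY_CHUNCK_LEN;
        query_ids.resize(prev_size + chunk_size);
        gen_queries(chunk_size, &query_ids[prev_size]);
        /* Prints "generated 16" then "generated 256". */
        printf("generated %lld ids (total %zu)\n", (long long)chunk_size, query_ids.size());
      }
      return 0;
    }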

View File

@ -18,7 +18,7 @@ namespace blender::gpu {
class GLQueryPool : public QueryPool {
private:
/** Contains queries object handles. */
Vector<GLuint> query_ids_;
Vector<GLuint, QUERY_MIN_LEN> query_ids_;
/** Type of this query pool. */
GPUQueryType type_;
/** Associated GL type. */
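The switch to Vector<GLuint, QUERY_MIN_LEN> here (and to Vector<uint, QUERY_MIN_LEN> in the selection code) is a small-buffer optimization: the first QUERY_MIN_LEN elements live inside the object itself, so a pool that stays small never touches the heap. A toy illustration of that idea (not blender::Vector's implementation):

    #include <cstddef>
    #include <cstdio>

    /* Minimal inline-capacity vector for trivially copyable types. */
    template<typename T, size_t N> class SmallVectorSketch {
      T inline_buf_[N];
      T *data_ = inline_buf_;
      size_t size_ = 0;
      size_t capacity_ = N;

     public:
      ~SmallVectorSketch()
      {
        if (data_ != inline_buf_) {
          delete[] data_;
        }
      }

      void append(const T &value)
      {
        if (size_ == capacity_) {
          /* Spill to the heap only once the inline storage is exhausted. */
          T *heap = new T[capacity_ * 2];
          for (size_t i = 0; i < size_; i++) {
            heap[i] = data_[i];
          }
          if (data_ != inline_buf_) {
            delete[] data_;
          }
          data_ = heap;
          capacity_ *= 2;
        }
        data_[size_++] = value;
      }

      bool is_inline() const { return data_ == inline_buf_; }
    };

    int main()
    {
      SmallVectorSketch<unsigned, 16> ids; /* 16 mirrors QUERY_MIN_LEN */
      for (unsigned i = 0; i < 16; i++) {
        ids.append(i);
      }
      printf("inline after 16: %d\n", ids.is_inline()); /* 1: no heap allocation yet */
      ids.append(16);
      printf("inline after 17: %d\n", ids.is_inline()); /* 0: spilled to the heap */
      return 0;
    }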

View File

@ -499,7 +499,7 @@ static std::string main_function_wrapper(std::string &pre_main, std::string &pos
std::string GLShader::vertex_interface_declare(const ShaderCreateInfo &info) const
{
std::stringstream ss;
std::string post_main = "";
std::string post_main;
ss << "\n/* Inputs. */\n";
for (const ShaderCreateInfo::VertIn &attr : info.vertex_inputs_) {
@ -532,7 +532,7 @@ std::string GLShader::vertex_interface_declare(const ShaderCreateInfo &info) con
ss << "\n";
if (post_main.empty() == false) {
std::string pre_main = "";
std::string pre_main;
ss << main_function_wrapper(pre_main, post_main);
}
return ss.str();
@ -541,7 +541,7 @@ std::string GLShader::vertex_interface_declare(const ShaderCreateInfo &info) con
std::string GLShader::fragment_interface_declare(const ShaderCreateInfo &info) const
{
std::stringstream ss;
std::string pre_main = "";
std::string pre_main;
ss << "\n/* Interfaces. */\n";
const Vector<StageInterfaceInfo *> &in_interfaces = (info.geometry_source_.is_empty()) ?
@ -595,7 +595,7 @@ std::string GLShader::fragment_interface_declare(const ShaderCreateInfo &info) c
ss << "\n";
if (pre_main.empty() == false) {
std::string post_main = "";
std::string post_main;
ss << main_function_wrapper(pre_main, post_main);
}
return ss.str();
@ -715,7 +715,7 @@ std::string GLShader::workaround_geometry_shader_source_create(
ss << " gl_Layer = gpu_Layer[0];\n";
}
for (auto i : IndexRange(3)) {
for (auto iface : info_modified.vertex_out_interfaces_) {
for (StageInterfaceInfo *iface : info_modified.vertex_out_interfaces_) {
for (auto &inout : iface->inouts) {
ss << " " << iface->instance_name << "_out." << inout.name;
ss << " = " << iface->instance_name << "_in[" << i << "]." << inout.name << ";\n";

View File

@ -67,7 +67,7 @@ ArchiveReader *ArchiveReader::get(struct Main *bmain, const std::vector<const ch
std::vector<ArchiveReader *> readers;
for (const char *filename : filenames) {
auto reader = new ArchiveReader(bmain, filename);
ArchiveReader *reader = new ArchiveReader(bmain, filename);
if (!reader->valid()) {
delete reader;
@ -77,7 +77,7 @@ ArchiveReader *ArchiveReader::get(struct Main *bmain, const std::vector<const ch
readers.push_back(reader);
}
if (readers.size() == 0) {
if (readers.empty()) {
return nullptr;
}
@ -92,7 +92,7 @@ ArchiveReader::ArchiveReader(const std::vector<ArchiveReader *> &readers) : m_re
{
Alembic::AbcCoreLayer::ArchiveReaderPtrs archives;
for (auto &reader : readers) {
for (ArchiveReader *reader : readers) {
archives.push_back(reader->m_archive.getPtr());
}

View File

@ -82,7 +82,7 @@ struct InputSpec {
};
/* Map Blender socket names to USD Preview Surface InputSpec structs. */
typedef std::map<std::string, InputSpec> InputSpecMap;
using InputSpecMap = std::map<std::string, InputSpec>;
/* Static function forward declarations. */
static pxr::UsdShadeShader create_usd_preview_shader(const USDExporterContext &usd_export_context,

View File

@ -237,15 +237,13 @@ enum {
/* Image.gpuflag */
enum {
/** GPU texture needs to be refreshed. */
IMA_GPU_REFRESH = (1 << 0),
/** All mipmap levels in OpenGL texture set? */
IMA_GPU_MIPMAP_COMPLETE = (1 << 1),
IMA_GPU_MIPMAP_COMPLETE = (1 << 0),
/* Reuse the max resolution textures as they fit in the limited scale. */
IMA_GPU_REUSE_MAX_RESOLUTION = (1 << 2),
IMA_GPU_REUSE_MAX_RESOLUTION = (1 << 1),
/* Has any limited scale textures been allocated.
* Adds additional checks to reuse max resolution images when they fit inside limited scale. */
IMA_GPU_HAS_LIMITED_SCALE_TEXTURES = (1 << 3),
IMA_GPU_HAS_LIMITED_SCALE_TEXTURES = (1 << 2),
};
/* Image.source, where the image comes from */

View File

@ -594,7 +594,7 @@ static void rna_render_slots_active_set(PointerRNA *ptr,
int index = BLI_findindex(&image->renderslots, slot);
if (index != -1) {
image->render_slot = index;
image->gpuflag |= IMA_GPU_REFRESH;
BKE_image_partial_update_mark_full_update(image);
}
}
}
@ -610,7 +610,7 @@ static void rna_render_slots_active_index_set(PointerRNA *ptr, int value)
Image *image = (Image *)ptr->owner_id;
int num_slots = BLI_listbase_count(&image->renderslots);
image->render_slot = value;
image->gpuflag |= IMA_GPU_REFRESH;
BKE_image_partial_update_mark_full_update(image);
CLAMP(image->render_slot, 0, num_slots - 1);
}

View File

@ -35,6 +35,12 @@
static bool args_contain_key(PyObject *kwargs, const char *name)
{
if (kwargs == NULL) {
/* When a function gets called without any kwargs, Python just passes NULL instead.
* PyDict_Contains() is not NULL-safe, though. */
return false;
}
PyObject *py_key = PyUnicode_FromString(name);
const bool result = PyDict_Contains(kwargs, py_key) == 1;
Py_DECREF(py_key);