Merge remote-tracking branch 'origin' into sculpt-dev

Commit e108318af8 by Joseph Eagar, 2022-06-14 02:01:58 -07:00
534 changed files with 10836 additions and 6702 deletions

View File

@ -300,6 +300,9 @@ option(WITH_USD "Enable Universal Scene Description (USD) Suppor
# 3D format support
# Disable opencollada when we don't have precompiled libs
option(WITH_OPENCOLLADA "Enable OpenCollada Support (http://www.opencollada.org)" ON)
option(WITH_IO_WAVEFRONT_OBJ "Enable Wavefront-OBJ 3D file format support (*.obj)" ON)
option(WITH_IO_STL "Enable STL 3D file format support (*.stl)" ON)
option(WITH_IO_GPENCIL "Enable grease-pencil file format IO (*.svg, *.pdf)" ON)
# Sound output
option(WITH_SDL "Enable SDL for sound" ON)

View File

@ -37,6 +37,9 @@ set(WITH_IMAGE_TIFF OFF CACHE BOOL "" FORCE)
set(WITH_IMAGE_WEBP OFF CACHE BOOL "" FORCE)
set(WITH_INPUT_NDOF OFF CACHE BOOL "" FORCE)
set(WITH_INTERNATIONAL OFF CACHE BOOL "" FORCE)
set(WITH_IO_STL OFF CACHE BOOL "" FORCE)
set(WITH_IO_WAVEFRONT_OBJ OFF CACHE BOOL "" FORCE)
set(WITH_IO_GPENCIL OFF CACHE BOOL "" FORCE)
set(WITH_JACK OFF CACHE BOOL "" FORCE)
set(WITH_LIBMV OFF CACHE BOOL "" FORCE)
set(WITH_LLVM OFF CACHE BOOL "" FORCE)

View File

@ -1,12 +1,12 @@
sphinx==4.1.1
sphinx==5.0.1
# Sphinx dependencies that are important
Jinja2==3.0.1
Pygments==2.10.0
Jinja2==3.1.2
Pygments==2.12.0
docutils==0.17.1
snowballstemmer==2.1.0
babel==2.9.1
requests==2.26.0
snowballstemmer==2.2.0
babel==2.10.1
requests==2.27.1
# Only needed to match the theme used for the official documentation.
# Without this theme, the default theme will be used.

View File

@ -754,8 +754,6 @@ class CYCLES_RENDER_PT_filter(CyclesButtonsPanel, Panel):
layout.use_property_split = True
layout.use_property_decorate = False
with_freestyle = bpy.app.build_options.freestyle
scene = context.scene
rd = scene.render
view_layer = context.view_layer

View File

@ -28,10 +28,33 @@ BlenderImageLoader::BlenderImageLoader(BL::Image b_image,
bool BlenderImageLoader::load_metadata(const ImageDeviceFeatures &, ImageMetaData &metadata)
{
metadata.width = b_image.size()[0];
metadata.height = b_image.size()[1];
if (b_image.source() != BL::Image::source_TILED) {
/* Image sequence might have different dimensions, and hence needs to be handled in a special
* manner.
* NOTE: Currently the sequences are not handled by this image loader. */
assert(b_image.source() != BL::Image::source_SEQUENCE);
metadata.width = b_image.size()[0];
metadata.height = b_image.size()[1];
metadata.channels = b_image.channels();
}
else {
/* Different UDIM tiles might have different resolutions, so get resolution from the actual
* tile. */
BL::UDIMTile b_udim_tile = b_image.tiles.get(tile_number);
if (b_udim_tile) {
metadata.width = b_udim_tile.size()[0];
metadata.height = b_udim_tile.size()[1];
metadata.channels = b_udim_tile.channels();
}
else {
metadata.width = 0;
metadata.height = 0;
metadata.channels = 0;
}
}
metadata.depth = 1;
metadata.channels = b_image.channels();
if (b_image.is_float()) {
if (metadata.channels == 1) {
@ -104,7 +127,7 @@ bool BlenderImageLoader::load_pixels(const ImageMetaData &metadata,
}
else if (metadata.type == IMAGE_DATA_TYPE_HALF || metadata.type == IMAGE_DATA_TYPE_HALF4) {
/* Half float. Blender does not have a half type, but in some cases
* we upsample byte to half to avoid precision loss for colorspace
* we up-sample byte to half to avoid precision loss for colorspace
* conversion. */
unsigned char *in_pixels = image_get_pixels_for_frame(b_image, frame, tile_number);

View File

@ -11,6 +11,7 @@
# include "util/progress.h"
# include "device/metal/bvh.h"
# include "device/metal/util.h"
CCL_NAMESPACE_BEGIN
@ -18,6 +19,7 @@ CCL_NAMESPACE_BEGIN
{ \
string str = string_printf(__VA_ARGS__); \
progress.set_substatus(str); \
metal_printf("%s\n", str.c_str()); \
}
BVHMetal::BVHMetal(const BVHParams &params_,

View File

@ -31,6 +31,8 @@ class MetalDevice : public Device {
string source[PSO_NUM];
string source_md5[PSO_NUM];
bool capture_enabled = false;
KernelParamsMetal launch_params = {0};
/* MetalRT members ----------------------------------*/

View File

@ -86,6 +86,10 @@ MetalDevice::MetalDevice(const DeviceInfo &info, Stats &stats, Profiler &profile
use_metalrt = (atoi(metalrt) != 0);
}
if (getenv("CYCLES_DEBUG_METAL_CAPTURE_KERNEL")) {
capture_enabled = true;
}
MTLArgumentDescriptor *arg_desc_params = [[MTLArgumentDescriptor alloc] init];
arg_desc_params.dataType = MTLDataTypePointer;
arg_desc_params.access = MTLArgumentAccessReadOnly;
@ -394,7 +398,7 @@ MetalDevice::MetalMem *MetalDevice::generic_alloc(device_memory &mem)
}
if (size > 0) {
if (mem.type == MEM_DEVICE_ONLY) {
if (mem.type == MEM_DEVICE_ONLY && !capture_enabled) {
options = MTLResourceStorageModePrivate;
}

View File

@ -12,8 +12,6 @@
# include "device/metal/util.h"
# include "kernel/device/metal/globals.h"
# define metal_printf VLOG(4) << string_printf
CCL_NAMESPACE_BEGIN
class MetalDevice;
@ -40,43 +38,82 @@ class MetalDeviceQueue : public DeviceQueue {
virtual void copy_from_device(device_memory &mem) override;
protected:
void setup_capture();
void update_capture(DeviceKernel kernel);
void begin_capture();
void end_capture();
void prepare_resources(DeviceKernel kernel);
id<MTLComputeCommandEncoder> get_compute_encoder(DeviceKernel kernel);
id<MTLBlitCommandEncoder> get_blit_encoder();
MetalDevice *metal_device;
MetalBufferPool temp_buffer_pool;
MetalDevice *metal_device_;
MetalBufferPool temp_buffer_pool_;
API_AVAILABLE(macos(11.0), ios(14.0))
MTLCommandBufferDescriptor *command_buffer_desc = nullptr;
id<MTLDevice> mtlDevice = nil;
id<MTLCommandQueue> mtlCommandQueue = nil;
id<MTLCommandBuffer> mtlCommandBuffer = nil;
id<MTLComputeCommandEncoder> mtlComputeEncoder = nil;
id<MTLBlitCommandEncoder> mtlBlitEncoder = nil;
MTLCommandBufferDescriptor *command_buffer_desc_ = nullptr;
id<MTLDevice> mtlDevice_ = nil;
id<MTLCommandQueue> mtlCommandQueue_ = nil;
id<MTLCommandBuffer> mtlCommandBuffer_ = nil;
id<MTLComputeCommandEncoder> mtlComputeEncoder_ = nil;
id<MTLBlitCommandEncoder> mtlBlitEncoder_ = nil;
API_AVAILABLE(macos(10.14), ios(14.0))
id<MTLSharedEvent> shared_event = nil;
id<MTLSharedEvent> shared_event_ = nil;
API_AVAILABLE(macos(10.14), ios(14.0))
MTLSharedEventListener *shared_event_listener = nil;
MTLSharedEventListener *shared_event_listener_ = nil;
dispatch_queue_t event_queue;
dispatch_semaphore_t wait_semaphore;
dispatch_queue_t event_queue_;
dispatch_semaphore_t wait_semaphore_;
struct CopyBack {
void *host_pointer;
void *gpu_mem;
uint64_t size;
};
std::vector<CopyBack> copy_back_mem;
std::vector<CopyBack> copy_back_mem_;
uint64_t shared_event_id;
uint64_t command_buffers_submitted = 0;
uint64_t command_buffers_completed = 0;
Stats &stats;
uint64_t shared_event_id_;
uint64_t command_buffers_submitted_ = 0;
uint64_t command_buffers_completed_ = 0;
Stats &stats_;
void close_compute_encoder();
void close_blit_encoder();
bool verbose_tracing_ = false;
bool label_command_encoders_ = false;
/* Per-kernel profiling (see CYCLES_METAL_PROFILING). */
struct TimingData {
DeviceKernel kernel;
int work_size;
uint64_t timing_id;
};
std::vector<TimingData> command_encoder_labels_;
API_AVAILABLE(macos(10.14), ios(14.0))
id<MTLSharedEvent> timing_shared_event_ = nil;
uint64_t timing_shared_event_id_;
uint64_t command_buffer_start_timing_id_;
struct TimingStats {
double total_time = 0.0;
uint64_t total_work_size = 0;
uint64_t num_dispatches = 0;
};
TimingStats timing_stats_[DEVICE_KERNEL_NUM];
double last_completion_time_ = 0.0;
/* .gputrace capture (see CYCLES_DEBUG_METAL_CAPTURE_...). */
id<MTLCaptureScope> mtlCaptureScope_ = nil;
DeviceKernel capture_kernel_;
int capture_dispatch_counter_ = 0;
bool capture_samples_ = false;
int capture_reset_counter_ = 0;
bool is_capturing_ = false;
bool is_capturing_to_disk_ = false;
bool has_captured_to_disk_ = false;
};
CCL_NAMESPACE_END

View File

@ -17,46 +17,250 @@ CCL_NAMESPACE_BEGIN
/* MetalDeviceQueue */
MetalDeviceQueue::MetalDeviceQueue(MetalDevice *device)
: DeviceQueue(device), metal_device(device), stats(device->stats)
: DeviceQueue(device), metal_device_(device), stats_(device->stats)
{
if (@available(macos 11.0, *)) {
command_buffer_desc = [[MTLCommandBufferDescriptor alloc] init];
command_buffer_desc.errorOptions = MTLCommandBufferErrorOptionEncoderExecutionStatus;
command_buffer_desc_ = [[MTLCommandBufferDescriptor alloc] init];
command_buffer_desc_.errorOptions = MTLCommandBufferErrorOptionEncoderExecutionStatus;
}
mtlDevice = device->mtlDevice;
mtlCommandQueue = [mtlDevice newCommandQueue];
mtlDevice_ = device->mtlDevice;
mtlCommandQueue_ = [mtlDevice_ newCommandQueue];
if (@available(macos 10.14, *)) {
shared_event = [mtlDevice newSharedEvent];
shared_event_id = 1;
shared_event_ = [mtlDevice_ newSharedEvent];
shared_event_id_ = 1;
/* Shareable event listener */
event_queue = dispatch_queue_create("com.cycles.metal.event_queue", NULL);
shared_event_listener = [[MTLSharedEventListener alloc] initWithDispatchQueue:event_queue];
event_queue_ = dispatch_queue_create("com.cycles.metal.event_queue", NULL);
shared_event_listener_ = [[MTLSharedEventListener alloc] initWithDispatchQueue:event_queue_];
}
wait_semaphore = dispatch_semaphore_create(0);
wait_semaphore_ = dispatch_semaphore_create(0);
if (@available(macos 10.14, *)) {
if (getenv("CYCLES_METAL_PROFILING")) {
/* Enable per-kernel timing breakdown (shown at end of render). */
timing_shared_event_ = [mtlDevice_ newSharedEvent];
label_command_encoders_ = true;
}
if (getenv("CYCLES_METAL_DEBUG")) {
/* Enable very verbose tracing (shows every dispatch). */
verbose_tracing_ = true;
label_command_encoders_ = true;
}
timing_shared_event_id_ = 1;
}
setup_capture();
}
void MetalDeviceQueue::setup_capture()
{
capture_kernel_ = DeviceKernel(-1);
if (auto capture_kernel_str = getenv("CYCLES_DEBUG_METAL_CAPTURE_KERNEL")) {
/* CYCLES_DEBUG_METAL_CAPTURE_KERNEL captures a single dispatch of the specified kernel. */
capture_kernel_ = DeviceKernel(atoi(capture_kernel_str));
printf("Capture kernel: %d = %s\n", capture_kernel_, device_kernel_as_string(capture_kernel_));
capture_dispatch_counter_ = 0;
if (auto capture_dispatch_str = getenv("CYCLES_DEBUG_METAL_CAPTURE_DISPATCH")) {
capture_dispatch_counter_ = atoi(capture_dispatch_str);
printf("Capture dispatch number %d\n", capture_dispatch_counter_);
}
}
else if (auto capture_samples_str = getenv("CYCLES_DEBUG_METAL_CAPTURE_SAMPLES")) {
/* CYCLES_DEBUG_METAL_CAPTURE_SAMPLES captures a block of dispatches from reset#(N) to
* reset#(N+1). */
capture_samples_ = true;
capture_reset_counter_ = atoi(capture_samples_str);
capture_dispatch_counter_ = INT_MAX;
if (auto capture_limit_str = getenv("CYCLES_DEBUG_METAL_CAPTURE_LIMIT")) {
/* CYCLES_DEBUG_METAL_CAPTURE_LIMIT sets the maximum number of dispatches to capture. */
capture_dispatch_counter_ = atoi(capture_limit_str);
}
printf("Capturing sample block %d (dispatch limit: %d)\n",
capture_reset_counter_,
capture_dispatch_counter_);
}
else {
/* No capturing requested. */
return;
}
/* Enable .gputrace capture for the specified DeviceKernel. */
MTLCaptureManager *captureManager = [MTLCaptureManager sharedCaptureManager];
mtlCaptureScope_ = [captureManager newCaptureScopeWithDevice:mtlDevice_];
mtlCaptureScope_.label = [NSString stringWithFormat:@"Cycles kernel dispatch"];
[captureManager setDefaultCaptureScope:mtlCaptureScope_];
label_command_encoders_ = true;
if (auto capture_url = getenv("CYCLES_DEBUG_METAL_CAPTURE_URL")) {
if (@available(macos 10.15, *)) {
if ([captureManager supportsDestination:MTLCaptureDestinationGPUTraceDocument]) {
MTLCaptureDescriptor *captureDescriptor = [[MTLCaptureDescriptor alloc] init];
captureDescriptor.captureObject = mtlCaptureScope_;
captureDescriptor.destination = MTLCaptureDestinationGPUTraceDocument;
captureDescriptor.outputURL = [NSURL fileURLWithPath:@(capture_url)];
NSError *error;
if (![captureManager startCaptureWithDescriptor:captureDescriptor error:&error]) {
NSString *err = [error localizedDescription];
printf("Start capture failed: %s\n", [err UTF8String]);
}
else {
printf("Capture started (URL: %s)\n", capture_url);
is_capturing_to_disk_ = true;
}
}
else {
printf("Capture to file is not supported\n");
}
}
}
}
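The capture path above is driven entirely by environment variables read in setup_capture() and in the MetalDevice constructor. A minimal sketch, assuming a POSIX host, of how a test harness might enable a single-kernel capture before the Metal device and queue are constructed; the kernel index, dispatch number and output path are placeholders:
/* Illustrative only: sets the variables parsed by setup_capture() above. */
#include <cstdlib>
static void enable_example_gputrace_capture()
{
  /* Capture dispatch number 3 of the kernel with enum value 5... */
  setenv("CYCLES_DEBUG_METAL_CAPTURE_KERNEL", "5", 1);
  setenv("CYCLES_DEBUG_METAL_CAPTURE_DISPATCH", "3", 1);
  /* ...and write the .gputrace to disk instead of relying on Xcode. */
  setenv("CYCLES_DEBUG_METAL_CAPTURE_URL", "/tmp/cycles_dispatch.gputrace", 1);
}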
void MetalDeviceQueue::update_capture(DeviceKernel kernel)
{
/* Handle capture end triggers. */
if (is_capturing_) {
capture_dispatch_counter_ -= 1;
if (capture_dispatch_counter_ <= 0 || kernel == DEVICE_KERNEL_INTEGRATOR_RESET) {
/* End capture if we've hit the dispatch limit or we hit a "reset". */
end_capture();
}
return;
}
if (capture_dispatch_counter_ < 0) {
/* We finished capturing. */
return;
}
/* Handle single-capture start trigger. */
if (kernel == capture_kernel_) {
/* Start capturing when we hit the Nth dispatch of the specified kernel. */
if (capture_dispatch_counter_ == 0) {
begin_capture();
}
capture_dispatch_counter_ -= 1;
return;
}
/* Handle multi-capture start trigger. */
if (capture_samples_) {
/* Start capturing when the reset countdown is at 0. */
if (capture_reset_counter_ == 0) {
begin_capture();
}
if (kernel == DEVICE_KERNEL_INTEGRATOR_RESET) {
capture_reset_counter_ -= 1;
}
return;
}
}
void MetalDeviceQueue::begin_capture()
{
/* Start gputrace capture. */
if (mtlCommandBuffer_) {
synchronize();
}
[mtlCaptureScope_ beginScope];
printf("[mtlCaptureScope_ beginScope]\n");
is_capturing_ = true;
}
void MetalDeviceQueue::end_capture()
{
[mtlCaptureScope_ endScope];
is_capturing_ = false;
printf("[mtlCaptureScope_ endScope]\n");
if (is_capturing_to_disk_) {
if (@available(macos 10.15, *)) {
[[MTLCaptureManager sharedCaptureManager] stopCapture];
has_captured_to_disk_ = true;
is_capturing_to_disk_ = false;
is_capturing_ = false;
printf("Capture stopped\n");
}
}
}
MetalDeviceQueue::~MetalDeviceQueue()
{
/* Tidying up here isn't really practical - we should expect and require the work
* queue to be empty here. */
assert(mtlCommandBuffer == nil);
assert(command_buffers_submitted == command_buffers_completed);
assert(mtlCommandBuffer_ == nil);
assert(command_buffers_submitted_ == command_buffers_completed_);
if (@available(macos 10.14, *)) {
[shared_event_listener release];
[shared_event release];
[shared_event_listener_ release];
[shared_event_ release];
}
if (@available(macos 11.0, *)) {
[command_buffer_desc release];
[command_buffer_desc_ release];
}
if (mtlCommandQueue) {
[mtlCommandQueue release];
mtlCommandQueue = nil;
if (mtlCommandQueue_) {
[mtlCommandQueue_ release];
mtlCommandQueue_ = nil;
}
if (mtlCaptureScope_) {
[mtlCaptureScope_ release];
}
double total_time = 0.0;
/* Show per-kernel timings, if gathered (see CYCLES_METAL_PROFILING). */
int64_t num_dispatches = 0;
for (auto &stat : timing_stats_) {
total_time += stat.total_time;
num_dispatches += stat.num_dispatches;
}
if (num_dispatches) {
printf("\nMetal dispatch stats:\n\n");
auto header = string_printf("%-40s %16s %12s %12s %7s %7s",
"Kernel name",
"Total threads",
"Dispatches",
"Avg. T/D",
"Time",
"Time%");
auto divider = string(header.length(), '-');
printf("%s\n%s\n%s\n", divider.c_str(), header.c_str(), divider.c_str());
for (size_t i = 0; i < DEVICE_KERNEL_NUM; i++) {
auto &stat = timing_stats_[i];
if (stat.num_dispatches > 0) {
printf("%-40s %16s %12s %12s %6.2fs %6.2f%%\n",
device_kernel_as_string(DeviceKernel(i)),
string_human_readable_number(stat.total_work_size).c_str(),
string_human_readable_number(stat.num_dispatches).c_str(),
string_human_readable_number(stat.total_work_size / stat.num_dispatches).c_str(),
stat.total_time,
stat.total_time * 100.0 / total_time);
}
}
printf("%s\n", divider.c_str());
printf("%-40s %16s %12s %12s %6.2fs %6.2f%%\n",
"",
"",
string_human_readable_number(num_dispatches).c_str(),
"",
total_time,
100.0);
printf("%s\n\n", divider.c_str());
}
}
@ -66,10 +270,10 @@ int MetalDeviceQueue::num_concurrent_states(const size_t /*state_size*/) const
/* TODO: compute automatically. */
/* TODO: must have at least num_threads_per_block. */
int result = 1048576;
if (metal_device->device_vendor == METAL_GPU_AMD) {
if (metal_device_->device_vendor == METAL_GPU_AMD) {
result *= 2;
}
else if (metal_device->device_vendor == METAL_GPU_APPLE) {
else if (metal_device_->device_vendor == METAL_GPU_APPLE) {
result *= 4;
}
return result;
@ -80,10 +284,10 @@ int MetalDeviceQueue::num_concurrent_busy_states() const
/* METAL_WIP */
/* TODO: compute automatically. */
int result = 65536;
if (metal_device->device_vendor == METAL_GPU_AMD) {
if (metal_device_->device_vendor == METAL_GPU_AMD) {
result *= 2;
}
else if (metal_device->device_vendor == METAL_GPU_APPLE) {
else if (metal_device_->device_vendor == METAL_GPU_APPLE) {
result *= 4;
}
return result;
@ -92,7 +296,7 @@ int MetalDeviceQueue::num_concurrent_busy_states() const
void MetalDeviceQueue::init_execution()
{
/* Synchronize all textures and memory copies before executing task. */
metal_device->load_texture_info();
metal_device_->load_texture_info();
synchronize();
}
@ -101,7 +305,9 @@ bool MetalDeviceQueue::enqueue(DeviceKernel kernel,
const int work_size,
DeviceKernelArguments const &args)
{
if (metal_device->have_error()) {
update_capture(kernel);
if (metal_device_->have_error()) {
return false;
}
@ -110,6 +316,12 @@ bool MetalDeviceQueue::enqueue(DeviceKernel kernel,
id<MTLComputeCommandEncoder> mtlComputeCommandEncoder = get_compute_encoder(kernel);
if (@available(macos 10.14, *)) {
if (timing_shared_event_) {
command_encoder_labels_.push_back({kernel, work_size, timing_shared_event_id_});
}
}
/* Determine size requirement for argument buffer. */
size_t arg_buffer_length = 0;
for (size_t i = 0; i < args.count; i++) {
@ -126,8 +338,8 @@ bool MetalDeviceQueue::enqueue(DeviceKernel kernel,
/* Metal ancillary bindless pointers. */
size_t metal_offsets = arg_buffer_length;
arg_buffer_length += metal_device->mtlAncillaryArgEncoder.encodedLength;
arg_buffer_length = round_up(arg_buffer_length, metal_device->mtlAncillaryArgEncoder.alignment);
arg_buffer_length += metal_device_->mtlAncillaryArgEncoder.encodedLength;
arg_buffer_length = round_up(arg_buffer_length, metal_device_->mtlAncillaryArgEncoder.alignment);
/* Temporary buffer used to prepare arg_buffer */
uint8_t *init_arg_buffer = (uint8_t *)alloca(arg_buffer_length);
@ -150,19 +362,23 @@ bool MetalDeviceQueue::enqueue(DeviceKernel kernel,
sizeof(IntegratorStateGPU);
size_t plain_old_launch_data_size = sizeof(KernelParamsMetal) - plain_old_launch_data_offset;
memcpy(init_arg_buffer + globals_offsets + plain_old_launch_data_offset,
(uint8_t *)&metal_device->launch_params + plain_old_launch_data_offset,
(uint8_t *)&metal_device_->launch_params + plain_old_launch_data_offset,
plain_old_launch_data_size);
/* Allocate an argument buffer. */
MTLResourceOptions arg_buffer_options = MTLResourceStorageModeManaged;
if (@available(macOS 11.0, *)) {
if ([mtlDevice hasUnifiedMemory]) {
if ([mtlDevice_ hasUnifiedMemory]) {
arg_buffer_options = MTLResourceStorageModeShared;
}
}
id<MTLBuffer> arg_buffer = temp_buffer_pool.get_buffer(
mtlDevice, mtlCommandBuffer, arg_buffer_length, arg_buffer_options, init_arg_buffer, stats);
id<MTLBuffer> arg_buffer = temp_buffer_pool_.get_buffer(mtlDevice_,
mtlCommandBuffer_,
arg_buffer_length,
arg_buffer_options,
init_arg_buffer,
stats_);
/* Encode the pointer "enqueue" arguments */
bytes_written = 0;
@ -170,16 +386,16 @@ bool MetalDeviceQueue::enqueue(DeviceKernel kernel,
size_t size_in_bytes = args.sizes[i];
bytes_written = round_up(bytes_written, size_in_bytes);
if (args.types[i] == DeviceKernelArguments::POINTER) {
[metal_device->mtlBufferKernelParamsEncoder setArgumentBuffer:arg_buffer
offset:bytes_written];
[metal_device_->mtlBufferKernelParamsEncoder setArgumentBuffer:arg_buffer
offset:bytes_written];
if (MetalDevice::MetalMem *mmem = *(MetalDevice::MetalMem **)args.values[i]) {
[mtlComputeCommandEncoder useResource:mmem->mtlBuffer
usage:MTLResourceUsageRead | MTLResourceUsageWrite];
[metal_device->mtlBufferKernelParamsEncoder setBuffer:mmem->mtlBuffer offset:0 atIndex:0];
[metal_device_->mtlBufferKernelParamsEncoder setBuffer:mmem->mtlBuffer offset:0 atIndex:0];
}
else {
if (@available(macos 12.0, *)) {
[metal_device->mtlBufferKernelParamsEncoder setBuffer:nil offset:0 atIndex:0];
[metal_device_->mtlBufferKernelParamsEncoder setBuffer:nil offset:0 atIndex:0];
}
}
}
@ -187,49 +403,58 @@ bool MetalDeviceQueue::enqueue(DeviceKernel kernel,
}
/* Encode KernelParamsMetal buffers */
[metal_device->mtlBufferKernelParamsEncoder setArgumentBuffer:arg_buffer offset:globals_offsets];
[metal_device_->mtlBufferKernelParamsEncoder setArgumentBuffer:arg_buffer
offset:globals_offsets];
if (label_command_encoders_) {
/* Add human-readable labels if we're doing any form of debugging / profiling. */
mtlComputeCommandEncoder.label = [[NSString alloc]
initWithFormat:@"Metal queue launch %s, work_size %d",
device_kernel_as_string(kernel),
work_size];
}
/* this relies on IntegratorStateGPU layout being contiguous device_ptrs */
const size_t pointer_block_end = offsetof(KernelParamsMetal, __integrator_state) +
sizeof(IntegratorStateGPU);
for (size_t offset = 0; offset < pointer_block_end; offset += sizeof(device_ptr)) {
int pointer_index = offset / sizeof(device_ptr);
int pointer_index = int(offset / sizeof(device_ptr));
MetalDevice::MetalMem *mmem = *(
MetalDevice::MetalMem **)((uint8_t *)&metal_device->launch_params + offset);
if (mmem && (mmem->mtlBuffer || mmem->mtlTexture)) {
[metal_device->mtlBufferKernelParamsEncoder setBuffer:mmem->mtlBuffer
offset:0
atIndex:pointer_index];
MetalDevice::MetalMem **)((uint8_t *)&metal_device_->launch_params + offset);
if (mmem && mmem->mem && (mmem->mtlBuffer || mmem->mtlTexture)) {
[metal_device_->mtlBufferKernelParamsEncoder setBuffer:mmem->mtlBuffer
offset:0
atIndex:pointer_index];
}
else {
if (@available(macos 12.0, *)) {
[metal_device->mtlBufferKernelParamsEncoder setBuffer:nil offset:0 atIndex:pointer_index];
[metal_device_->mtlBufferKernelParamsEncoder setBuffer:nil offset:0 atIndex:pointer_index];
}
}
}
bytes_written = globals_offsets + sizeof(KernelParamsMetal);
const MetalKernelPipeline *metal_kernel_pso = MetalDeviceKernels::get_best_pipeline(metal_device,
kernel);
const MetalKernelPipeline *metal_kernel_pso = MetalDeviceKernels::get_best_pipeline(
metal_device_, kernel);
if (!metal_kernel_pso) {
metal_device->set_error(
metal_device_->set_error(
string_printf("No MetalKernelPipeline for %s\n", device_kernel_as_string(kernel)));
return false;
}
/* Encode ancillaries */
[metal_device->mtlAncillaryArgEncoder setArgumentBuffer:arg_buffer offset:metal_offsets];
[metal_device->mtlAncillaryArgEncoder setBuffer:metal_device->texture_bindings_2d
offset:0
atIndex:0];
[metal_device->mtlAncillaryArgEncoder setBuffer:metal_device->texture_bindings_3d
offset:0
atIndex:1];
[metal_device_->mtlAncillaryArgEncoder setArgumentBuffer:arg_buffer offset:metal_offsets];
[metal_device_->mtlAncillaryArgEncoder setBuffer:metal_device_->texture_bindings_2d
offset:0
atIndex:0];
[metal_device_->mtlAncillaryArgEncoder setBuffer:metal_device_->texture_bindings_3d
offset:0
atIndex:1];
if (@available(macos 12.0, *)) {
if (metal_device->use_metalrt) {
if (metal_device->bvhMetalRT) {
id<MTLAccelerationStructure> accel_struct = metal_device->bvhMetalRT->accel_struct;
[metal_device->mtlAncillaryArgEncoder setAccelerationStructure:accel_struct atIndex:2];
if (metal_device_->use_metalrt) {
if (metal_device_->bvhMetalRT) {
id<MTLAccelerationStructure> accel_struct = metal_device_->bvhMetalRT->accel_struct;
[metal_device_->mtlAncillaryArgEncoder setAccelerationStructure:accel_struct atIndex:2];
}
for (int table = 0; table < METALRT_TABLE_NUM; table++) {
@ -237,19 +462,19 @@ bool MetalDeviceQueue::enqueue(DeviceKernel kernel,
[metal_kernel_pso->intersection_func_table[table] setBuffer:arg_buffer
offset:globals_offsets
atIndex:1];
[metal_device->mtlAncillaryArgEncoder
[metal_device_->mtlAncillaryArgEncoder
setIntersectionFunctionTable:metal_kernel_pso->intersection_func_table[table]
atIndex:3 + table];
[mtlComputeCommandEncoder useResource:metal_kernel_pso->intersection_func_table[table]
usage:MTLResourceUsageRead];
}
else {
[metal_device->mtlAncillaryArgEncoder setIntersectionFunctionTable:nil
atIndex:3 + table];
[metal_device_->mtlAncillaryArgEncoder setIntersectionFunctionTable:nil
atIndex:3 + table];
}
}
}
bytes_written = metal_offsets + metal_device->mtlAncillaryArgEncoder.encodedLength;
bytes_written = metal_offsets + metal_device_->mtlAncillaryArgEncoder.encodedLength;
}
if (arg_buffer.storageMode == MTLStorageModeManaged) {
@ -260,10 +485,10 @@ bool MetalDeviceQueue::enqueue(DeviceKernel kernel,
[mtlComputeCommandEncoder setBuffer:arg_buffer offset:globals_offsets atIndex:1];
[mtlComputeCommandEncoder setBuffer:arg_buffer offset:metal_offsets atIndex:2];
if (metal_device->use_metalrt) {
if (metal_device_->use_metalrt) {
if (@available(macos 12.0, *)) {
auto bvhMetalRT = metal_device->bvhMetalRT;
auto bvhMetalRT = metal_device_->bvhMetalRT;
switch (kernel) {
case DEVICE_KERNEL_INTEGRATOR_INTERSECT_CLOSEST:
case DEVICE_KERNEL_INTEGRATOR_INTERSECT_SHADOW:
@ -305,7 +530,7 @@ bool MetalDeviceQueue::enqueue(DeviceKernel kernel,
case DEVICE_KERNEL_INTEGRATOR_COMPACT_SHADOW_PATHS_ARRAY:
/* See parallel_active_index.h for why this amount of shared memory is needed.
* Rounded up to 16 bytes for Metal */
shared_mem_bytes = round_up((num_threads_per_block + 1) * sizeof(int), 16);
shared_mem_bytes = (int)round_up((num_threads_per_block + 1) * sizeof(int), 16);
[mtlComputeCommandEncoder setThreadgroupMemoryLength:shared_mem_bytes atIndex:0];
break;
@ -319,7 +544,7 @@ bool MetalDeviceQueue::enqueue(DeviceKernel kernel,
[mtlComputeCommandEncoder dispatchThreadgroups:size_threadgroups_per_dispatch
threadsPerThreadgroup:size_threads_per_threadgroup];
[mtlCommandBuffer addCompletedHandler:^(id<MTLCommandBuffer> command_buffer) {
[mtlCommandBuffer_ addCompletedHandler:^(id<MTLCommandBuffer> command_buffer) {
NSString *kernel_name = metal_kernel_pso->function.label;
/* Enhanced command buffer errors are only available in 11.0+ */
@ -344,50 +569,117 @@ bool MetalDeviceQueue::enqueue(DeviceKernel kernel,
}
}];
return !(metal_device->have_error());
if (verbose_tracing_ || is_capturing_) {
/* Force a sync if we've enabled step-by-step verbose tracing or if we're capturing. */
synchronize();
/* Show queue counters and dispatch timing. */
if (verbose_tracing_) {
if (kernel == DEVICE_KERNEL_INTEGRATOR_RESET) {
printf(
"_____________________________________.____________________.______________.___________"
"______________________________________\n");
}
printf("%-40s| %7d threads |%5.2fms | buckets [",
device_kernel_as_string(kernel),
work_size,
last_completion_time_ * 1000.0);
std::lock_guard<std::recursive_mutex> lock(metal_device_->metal_mem_map_mutex);
for (auto &it : metal_device_->metal_mem_map) {
const string c_integrator_queue_counter = "integrator_queue_counter";
if (it.first->name == c_integrator_queue_counter) {
/* Workaround "device_copy_from" being protected. */
struct MyDeviceMemory : device_memory {
void device_copy_from__IntegratorQueueCounter()
{
device_copy_from(0, data_width, 1, sizeof(IntegratorQueueCounter));
}
};
((MyDeviceMemory *)it.first)->device_copy_from__IntegratorQueueCounter();
if (IntegratorQueueCounter *queue_counter = (IntegratorQueueCounter *)
it.first->host_pointer) {
for (int i = 0; i < DEVICE_KERNEL_INTEGRATOR_NUM; i++)
printf("%s%d", i == 0 ? "" : ",", int(queue_counter->num_queued[i]));
}
break;
}
}
printf("]\n");
}
}
return !(metal_device_->have_error());
}
bool MetalDeviceQueue::synchronize()
{
if (metal_device->have_error()) {
if (has_captured_to_disk_ || metal_device_->have_error()) {
return false;
}
if (mtlComputeEncoder) {
if (mtlComputeEncoder_) {
close_compute_encoder();
}
close_blit_encoder();
if (mtlCommandBuffer) {
uint64_t shared_event_id = this->shared_event_id++;
if (mtlCommandBuffer_) {
scoped_timer timer;
if (@available(macos 10.14, *)) {
__block dispatch_semaphore_t block_sema = wait_semaphore;
[shared_event notifyListener:shared_event_listener
atValue:shared_event_id
block:^(id<MTLSharedEvent> sharedEvent, uint64_t value) {
dispatch_semaphore_signal(block_sema);
}];
[mtlCommandBuffer encodeSignalEvent:shared_event value:shared_event_id];
[mtlCommandBuffer commit];
dispatch_semaphore_wait(wait_semaphore, DISPATCH_TIME_FOREVER);
if (timing_shared_event_) {
/* For per-kernel timing, add event handlers to measure & accumulate dispatch times. */
__block double completion_time = 0;
for (uint64_t i = command_buffer_start_timing_id_; i < timing_shared_event_id_; i++) {
[timing_shared_event_ notifyListener:shared_event_listener_
atValue:i
block:^(id<MTLSharedEvent> sharedEvent, uint64_t value) {
completion_time = timer.get_time() - completion_time;
last_completion_time_ = completion_time;
for (auto label : command_encoder_labels_) {
if (label.timing_id == value) {
TimingStats &stat = timing_stats_[label.kernel];
stat.num_dispatches++;
stat.total_time += completion_time;
stat.total_work_size += label.work_size;
}
}
}];
}
}
}
[mtlCommandBuffer release];
uint64_t shared_event_id_ = this->shared_event_id_++;
for (const CopyBack &mmem : copy_back_mem) {
if (@available(macos 10.14, *)) {
__block dispatch_semaphore_t block_sema = wait_semaphore_;
[shared_event_ notifyListener:shared_event_listener_
atValue:shared_event_id_
block:^(id<MTLSharedEvent> sharedEvent, uint64_t value) {
dispatch_semaphore_signal(block_sema);
}];
[mtlCommandBuffer_ encodeSignalEvent:shared_event_ value:shared_event_id_];
[mtlCommandBuffer_ commit];
dispatch_semaphore_wait(wait_semaphore_, DISPATCH_TIME_FOREVER);
}
[mtlCommandBuffer_ release];
for (const CopyBack &mmem : copy_back_mem_) {
memcpy((uchar *)mmem.host_pointer, (uchar *)mmem.gpu_mem, mmem.size);
}
copy_back_mem.clear();
copy_back_mem_.clear();
temp_buffer_pool.process_command_buffer_completion(mtlCommandBuffer);
metal_device->flush_delayed_free_list();
temp_buffer_pool_.process_command_buffer_completion(mtlCommandBuffer_);
metal_device_->flush_delayed_free_list();
mtlCommandBuffer = nil;
mtlCommandBuffer_ = nil;
command_encoder_labels_.clear();
}
return !(metal_device->have_error());
return !(metal_device_->have_error());
}
void MetalDeviceQueue::zero_to_device(device_memory &mem)
@ -400,20 +692,20 @@ void MetalDeviceQueue::zero_to_device(device_memory &mem)
/* Allocate on demand. */
if (mem.device_pointer == 0) {
metal_device->mem_alloc(mem);
metal_device_->mem_alloc(mem);
}
/* Zero memory on device. */
assert(mem.device_pointer != 0);
std::lock_guard<std::recursive_mutex> lock(metal_device->metal_mem_map_mutex);
MetalDevice::MetalMem &mmem = *metal_device->metal_mem_map.at(&mem);
std::lock_guard<std::recursive_mutex> lock(metal_device_->metal_mem_map_mutex);
MetalDevice::MetalMem &mmem = *metal_device_->metal_mem_map.at(&mem);
if (mmem.mtlBuffer) {
id<MTLBlitCommandEncoder> blitEncoder = get_blit_encoder();
[blitEncoder fillBuffer:mmem.mtlBuffer range:NSMakeRange(mmem.offset, mmem.size) value:0];
}
else {
metal_device->mem_zero(mem);
metal_device_->mem_zero(mem);
}
}
@ -425,15 +717,15 @@ void MetalDeviceQueue::copy_to_device(device_memory &mem)
/* Allocate on demand. */
if (mem.device_pointer == 0) {
metal_device->mem_alloc(mem);
metal_device_->mem_alloc(mem);
}
assert(mem.device_pointer != 0);
assert(mem.host_pointer != nullptr);
std::lock_guard<std::recursive_mutex> lock(metal_device->metal_mem_map_mutex);
auto result = metal_device->metal_mem_map.find(&mem);
if (result != metal_device->metal_mem_map.end()) {
std::lock_guard<std::recursive_mutex> lock(metal_device_->metal_mem_map_mutex);
auto result = metal_device_->metal_mem_map.find(&mem);
if (result != metal_device_->metal_mem_map.end()) {
if (mem.host_pointer == mem.shared_pointer) {
return;
}
@ -441,12 +733,12 @@ void MetalDeviceQueue::copy_to_device(device_memory &mem)
MetalDevice::MetalMem &mmem = *result->second;
id<MTLBlitCommandEncoder> blitEncoder = get_blit_encoder();
id<MTLBuffer> buffer = temp_buffer_pool.get_buffer(mtlDevice,
mtlCommandBuffer,
mmem.size,
MTLResourceStorageModeShared,
mem.host_pointer,
stats);
id<MTLBuffer> buffer = temp_buffer_pool_.get_buffer(mtlDevice_,
mtlCommandBuffer_,
mmem.size,
MTLResourceStorageModeShared,
mem.host_pointer,
stats_);
[blitEncoder copyFromBuffer:buffer
sourceOffset:0
@ -455,7 +747,7 @@ void MetalDeviceQueue::copy_to_device(device_memory &mem)
size:mmem.size];
}
else {
metal_device->mem_copy_to(mem);
metal_device_->mem_copy_to(mem);
}
}
@ -470,8 +762,8 @@ void MetalDeviceQueue::copy_from_device(device_memory &mem)
assert(mem.device_pointer != 0);
assert(mem.host_pointer != nullptr);
std::lock_guard<std::recursive_mutex> lock(metal_device->metal_mem_map_mutex);
MetalDevice::MetalMem &mmem = *metal_device->metal_mem_map.at(&mem);
std::lock_guard<std::recursive_mutex> lock(metal_device_->metal_mem_map_mutex);
MetalDevice::MetalMem &mmem = *metal_device_->metal_mem_map.at(&mem);
if (mmem.mtlBuffer) {
const size_t size = mem.memory_size();
@ -481,8 +773,8 @@ void MetalDeviceQueue::copy_from_device(device_memory &mem)
[blitEncoder synchronizeResource:mmem.mtlBuffer];
}
if (mem.host_pointer != mmem.hostPtr) {
if (mtlCommandBuffer) {
copy_back_mem.push_back({mem.host_pointer, mmem.hostPtr, size});
if (mtlCommandBuffer_) {
copy_back_mem_.push_back({mem.host_pointer, mmem.hostPtr, size});
}
else {
memcpy((uchar *)mem.host_pointer, (uchar *)mmem.hostPtr, size);
@ -494,16 +786,16 @@ void MetalDeviceQueue::copy_from_device(device_memory &mem)
}
}
else {
metal_device->mem_copy_from(mem);
metal_device_->mem_copy_from(mem);
}
}
void MetalDeviceQueue::prepare_resources(DeviceKernel kernel)
{
std::lock_guard<std::recursive_mutex> lock(metal_device->metal_mem_map_mutex);
std::lock_guard<std::recursive_mutex> lock(metal_device_->metal_mem_map_mutex);
/* declare resource usage */
for (auto &it : metal_device->metal_mem_map) {
for (auto &it : metal_device_->metal_mem_map) {
device_memory *mem = it.first;
MTLResourceUsage usage = MTLResourceUsageRead;
@ -513,17 +805,17 @@ void MetalDeviceQueue::prepare_resources(DeviceKernel kernel)
if (it.second->mtlBuffer) {
/* METAL_WIP - use array version (i.e. useResources) */
[mtlComputeEncoder useResource:it.second->mtlBuffer usage:usage];
[mtlComputeEncoder_ useResource:it.second->mtlBuffer usage:usage];
}
else if (it.second->mtlTexture) {
/* METAL_WIP - use array version (i.e. useResources) */
[mtlComputeEncoder useResource:it.second->mtlTexture usage:usage | MTLResourceUsageSample];
[mtlComputeEncoder_ useResource:it.second->mtlTexture usage:usage | MTLResourceUsageSample];
}
}
/* ancillaries */
[mtlComputeEncoder useResource:metal_device->texture_bindings_2d usage:MTLResourceUsageRead];
[mtlComputeEncoder useResource:metal_device->texture_bindings_3d usage:MTLResourceUsageRead];
[mtlComputeEncoder_ useResource:metal_device_->texture_bindings_2d usage:MTLResourceUsageRead];
[mtlComputeEncoder_ useResource:metal_device_->texture_bindings_3d usage:MTLResourceUsageRead];
}
id<MTLComputeCommandEncoder> MetalDeviceQueue::get_compute_encoder(DeviceKernel kernel)
@ -531,67 +823,81 @@ id<MTLComputeCommandEncoder> MetalDeviceQueue::get_compute_encoder(DeviceKernel
bool concurrent = (kernel < DEVICE_KERNEL_INTEGRATOR_NUM);
if (@available(macos 10.14, *)) {
if (mtlComputeEncoder) {
if (mtlComputeEncoder.dispatchType == concurrent ? MTLDispatchTypeConcurrent :
MTLDispatchTypeSerial) {
if (timing_shared_event_) {
/* Close the current encoder to ensure we're able to capture per-encoder timing data. */
if (mtlComputeEncoder_) {
close_compute_encoder();
}
}
if (mtlComputeEncoder_) {
if (mtlComputeEncoder_.dispatchType == concurrent ? MTLDispatchTypeConcurrent :
MTLDispatchTypeSerial) {
/* declare usage of MTLBuffers etc */
prepare_resources(kernel);
return mtlComputeEncoder;
return mtlComputeEncoder_;
}
close_compute_encoder();
}
close_blit_encoder();
if (!mtlCommandBuffer) {
mtlCommandBuffer = [mtlCommandQueue commandBuffer];
[mtlCommandBuffer retain];
if (!mtlCommandBuffer_) {
mtlCommandBuffer_ = [mtlCommandQueue_ commandBuffer];
[mtlCommandBuffer_ retain];
}
mtlComputeEncoder = [mtlCommandBuffer
mtlComputeEncoder_ = [mtlCommandBuffer_
computeCommandEncoderWithDispatchType:concurrent ? MTLDispatchTypeConcurrent :
MTLDispatchTypeSerial];
[mtlComputeEncoder setLabel:@(device_kernel_as_string(kernel))];
[mtlComputeEncoder_ setLabel:@(device_kernel_as_string(kernel))];
/* declare usage of MTLBuffers etc */
prepare_resources(kernel);
}
return mtlComputeEncoder;
return mtlComputeEncoder_;
}
id<MTLBlitCommandEncoder> MetalDeviceQueue::get_blit_encoder()
{
if (mtlBlitEncoder) {
return mtlBlitEncoder;
if (mtlBlitEncoder_) {
return mtlBlitEncoder_;
}
if (mtlComputeEncoder) {
if (mtlComputeEncoder_) {
close_compute_encoder();
}
if (!mtlCommandBuffer) {
mtlCommandBuffer = [mtlCommandQueue commandBuffer];
[mtlCommandBuffer retain];
if (!mtlCommandBuffer_) {
mtlCommandBuffer_ = [mtlCommandQueue_ commandBuffer];
[mtlCommandBuffer_ retain];
command_buffer_start_timing_id_ = timing_shared_event_id_;
}
mtlBlitEncoder = [mtlCommandBuffer blitCommandEncoder];
return mtlBlitEncoder;
mtlBlitEncoder_ = [mtlCommandBuffer_ blitCommandEncoder];
return mtlBlitEncoder_;
}
void MetalDeviceQueue::close_compute_encoder()
{
[mtlComputeEncoder endEncoding];
mtlComputeEncoder = nil;
[mtlComputeEncoder_ endEncoding];
mtlComputeEncoder_ = nil;
if (@available(macos 10.14, *)) {
if (timing_shared_event_) {
[mtlCommandBuffer_ encodeSignalEvent:timing_shared_event_ value:timing_shared_event_id_++];
}
}
}
void MetalDeviceQueue::close_blit_encoder()
{
if (mtlBlitEncoder) {
[mtlBlitEncoder endEncoding];
mtlBlitEncoder = nil;
if (mtlBlitEncoder_) {
[mtlBlitEncoder_ endEncoding];
mtlBlitEncoder_ = nil;
}
}

View File

@ -14,6 +14,8 @@
# include "util/thread.h"
# define metal_printf VLOG(4) << string_printf
CCL_NAMESPACE_BEGIN
enum MetalGPUVendor {

View File

@ -553,7 +553,8 @@ bool OptiXDevice::load_kernels(const uint kernel_features)
OptixBuiltinISOptions builtin_options = {};
# if OPTIX_ABI_VERSION >= 55
builtin_options.builtinISModuleType = OPTIX_PRIMITIVE_TYPE_ROUND_CATMULLROM;
builtin_options.buildFlags = OPTIX_BUILD_FLAG_PREFER_FAST_TRACE;
builtin_options.buildFlags = OPTIX_BUILD_FLAG_PREFER_FAST_TRACE |
OPTIX_BUILD_FLAG_ALLOW_COMPACTION;
builtin_options.curveEndcapFlags = OPTIX_CURVE_ENDCAP_DEFAULT; /* Disable end-caps. */
# else
builtin_options.builtinISModuleType = OPTIX_PRIMITIVE_TYPE_ROUND_CUBIC_BSPLINE;
@ -1387,7 +1388,10 @@ bool OptiXDevice::build_optix_bvh(BVHOptiX *bvh,
OptixAccelBufferSizes sizes = {};
OptixAccelBuildOptions options = {};
options.operation = operation;
if (use_fast_trace_bvh) {
if (use_fast_trace_bvh ||
/* The build flags have to match the ones used to query the built-in curve intersection
program (see optixBuiltinISModuleGet above) */
build_input.type == OPTIX_BUILD_INPUT_TYPE_CURVES) {
VLOG(2) << "Using fast to trace OptiX BVH";
options.buildFlags = OPTIX_BUILD_FLAG_PREFER_FAST_TRACE | OPTIX_BUILD_FLAG_ALLOW_COMPACTION;
}

View File

@ -23,10 +23,18 @@ HdCyclesDisplayDriver::HdCyclesDisplayDriver(HdCyclesSession *renderParam, Hgi *
HdCyclesDisplayDriver::~HdCyclesDisplayDriver()
{
deinit();
if (texture_) {
_hgi->DestroyTexture(&texture_);
}
if (gl_pbo_id_) {
glDeleteBuffers(1, &gl_pbo_id_);
}
gl_context_dispose();
}
void HdCyclesDisplayDriver::init()
void HdCyclesDisplayDriver::gl_context_create()
{
#ifdef _WIN32
if (!gl_context_) {
@ -64,16 +72,42 @@ void HdCyclesDisplayDriver::init()
}
}
void HdCyclesDisplayDriver::deinit()
bool HdCyclesDisplayDriver::gl_context_enable()
{
if (texture_) {
_hgi->DestroyTexture(&texture_);
#ifdef _WIN32
if (!hdc_ || !gl_context_) {
return false;
}
if (gl_pbo_id_) {
glDeleteBuffers(1, &gl_pbo_id_);
mutex_.lock();
// Do not change context if this is called in the main thread
if (wglGetCurrentContext() == nullptr) {
if (!TF_VERIFY(wglMakeCurrent((HDC)hdc_, (HGLRC)gl_context_))) {
mutex_.unlock();
return false;
}
}
return true;
#else
return false;
#endif
}
void HdCyclesDisplayDriver::gl_context_disable()
{
#ifdef _WIN32
if (wglGetCurrentContext() == gl_context_) {
TF_VERIFY(wglMakeCurrent(nullptr, nullptr));
}
mutex_.unlock();
#endif
}
void HdCyclesDisplayDriver::gl_context_dispose()
{
#ifdef _WIN32
if (gl_context_) {
TF_VERIFY(wglDeleteContext((HGLRC)gl_context_));
@ -90,13 +124,9 @@ bool HdCyclesDisplayDriver::update_begin(const Params &params,
int texture_width,
int texture_height)
{
#ifdef _WIN32
if (!hdc_ || !gl_context_) {
if (!gl_context_enable()) {
return false;
}
#endif
graphics_interop_activate();
if (gl_render_sync_) {
glWaitSync((GLsync)gl_render_sync_, 0, GL_TIMEOUT_IGNORED);
@ -121,15 +151,14 @@ bool HdCyclesDisplayDriver::update_begin(const Params &params,
void HdCyclesDisplayDriver::update_end()
{
gl_upload_sync_ = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
glFlush();
graphics_interop_deactivate();
gl_context_disable();
}
void HdCyclesDisplayDriver::flush()
{
graphics_interop_activate();
gl_context_enable();
if (gl_upload_sync_) {
glWaitSync((GLsync)gl_upload_sync_, 0, GL_TIMEOUT_IGNORED);
@ -139,7 +168,7 @@ void HdCyclesDisplayDriver::flush()
glWaitSync((GLsync)gl_render_sync_, 0, GL_TIMEOUT_IGNORED);
}
graphics_interop_deactivate();
gl_context_disable();
}
half4 *HdCyclesDisplayDriver::map_texture_buffer()
@ -179,25 +208,12 @@ DisplayDriver::GraphicsInterop HdCyclesDisplayDriver::graphics_interop_get()
void HdCyclesDisplayDriver::graphics_interop_activate()
{
mutex_.lock();
#ifdef _WIN32
// Do not change context if this is called in the main thread
if (wglGetCurrentContext() == nullptr) {
TF_VERIFY(wglMakeCurrent((HDC)hdc_, (HGLRC)gl_context_));
}
#endif
gl_context_enable();
}
void HdCyclesDisplayDriver::graphics_interop_deactivate()
{
#ifdef _WIN32
if (wglGetCurrentContext() == gl_context_) {
TF_VERIFY(wglMakeCurrent(nullptr, nullptr));
}
#endif
mutex_.unlock();
gl_context_disable();
}
void HdCyclesDisplayDriver::clear()
@ -214,7 +230,11 @@ void HdCyclesDisplayDriver::draw(const Params &params)
return;
}
init();
if (!renderBuffer->IsResourceUsed()) {
return;
}
gl_context_create();
// Cycles 'DisplayDriver' only supports 'half4' format
TF_VERIFY(renderBuffer->GetFormat() == HdFormatFloat16Vec4);
@ -255,7 +275,6 @@ void HdCyclesDisplayDriver::draw(const Params &params)
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
gl_render_sync_ = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
glFlush();
need_update_ = false;

View File

@ -19,9 +19,6 @@ class HdCyclesDisplayDriver final : public CCL_NS::DisplayDriver {
~HdCyclesDisplayDriver();
private:
void init();
void deinit();
void next_tile_begin() override;
bool update_begin(const Params &params, int texture_width, int texture_height) override;
@ -41,6 +38,11 @@ class HdCyclesDisplayDriver final : public CCL_NS::DisplayDriver {
void draw(const Params &params) override;
void gl_context_create();
bool gl_context_enable();
void gl_context_disable();
void gl_context_dispose();
HdCyclesSession *const _renderParam;
Hgi *const _hgi;
@ -48,7 +50,6 @@ class HdCyclesDisplayDriver final : public CCL_NS::DisplayDriver {
void *hdc_ = nullptr;
void *gl_context_ = nullptr;
#endif
CCL_NS::thread_mutex mutex_;
PXR_NS::HgiTextureHandle texture_;

View File

@ -30,11 +30,11 @@ bool HdCyclesOutputDriver::update_render_tile(const Tile &tile)
std::vector<float> pixels;
for (const HdRenderPassAovBinding &aovBinding : _renderParam->GetAovBindings()) {
if (aovBinding == _renderParam->GetDisplayAovBinding()) {
continue; // Display AOV binding is already updated by Cycles display driver
}
if (const auto renderBuffer = static_cast<HdCyclesRenderBuffer *>(aovBinding.renderBuffer)) {
if (aovBinding == _renderParam->GetDisplayAovBinding() && renderBuffer->IsResourceUsed()) {
continue; // Display AOV binding is already updated by Cycles display driver
}
const HdFormat format = renderBuffer->GetFormat();
if (format == HdFormatInvalid) {
continue; // Skip invalid AOV bindings

View File

@ -35,7 +35,7 @@ bool HdCyclesRenderBuffer::Allocate(const GfVec3i &dimensions, HdFormat format,
return false;
}
const size_t oldSize = _data.size();
const size_t oldSize = _dataSize;
const size_t newSize = dimensions[0] * dimensions[1] * HdDataSizeOfFormat(format);
if (oldSize == newSize) {
return true;
@ -49,8 +49,8 @@ bool HdCyclesRenderBuffer::Allocate(const GfVec3i &dimensions, HdFormat format,
_width = dimensions[0];
_height = dimensions[1];
_format = format;
_data.resize(newSize);
_dataSize = newSize;
_resourceUsed = false;
return true;
}
@ -63,6 +63,7 @@ void HdCyclesRenderBuffer::_Deallocate()
_data.clear();
_data.shrink_to_fit();
_dataSize = 0;
_resource = VtValue();
}
@ -74,6 +75,10 @@ void *HdCyclesRenderBuffer::Map()
return nullptr;
}
if (_data.size() != _dataSize) {
_data.resize(_dataSize);
}
++_mapped;
return _data.data();
@ -103,10 +108,17 @@ void HdCyclesRenderBuffer::SetConverged(bool converged)
_converged = converged;
}
bool HdCyclesRenderBuffer::IsResourceUsed() const
{
return _resourceUsed;
}
VtValue HdCyclesRenderBuffer::GetResource(bool multiSampled) const
{
TF_UNUSED(multiSampled);
_resourceUsed = true;
return _resource;
}

View File

@ -58,6 +58,8 @@ class HdCyclesRenderBuffer final : public PXR_NS::HdRenderBuffer {
void SetConverged(bool converged);
bool IsResourceUsed() const;
PXR_NS::VtValue GetResource(bool multiSampled = false) const override;
void SetResource(const PXR_NS::VtValue &resource);
@ -74,9 +76,11 @@ class HdCyclesRenderBuffer final : public PXR_NS::HdRenderBuffer {
unsigned int _width = 0u;
unsigned int _height = 0u;
PXR_NS::HdFormat _format = PXR_NS::HdFormatInvalid;
size_t _dataSize = 0;
std::vector<uint8_t> _data;
PXR_NS::VtValue _resource;
mutable std::atomic_bool _resourceUsed = false;
std::atomic_int _mapped = 0;
std::atomic_bool _converged = false;

View File

@ -241,7 +241,7 @@ ccl_gpu_kernel(GPU_KERNEL_BLOCK_NUM_THREADS, GPU_KERNEL_MAX_REGISTERS)
}
ccl_gpu_kernel_postfix
#ifdef __KERNEL_METAL__
#if defined(__KERNEL_METAL_APPLE__) && defined(__METALRT__)
constant int __dummy_constant [[function_constant(0)]];
#endif
@ -256,7 +256,7 @@ ccl_gpu_kernel(GPU_KERNEL_BLOCK_NUM_THREADS, GPU_KERNEL_MAX_REGISTERS)
if (global_index < work_size) {
const int state = (path_index_array) ? path_index_array[global_index] : global_index;
#ifdef __KERNEL_METAL__
#if defined(__KERNEL_METAL_APPLE__) && defined(__METALRT__)
KernelGlobals kg = NULL;
/* Workaround Ambient Occlusion and Bevel nodes not working with Metal.
* Dummy offset should not affect result, but somehow fixes bug! */

View File

@ -48,7 +48,7 @@ ccl_device float3 integrator_eval_background_shader(KernelGlobals kg,
PROFILING_SHADER(emission_sd->object, emission_sd->shader);
PROFILING_EVENT(PROFILING_SHADE_LIGHT_EVAL);
shader_eval_surface<KERNEL_FEATURE_NODE_MASK_SURFACE_LIGHT>(
shader_eval_surface<KERNEL_FEATURE_NODE_MASK_SURFACE_BACKGROUND>(
kg, state, emission_sd, render_buffer, path_flag | PATH_RAY_EMISSION);
L = shader_background_eval(emission_sd);

View File

@ -1700,6 +1700,8 @@ enum KernelFeatureFlag : uint32_t {
#define KERNEL_FEATURE_NODE_MASK_SURFACE_LIGHT \
(KERNEL_FEATURE_NODE_EMISSION | KERNEL_FEATURE_NODE_VORONOI_EXTRA | \
KERNEL_FEATURE_NODE_LIGHT_PATH)
#define KERNEL_FEATURE_NODE_MASK_SURFACE_BACKGROUND \
(KERNEL_FEATURE_NODE_MASK_SURFACE_LIGHT | KERNEL_FEATURE_NODE_AOV)
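Expanded, the new background mask is the surface-light mask plus AOV node support, matching what the background shader evaluation above now requests:
/* Reference expansion (not part of the patch):
 * KERNEL_FEATURE_NODE_MASK_SURFACE_BACKGROUND ==
 *   KERNEL_FEATURE_NODE_EMISSION | KERNEL_FEATURE_NODE_VORONOI_EXTRA |
 *   KERNEL_FEATURE_NODE_LIGHT_PATH | KERNEL_FEATURE_NODE_AOV */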
#define KERNEL_FEATURE_NODE_MASK_SURFACE_SHADOW \
(KERNEL_FEATURE_NODE_BSDF | KERNEL_FEATURE_NODE_EMISSION | KERNEL_FEATURE_NODE_VOLUME | \
KERNEL_FEATURE_NODE_BUMP | KERNEL_FEATURE_NODE_BUMP_STATE | \

View File

@ -90,7 +90,7 @@ static vector<ChannelMapping> output_channels()
return map;
}
/* Renderlayer Handling */
/* Render-layer Handling. */
bool DenoiseImageLayer::detect_denoising_channels()
{

View File

@ -402,6 +402,11 @@ extern GHOST_TSuccess GHOST_SetCursorPosition(GHOST_SystemHandle systemhandle,
int32_t x,
int32_t y);
void GHOST_GetCursorGrabState(GHOST_WindowHandle windowhandle,
GHOST_TGrabCursorMode *r_mode,
GHOST_TAxisFlag *r_wrap_axis,
int r_bounds[4]);
/**
* Grabs the cursor for a modal operation, to keep receiving
* events when the mouse is outside the window. X11 only, others
@ -896,6 +901,16 @@ extern int setConsoleWindowState(GHOST_TConsoleWindowState action);
*/
extern int GHOST_UseNativePixels(void);
/**
* Warp the cursor, if supported.
*/
extern int GHOST_SupportsCursorWarp(void);
/**
* Assign the callback which generates a back-trace (may be NULL).
*/
extern void GHOST_SetBacktraceHandler(GHOST_TBacktraceFn backtrace_fn);
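A rough usage sketch for the new hook, assuming the handler only writes to the supplied file pointer (the handler name and body are hypothetical; see ghost_wayland_log_handler further down, which invokes the registered callback with stderr):
/* Illustrative only. Matches the GHOST_TBacktraceFn signature:
 * typedef void (*GHOST_TBacktraceFn)(void *file_handle); */
#include <stdio.h>
static void example_backtrace(void *file_handle)
{
  fprintf((FILE *)file_handle, "  (back-trace output goes here)\n");
}
/* Registered once, typically before the GHOST system is created: */
GHOST_SetBacktraceHandler(example_backtrace);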
/**
* Focus window after opening, or put them in the background.
*/

View File

@ -133,6 +133,9 @@ class GHOST_ISystem {
*/
static GHOST_ISystem *getSystem();
static GHOST_TBacktraceFn getBacktraceFn();
static void setBacktraceFn(GHOST_TBacktraceFn backtrace_fn);
protected:
/**
* Constructor.
@ -304,6 +307,11 @@ class GHOST_ISystem {
*/
virtual bool useNativePixel(void) = 0;
/**
* Return true when warping the cursor is supported.
*/
virtual bool supportsCursorWarp() = 0;
/**
* Focus window after opening, or put them in the background.
*/
@ -477,6 +485,9 @@ class GHOST_ISystem {
/** The one and only system */
static GHOST_ISystem *m_system;
/** Function to call that sets the back-trace. */
static GHOST_TBacktraceFn m_backtrace_fn;
#ifdef WITH_CXX_GUARDEDALLOC
MEM_CXX_CLASS_ALLOC_FUNCS("GHOST:GHOST_ISystem")
#endif

View File

@ -254,6 +254,12 @@ class GHOST_IWindow {
*/
virtual GHOST_TSuccess setCursorShape(GHOST_TStandardCursor cursorShape) = 0;
virtual GHOST_TSuccess getCursorGrabBounds(GHOST_Rect &bounds) = 0;
virtual void getCursorGrabState(GHOST_TGrabCursorMode &mode,
GHOST_TAxisFlag &axis_flag,
GHOST_Rect &bounds) = 0;
/**
* Test if the standard cursor shape is supported by current platform.
* \return Indication of success.

View File

@ -54,6 +54,8 @@ GHOST_DECLARE_HANDLE(GHOST_EventConsumerHandle);
GHOST_DECLARE_HANDLE(GHOST_ContextHandle);
GHOST_DECLARE_HANDLE(GHOST_XrContextHandle);
typedef void (*GHOST_TBacktraceFn)(void *file_handle);
typedef struct {
int flags;
} GHOST_GLSettings;
@ -415,6 +417,8 @@ typedef enum {
GHOST_kGrabHide,
} GHOST_TGrabCursorMode;
#define GHOST_GRAB_NEEDS_SOFTWARE_CURSOR_FOR_WARP(grab) ((grab) == GHOST_kGrabWrap)
typedef enum {
/** Axis that cursor grab will wrap. */
GHOST_kGrabAxisNone = 0,
@ -533,7 +537,7 @@ typedef struct {
} GHOST_TEventNDOFMotionData;
typedef enum { GHOST_kPress, GHOST_kRelease } GHOST_TButtonAction;
/* Good for mouse or other buttons too, hmmm? */
/* Good for mouse or other buttons too? */
typedef struct {
GHOST_TButtonAction action;

View File

@ -376,6 +376,20 @@ GHOST_TSuccess GHOST_SetCursorGrab(GHOST_WindowHandle windowhandle,
mode, wrap_axis, bounds ? &bounds_rect : nullptr, mouse_ungrab_xy ? mouse_xy : nullptr);
}
void GHOST_GetCursorGrabState(GHOST_WindowHandle windowhandle,
GHOST_TGrabCursorMode *r_mode,
GHOST_TAxisFlag *r_axis_flag,
int r_bounds[4])
{
GHOST_IWindow *window = (GHOST_IWindow *)windowhandle;
GHOST_Rect bounds_rect;
window->getCursorGrabState(*r_mode, *r_axis_flag, bounds_rect);
r_bounds[0] = bounds_rect.m_l;
r_bounds[1] = bounds_rect.m_t;
r_bounds[2] = bounds_rect.m_r;
r_bounds[3] = bounds_rect.m_b;
}
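A call to the new wrapper looks roughly like this (illustrative; windowhandle is assumed to be a valid GHOST_WindowHandle):
GHOST_TGrabCursorMode mode;
GHOST_TAxisFlag wrap_axis;
int bounds[4]; /* Filled as {left, top, right, bottom} of the grab rectangle. */
GHOST_GetCursorGrabState(windowhandle, &mode, &wrap_axis, bounds);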
GHOST_TSuccess GHOST_GetModifierKeyState(GHOST_SystemHandle systemhandle,
GHOST_TModifierKeyMask mask,
int *isDown)
@ -815,6 +829,17 @@ int GHOST_UseNativePixels(void)
return system->useNativePixel();
}
int GHOST_SupportsCursorWarp(void)
{
GHOST_ISystem *system = GHOST_ISystem::getSystem();
return system->supportsCursorWarp();
}
void GHOST_SetBacktraceHandler(GHOST_TBacktraceFn backtrace_fn)
{
GHOST_ISystem::setBacktraceFn(backtrace_fn);
}
void GHOST_UseWindowFocus(int use_focus)
{
GHOST_ISystem *system = GHOST_ISystem::getSystem();

View File

@ -31,6 +31,8 @@
GHOST_ISystem *GHOST_ISystem::m_system = nullptr;
GHOST_TBacktraceFn GHOST_ISystem::m_backtrace_fn = nullptr;
GHOST_TSuccess GHOST_ISystem::createSystem()
{
GHOST_TSuccess success;
@ -89,3 +91,13 @@ GHOST_ISystem *GHOST_ISystem::getSystem()
{
return m_system;
}
GHOST_TBacktraceFn GHOST_ISystem::getBacktraceFn()
{
return GHOST_ISystem::m_backtrace_fn;
}
void GHOST_ISystem::setBacktraceFn(GHOST_TBacktraceFn backtrace_fn)
{
GHOST_ISystem::m_backtrace_fn = backtrace_fn;
}

View File

@ -390,6 +390,11 @@ void GHOST_System::useWindowFocus(const bool use_focus)
m_windowFocus = use_focus;
}
bool GHOST_System::supportsCursorWarp()
{
return true;
}
void GHOST_System::initDebug(GHOST_Debug debug)
{
m_is_debug_enabled = debug.flags & GHOST_kDebugDefault;

View File

@ -151,10 +151,13 @@ class GHOST_System : public GHOST_ISystem {
bool useNativePixel(void);
bool m_nativePixel;
bool supportsCursorWarp(void);
/**
* Focus window after opening, or put them in the background.
*/
void useWindowFocus(const bool use_focus);
bool m_windowFocus;
/**

View File

@ -37,6 +37,13 @@
#include <unistd.h>
#include <cstring>
#include <mutex>
static GHOST_IWindow *get_window(struct wl_surface *surface);
/* -------------------------------------------------------------------- */
/** \name Private Types & Defines
* \{ */
/**
* Selected input event code defines from `linux/input-event-codes.h`
@ -67,7 +74,7 @@ struct cursor_t {
struct wl_cursor_theme *theme = nullptr;
int size;
std::string theme_name;
// outputs on which the cursor is visible
/** Outputs on which the cursor is visible. */
std::unordered_set<const output_t *> outputs;
int scale = 1;
};
@ -85,8 +92,6 @@ struct data_offer_t {
struct data_source_t {
struct wl_data_source *data_source;
/** Last device that was active. */
uint32_t source_serial;
char *buffer_out;
};
@ -112,26 +117,37 @@ struct input_t {
struct zwp_relative_pointer_v1 *relative_pointer;
struct zwp_locked_pointer_v1 *locked_pointer;
struct zwp_confined_pointer_v1 *confined_pointer;
struct xkb_context *xkb_context;
struct xkb_state *xkb_state;
struct {
/* Key repetition in character per second. */
/** Key repetition in character per second. */
int32_t rate;
/* Time (milliseconds) after which to start repeating keys. */
/** Time (milliseconds) after which to start repeating keys. */
int32_t delay;
/* Timer for key repeats. */
/** Timer for key repeats. */
GHOST_ITimerTask *timer = nullptr;
} key_repeat;
struct wl_surface *focus_pointer = nullptr;
struct wl_surface *focus_keyboard = nullptr;
struct wl_surface *focus_dnd = nullptr;
struct wl_data_device *data_device = nullptr;
struct data_offer_t *data_offer_dnd; /* Drag & Drop. */
struct data_offer_t *data_offer_copy_paste; /* Copy & Paste. */
/** Drag & Drop. */
struct data_offer_t *data_offer_dnd;
std::mutex data_offer_dnd_mutex;
/** Copy & Paste. */
struct data_offer_t *data_offer_copy_paste;
std::mutex data_offer_copy_paste_mutex;
struct data_source_t *data_source;
std::mutex data_source_mutex;
/** Last device that was active. */
uint32_t data_source_serial;
};
struct display_t {
@ -156,8 +172,34 @@ struct display_t {
std::vector<struct wl_egl_window *> os_egl_windows;
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Private Utility Functions
* \{ */
static GHOST_WindowManager *window_manager = nullptr;
/** Check this lock before accessing `GHOST_SystemWayland::selection` from a thread. */
static std::mutex system_selection_mutex;
/**
* Callback for WAYLAND to run when there is an error.
*
* \note It's useful to set a break-point on this function as some errors are fatal
* (for all intents and purposes) but don't crash the process.
*/
static void ghost_wayland_log_handler(const char *msg, va_list arg)
{
fprintf(stderr, "GHOST/Wayland: ");
vfprintf(stderr, msg, arg); /* Includes newline. */
GHOST_TBacktraceFn backtrace_fn = GHOST_ISystem::getBacktraceFn();
if (backtrace_fn) {
backtrace_fn(stderr); /* Includes newline. */
}
}
static void display_destroy(display_t *d)
{
if (d->data_device_manager) {
@ -429,8 +471,10 @@ static const std::vector<std::string> mime_send = {
"text/plain",
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Interface Callbacks
/** \name Listener (Relative Motion), #zwp_relative_pointer_v1_listener
*
* These callbacks are registered for Wayland interfaces and called when
* an event is received from the compositor.
@ -466,11 +510,18 @@ static const zwp_relative_pointer_v1_listener relative_pointer_listener = {
relative_pointer_relative_motion,
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Listener (Data Source), #wl_data_source_listener
* \{ */
static void dnd_events(const input_t *const input, const GHOST_TEventType event)
{
/* NOTE: `input->data_offer_dnd_mutex` must already be locked. */
const uint64_t time = input->system->getMilliSeconds();
GHOST_IWindow *const window = static_cast<GHOST_WindowWayland *>(
wl_surface_get_user_data(input->focus_pointer));
wl_surface_get_user_data(input->focus_dnd));
for (const std::string &type : mime_preference_order) {
input->system->pushEvent(new GHOST_EventDragnDrop(time,
event,
@ -482,7 +533,9 @@ static void dnd_events(const input_t *const input, const GHOST_TEventType event)
}
}
static std::string read_pipe(data_offer_t *data_offer, const std::string mime_receive)
static std::string read_pipe(data_offer_t *data_offer,
const std::string mime_receive,
std::mutex *mutex)
{
int pipefd[2];
if (pipe(pipefd) != 0) {
@ -491,6 +544,13 @@ static std::string read_pipe(data_offer_t *data_offer, const std::string mime_re
wl_data_offer_receive(data_offer->id, mime_receive.c_str(), pipefd[1]);
close(pipefd[1]);
data_offer->in_use.store(false);
if (mutex) {
mutex->unlock();
}
/* WARNING: `data_offer` may be freed from now on. */
std::string data;
ssize_t len;
char buffer[4096];
@ -498,7 +558,6 @@ static std::string read_pipe(data_offer_t *data_offer, const std::string mime_re
data.insert(data.end(), buffer, buffer + len);
}
close(pipefd[0]);
data_offer->in_use.store(false);
return data;
}
@ -521,7 +580,10 @@ static void data_source_send(void *data,
const char * /*mime_type*/,
int32_t fd)
{
const char *const buffer = static_cast<char *>(data);
input_t *input = static_cast<input_t *>(data);
std::lock_guard lock{input->data_source_mutex};
const char *const buffer = input->data_source->buffer_out;
if (write(fd, buffer, strlen(buffer)) < 0) {
GHOST_PRINT("error writing to clipboard: " << std::strerror(errno) << std::endl);
}
@ -581,6 +643,12 @@ static const struct wl_data_source_listener data_source_listener = {
data_source_action,
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Listener (Data Offer), #wl_data_offer_listener
* \{ */
static void data_offer_offer(void *data,
struct wl_data_offer * /*wl_data_offer*/,
const char *mime_type)
@ -608,6 +676,12 @@ static const struct wl_data_offer_listener data_offer_listener = {
data_offer_action,
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Listener (Data Device), #wl_data_device_listener
* \{ */
static void data_device_data_offer(void * /*data*/,
struct wl_data_device * /*wl_data_device*/,
struct wl_data_offer *id)
@ -620,12 +694,14 @@ static void data_device_data_offer(void * /*data*/,
static void data_device_enter(void *data,
struct wl_data_device * /*wl_data_device*/,
uint32_t serial,
struct wl_surface * /*surface*/,
struct wl_surface *surface,
wl_fixed_t x,
wl_fixed_t y,
struct wl_data_offer *id)
{
input_t *input = static_cast<input_t *>(data);
std::lock_guard lock{input->data_offer_dnd_mutex};
input->data_offer_dnd = static_cast<data_offer_t *>(wl_data_offer_get_user_data(id));
data_offer_t *data_offer = input->data_offer_dnd;
@ -642,14 +718,17 @@ static void data_device_enter(void *data,
wl_data_offer_accept(id, serial, type.c_str());
}
input->focus_dnd = surface;
dnd_events(input, GHOST_kEventDraggingEntered);
}
static void data_device_leave(void *data, struct wl_data_device * /*wl_data_device*/)
{
input_t *input = static_cast<input_t *>(data);
std::lock_guard lock{input->data_offer_dnd_mutex};
dnd_events(input, GHOST_kEventDraggingExited);
input->focus_dnd = nullptr;
if (input->data_offer_dnd && !input->data_offer_dnd->in_use.load()) {
wl_data_offer_destroy(input->data_offer_dnd->id);
@ -665,6 +744,8 @@ static void data_device_motion(void *data,
wl_fixed_t y)
{
input_t *input = static_cast<input_t *>(data);
std::lock_guard lock{input->data_offer_dnd_mutex};
input->data_offer_dnd->dnd.x = wl_fixed_to_int(x);
input->data_offer_dnd->dnd.y = wl_fixed_to_int(y);
dnd_events(input, GHOST_kEventDraggingUpdated);
@ -673,6 +754,8 @@ static void data_device_motion(void *data,
static void data_device_drop(void *data, struct wl_data_device * /*wl_data_device*/)
{
input_t *input = static_cast<input_t *>(data);
std::lock_guard lock{input->data_offer_dnd_mutex};
data_offer_t *data_offer = input->data_offer_dnd;
const std::string mime_receive = *std::find_first_of(mime_preference_order.begin(),
@ -682,11 +765,12 @@ static void data_device_drop(void *data, struct wl_data_device * /*wl_data_devic
auto read_uris = [](input_t *const input,
data_offer_t *data_offer,
wl_surface *surface,
const std::string mime_receive) {
const int x = data_offer->dnd.x;
const int y = data_offer->dnd.y;
const std::string data = read_pipe(data_offer, mime_receive);
const std::string data = read_pipe(data_offer, mime_receive, nullptr);
wl_data_offer_finish(data_offer->id);
wl_data_offer_destroy(data_offer->id);
@ -700,6 +784,9 @@ static void data_device_drop(void *data, struct wl_data_device * /*wl_data_devic
static constexpr const char *file_proto = "file://";
static constexpr const char *crlf = "\r\n";
GHOST_WindowWayland *win = static_cast<GHOST_WindowWayland *>(get_window(surface));
GHOST_ASSERT(win != nullptr, "Unable to find window for drop event from surface");
std::vector<std::string> uris;
size_t pos = 0;
@ -723,8 +810,6 @@ static void data_device_drop(void *data, struct wl_data_device * /*wl_data_devic
flist->strings[i] = static_cast<uint8_t *>(malloc((uris[i].size() + 1) * sizeof(uint8_t)));
memcpy(flist->strings[i], uris[i].data(), uris[i].size() + 1);
}
GHOST_IWindow *win = static_cast<GHOST_WindowWayland *>(
wl_surface_get_user_data(input->focus_pointer));
system->pushEvent(new GHOST_EventDragnDrop(system->getMilliSeconds(),
GHOST_kEventDraggingDropDone,
GHOST_kDragnDropTypeFilenames,
@ -740,7 +825,9 @@ static void data_device_drop(void *data, struct wl_data_device * /*wl_data_devic
wl_display_roundtrip(system->display());
};
std::thread read_thread(read_uris, input, data_offer, mime_receive);
/* Pass in `input->focus_dnd` instead of accessing it from `input` since the leave callback
* (#data_device_leave) will clear the value once this function starts. */
std::thread read_thread(read_uris, input, data_offer, input->focus_dnd, mime_receive);
read_thread.detach();
}
@ -749,6 +836,9 @@ static void data_device_selection(void *data,
struct wl_data_offer *id)
{
input_t *input = static_cast<input_t *>(data);
std::lock_guard lock{input->data_offer_copy_paste_mutex};
data_offer_t *data_offer = input->data_offer_copy_paste;
/* Delete old data offer. */
@ -766,22 +856,28 @@ static void data_device_selection(void *data,
data_offer = static_cast<data_offer_t *>(wl_data_offer_get_user_data(id));
input->data_offer_copy_paste = data_offer;
std::string mime_receive;
for (const std::string type : {mime_text_utf8, mime_text_plain}) {
if (data_offer->types.count(type)) {
mime_receive = type;
break;
}
}
auto read_selection = [](input_t *input) {
GHOST_SystemWayland *const system = input->system;
input->data_offer_copy_paste_mutex.lock();
auto read_selection = [](GHOST_SystemWayland *const system,
data_offer_t *data_offer,
const std::string mime_receive) {
const std::string data = read_pipe(data_offer, mime_receive);
system->setSelection(data);
data_offer_t *data_offer = input->data_offer_copy_paste;
std::string mime_receive;
for (const std::string type : {mime_text_utf8, mime_text_plain}) {
if (data_offer->types.count(type)) {
mime_receive = type;
break;
}
}
const std::string data = read_pipe(
data_offer, mime_receive, &input->data_offer_copy_paste_mutex);
{
std::lock_guard lock{system_selection_mutex};
system->setSelection(data);
}
};
std::thread read_thread(read_selection, input->system, data_offer, mime_receive);
std::thread read_thread(read_selection, input);
read_thread.detach();
}
@ -794,6 +890,12 @@ static const struct wl_data_device_listener data_device_listener = {
data_device_selection,
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Listener (Surface), #wl_surface_listener
* \{ */
static void cursor_buffer_release(void *data, struct wl_buffer *wl_buffer)
{
cursor_t *cursor = static_cast<cursor_t *>(data);
@ -874,6 +976,12 @@ struct wl_surface_listener cursor_surface_listener = {
cursor_surface_leave,
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Listener (Pointer), #wl_pointer_listener
* \{ */
static void pointer_enter(void *data,
struct wl_pointer * /*wl_pointer*/,
uint32_t serial,
@ -995,7 +1103,7 @@ static void pointer_button(void *data,
break;
}
input->data_source->source_serial = serial;
input->data_source_serial = serial;
input->buttons.set(ebutton, state == WL_POINTER_BUTTON_STATE_PRESSED);
input->system->pushEvent(new GHOST_EventButton(
input->system->getMilliSeconds(), etype, win, ebutton, GHOST_TABLET_DATA_NONE));
@ -1031,6 +1139,12 @@ static const struct wl_pointer_listener pointer_listener = {
pointer_axis,
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Listener (Keyboard), #wl_keyboard_listener
* \{ */
static void keyboard_keymap(
void *data, struct wl_keyboard * /*wl_keyboard*/, uint32_t format, int32_t fd, uint32_t size)
{
@ -1164,7 +1278,7 @@ static void keyboard_key(void *data,
key_data.utf8_buf[0] = '\0';
}
input->data_source->source_serial = serial;
input->data_source_serial = serial;
GHOST_IWindow *win = static_cast<GHOST_WindowWayland *>(
wl_surface_get_user_data(input->focus_keyboard));
@ -1236,6 +1350,12 @@ static const struct wl_keyboard_listener keyboard_listener = {
keyboard_repeat_info,
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Listener (Seat), #wl_seat_listener
* \{ */
static void seat_capabilities(void *data, struct wl_seat *wl_seat, uint32_t capabilities)
{
input_t *input = static_cast<input_t *>(data);
@ -1272,6 +1392,12 @@ static const struct wl_seat_listener seat_listener = {
seat_name,
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Listener (Output), #wl_output_listener
* \{ */
static void output_geometry(void *data,
struct wl_output * /*wl_output*/,
int32_t /*x*/,
@ -1327,6 +1453,12 @@ static const struct wl_output_listener output_listener = {
output_scale,
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Listener (XDG WM Base), #xdg_wm_base_listener
* \{ */
static void shell_ping(void * /*data*/, struct xdg_wm_base *xdg_wm_base, uint32_t serial)
{
xdg_wm_base_pong(xdg_wm_base, serial);
@ -1336,6 +1468,12 @@ static const struct xdg_wm_base_listener shell_listener = {
shell_ping,
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Listener (Registry), #wl_registry_listener
* \{ */
static void global_add(void *data,
struct wl_registry *wl_registry,
uint32_t name,
@ -1376,6 +1514,7 @@ static void global_add(void *data,
input->data_source->buffer_out = nullptr;
input->relative_pointer = nullptr;
input->locked_pointer = nullptr;
input->confined_pointer = nullptr;
input->seat = static_cast<wl_seat *>(
wl_registry_bind(wl_registry, name, &wl_seat_interface, 4));
display->inputs.push_back(input);
@ -1387,7 +1526,7 @@ static void global_add(void *data,
}
else if (!strcmp(interface, wl_data_device_manager_interface.name)) {
display->data_device_manager = static_cast<wl_data_device_manager *>(
wl_registry_bind(wl_registry, name, &wl_data_device_manager_interface, 1));
wl_registry_bind(wl_registry, name, &wl_data_device_manager_interface, 3));
}
else if (!strcmp(interface, zwp_relative_pointer_manager_v1_interface.name)) {
display->relative_pointer_manager = static_cast<zwp_relative_pointer_manager_v1 *>(
@ -1427,6 +1566,8 @@ static const struct wl_registry_listener registry_listener = {
GHOST_SystemWayland::GHOST_SystemWayland() : GHOST_System(), d(new display_t)
{
wl_log_set_handler_client(ghost_wayland_log_handler);
d->system = this;
/* Connect to the Wayland server. */
d->display = wl_display_connect(nullptr);
@ -1485,42 +1626,47 @@ int GHOST_SystemWayland::setConsoleWindowState(GHOST_TConsoleWindowState /*actio
GHOST_TSuccess GHOST_SystemWayland::getModifierKeys(GHOST_ModifierKeys &keys) const
{
if (!d->inputs.empty()) {
static const xkb_state_component mods_all = xkb_state_component(
XKB_STATE_MODS_DEPRESSED | XKB_STATE_MODS_LATCHED | XKB_STATE_MODS_LOCKED |
XKB_STATE_MODS_EFFECTIVE);
keys.set(GHOST_kModifierKeyLeftShift,
xkb_state_mod_name_is_active(d->inputs[0]->xkb_state, XKB_MOD_NAME_SHIFT, mods_all) ==
1);
keys.set(GHOST_kModifierKeyRightShift,
xkb_state_mod_name_is_active(d->inputs[0]->xkb_state, XKB_MOD_NAME_SHIFT, mods_all) ==
1);
keys.set(GHOST_kModifierKeyLeftAlt,
xkb_state_mod_name_is_active(d->inputs[0]->xkb_state, "LAlt", mods_all) == 1);
keys.set(GHOST_kModifierKeyRightAlt,
xkb_state_mod_name_is_active(d->inputs[0]->xkb_state, "RAlt", mods_all) == 1);
keys.set(GHOST_kModifierKeyLeftControl,
xkb_state_mod_name_is_active(d->inputs[0]->xkb_state, "LControl", mods_all) == 1);
keys.set(GHOST_kModifierKeyRightControl,
xkb_state_mod_name_is_active(d->inputs[0]->xkb_state, "RControl", mods_all) == 1);
keys.set(GHOST_kModifierKeyOS,
xkb_state_mod_name_is_active(d->inputs[0]->xkb_state, "Super", mods_all) == 1);
keys.set(GHOST_kModifierKeyNumMasks,
xkb_state_mod_name_is_active(d->inputs[0]->xkb_state, "NumLock", mods_all) == 1);
return GHOST_kSuccess;
if (d->inputs.empty()) {
return GHOST_kFailure;
}
return GHOST_kFailure;
static const xkb_state_component mods_all = xkb_state_component(
XKB_STATE_MODS_DEPRESSED | XKB_STATE_MODS_LATCHED | XKB_STATE_MODS_LOCKED |
XKB_STATE_MODS_EFFECTIVE);
bool val;
/* NOTE: XKB doesn't seem to differentiate between left/right modifiers. */
val = xkb_state_mod_name_is_active(d->inputs[0]->xkb_state, XKB_MOD_NAME_SHIFT, mods_all) == 1;
keys.set(GHOST_kModifierKeyLeftShift, val);
keys.set(GHOST_kModifierKeyRightShift, val);
val = xkb_state_mod_name_is_active(d->inputs[0]->xkb_state, XKB_MOD_NAME_ALT, mods_all) == 1;
keys.set(GHOST_kModifierKeyLeftAlt, val);
keys.set(GHOST_kModifierKeyRightAlt, val);
val = xkb_state_mod_name_is_active(d->inputs[0]->xkb_state, XKB_MOD_NAME_CTRL, mods_all) == 1;
keys.set(GHOST_kModifierKeyLeftControl, val);
keys.set(GHOST_kModifierKeyRightControl, val);
val = xkb_state_mod_name_is_active(d->inputs[0]->xkb_state, XKB_MOD_NAME_LOGO, mods_all) == 1;
keys.set(GHOST_kModifierKeyOS, val);
val = xkb_state_mod_name_is_active(d->inputs[0]->xkb_state, XKB_MOD_NAME_NUM, mods_all) == 1;
keys.set(GHOST_kModifierKeyNumMasks, val);
return GHOST_kSuccess;
}
GHOST_TSuccess GHOST_SystemWayland::getButtons(GHOST_Buttons &buttons) const
{
if (!d->inputs.empty()) {
buttons = d->inputs[0]->buttons;
return GHOST_kSuccess;
if (d->inputs.empty()) {
return GHOST_kFailure;
}
return GHOST_kFailure;
buttons = d->inputs[0]->buttons;
return GHOST_kSuccess;
}
char *GHOST_SystemWayland::getClipboard(bool /*selection*/) const
@ -1536,25 +1682,29 @@ void GHOST_SystemWayland::putClipboard(const char *buffer, bool /*selection*/) c
return;
}
data_source_t *data_source = d->inputs[0]->data_source;
input_t *input = d->inputs[0];
std::lock_guard lock{input->data_source_mutex};
data_source_t *data_source = input->data_source;
/* Copy buffer. */
free(data_source->buffer_out);
const size_t buffer_size = strlen(buffer) + 1;
data_source->buffer_out = static_cast<char *>(malloc(buffer_size));
std::memcpy(data_source->buffer_out, buffer, buffer_size);
data_source->data_source = wl_data_device_manager_create_data_source(d->data_device_manager);
wl_data_source_add_listener(
data_source->data_source, &data_source_listener, data_source->buffer_out);
wl_data_source_add_listener(data_source->data_source, &data_source_listener, input);
for (const std::string &type : mime_send) {
wl_data_source_offer(data_source->data_source, type.c_str());
}
if (!d->inputs.empty() && d->inputs[0]->data_device) {
if (input->data_device) {
wl_data_device_set_selection(
d->inputs[0]->data_device, data_source->data_source, data_source->source_serial);
input->data_device, data_source->data_source, input->data_source_serial);
}
}
@ -1736,14 +1886,20 @@ static void set_cursor_buffer(input_t *input, wl_buffer *buffer)
c->visible = (buffer != nullptr);
wl_surface_attach(c->surface, buffer, 0, 0);
const int32_t image_size_x = int32_t(c->image.width);
const int32_t image_size_y = int32_t(c->image.height);
const int32_t hotspot_x = int32_t(c->image.hotspot_x) / c->scale;
const int32_t hotspot_y = int32_t(c->image.hotspot_y) / c->scale;
wl_surface_attach(c->surface, buffer, 0, 0);
wl_surface_damage(c->surface, 0, 0, image_size_x, image_size_y);
wl_surface_damage(c->surface, 0, 0, int32_t(c->image.width), int32_t(c->image.height));
wl_pointer_set_cursor(input->pointer,
input->pointer_serial,
c->visible ? c->surface : nullptr,
int32_t(c->image.hotspot_x) / c->scale,
int32_t(c->image.hotspot_y) / c->scale);
hotspot_x,
hotspot_y);
wl_surface_commit(c->surface);
}
@ -1918,6 +2074,11 @@ GHOST_TSuccess GHOST_SystemWayland::setCursorVisibility(bool visible)
return GHOST_kSuccess;
}
bool GHOST_SystemWayland::supportsCursorWarp()
{
return false;
}
GHOST_TSuccess GHOST_SystemWayland::setCursorGrab(const GHOST_TGrabCursorMode mode,
const GHOST_TGrabCursorMode mode_current,
@ -1939,42 +2100,111 @@ GHOST_TSuccess GHOST_SystemWayland::setCursorGrab(const GHOST_TGrabCursorMode mo
input_t *input = d->inputs[0];
if (mode_current == GHOST_kGrabHide) {
#define MODE_NEEDS_LOCK(m) ((m) == GHOST_kGrabWrap || (m) == GHOST_kGrabHide)
#define MODE_NEEDS_HIDE(m) ((m) == GHOST_kGrabHide)
#define MODE_NEEDS_CONFINE(m) ((m) == GHOST_kGrabNormal)
const bool was_lock = MODE_NEEDS_LOCK(mode_current);
const bool use_lock = MODE_NEEDS_LOCK(mode);
/* Check for wrap as #supportsCursorWarp isn't supported. */
const bool was_hide = MODE_NEEDS_HIDE(mode_current) || (mode_current == GHOST_kGrabWrap);
const bool use_hide = MODE_NEEDS_HIDE(mode) || (mode == GHOST_kGrabWrap);
const bool was_confine = MODE_NEEDS_CONFINE(mode_current);
const bool use_confine = MODE_NEEDS_CONFINE(mode);
#undef MODE_NEEDS_LOCK
#undef MODE_NEEDS_HIDE
#undef MODE_NEEDS_CONFINE
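/* Summary of the mapping implied by the macros above (added note, derived only from the
* logic in this function):
* - GHOST_kGrabNormal:  confine the pointer (no lock, cursor stays visible).
* - GHOST_kGrabWrap:    lock the pointer & hide the cursor (pointer warping isn't supported).
* - GHOST_kGrabHide:    lock the pointer & hide the cursor.
* - GHOST_kGrabDisable: release all constraints and show the cursor again. */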
if (!use_hide) {
setCursorVisibility(true);
}
if ((mode == GHOST_kGrabDisable) ||
/* Switching from one grab mode to another,
* in this case disable the current locks as it makes logic confusing,
* postpone changing the cursor to avoid flickering. */
(mode_current != GHOST_kGrabDisable)) {
/* Switching from one grab mode to another,
* in this case disable the current locks as it makes logic confusing,
* postpone changing the cursor to avoid flickering. */
if (!use_lock) {
if (input->relative_pointer) {
zwp_relative_pointer_v1_destroy(input->relative_pointer);
input->relative_pointer = nullptr;
}
if (input->locked_pointer) {
/* Request location to restore to. */
if (mode_current == GHOST_kGrabWrap) {
GHOST_WindowWayland *win = static_cast<GHOST_WindowWayland *>(get_window(surface));
GHOST_Rect bounds;
int x_new = input->x, y_new = input->y;
/* Fallback to window bounds. */
if (win->getCursorGrabBounds(bounds) == GHOST_kFailure) {
((GHOST_Window *)win)->getClientBounds(bounds);
}
bounds.wrapPoint(x_new, y_new, 0, win->getCursorGrabAxis());
/* Push an event so the new location is registered. */
if ((x_new != input->x) || (y_new != input->y)) {
input->system->pushEvent(new GHOST_EventCursor(input->system->getMilliSeconds(),
GHOST_kEventCursorMove,
win,
x_new,
y_new,
GHOST_TABLET_DATA_NONE));
}
input->x = x_new;
input->y = y_new;
const int scale = win->scale();
zwp_locked_pointer_v1_set_cursor_position_hint(input->locked_pointer,
wl_fixed_from_int(x_new) / scale,
wl_fixed_from_int(y_new) / scale);
wl_surface_commit(surface);
}
zwp_locked_pointer_v1_destroy(input->locked_pointer);
input->locked_pointer = nullptr;
}
}
if (mode != GHOST_kGrabDisable) {
/* TODO(@campbellbarton): As WAYLAND does not support warping the pointer it may not be
* possible to support #GHOST_kGrabWrap by programmatically setting its coordinates.
* An alternative could be to draw the cursor in software (and hide the real cursor),
* or just accept a locked cursor on WAYLAND. */
input->relative_pointer = zwp_relative_pointer_manager_v1_get_relative_pointer(
d->relative_pointer_manager, input->pointer);
zwp_relative_pointer_v1_add_listener(
input->relative_pointer, &relative_pointer_listener, input);
input->locked_pointer = zwp_pointer_constraints_v1_lock_pointer(
d->pointer_constraints,
surface,
input->pointer,
nullptr,
ZWP_POINTER_CONSTRAINTS_V1_LIFETIME_PERSISTENT);
if (!use_confine) {
if (input->confined_pointer) {
zwp_confined_pointer_v1_destroy(input->confined_pointer);
input->confined_pointer = nullptr;
}
}
if (mode == GHOST_kGrabHide) {
if (mode != GHOST_kGrabDisable) {
if (use_lock) {
if (!was_lock) {
/* TODO(@campbellbarton): As WAYLAND does not support warping the pointer it may not be
* possible to support #GHOST_kGrabWrap by programmatically setting its coordinates.
* An alternative could be to draw the cursor in software (and hide the real cursor),
* or just accept a locked cursor on WAYLAND. */
input->relative_pointer = zwp_relative_pointer_manager_v1_get_relative_pointer(
d->relative_pointer_manager, input->pointer);
zwp_relative_pointer_v1_add_listener(
input->relative_pointer, &relative_pointer_listener, input);
input->locked_pointer = zwp_pointer_constraints_v1_lock_pointer(
d->pointer_constraints,
surface,
input->pointer,
nullptr,
ZWP_POINTER_CONSTRAINTS_V1_LIFETIME_PERSISTENT);
}
}
else if (use_confine) {
if (!was_confine) {
input->confined_pointer = zwp_pointer_constraints_v1_confine_pointer(
d->pointer_constraints,
surface,
input->pointer,
nullptr,
ZWP_POINTER_CONSTRAINTS_V1_LIFETIME_PERSISTENT);
}
}
if (use_hide && !was_hide) {
setCursorVisibility(false);
}
}

View File

@ -103,6 +103,8 @@ class GHOST_SystemWayland : public GHOST_System {
GHOST_TSuccess setCursorVisibility(bool visible);
bool supportsCursorWarp();
GHOST_TSuccess setCursorGrab(const GHOST_TGrabCursorMode mode,
const GHOST_TGrabCursorMode mode_current,
wl_surface *surface);

View File

@ -155,10 +155,31 @@ GHOST_TSuccess GHOST_Window::setCursorGrab(GHOST_TGrabCursorMode mode,
GHOST_TSuccess GHOST_Window::getCursorGrabBounds(GHOST_Rect &bounds)
{
if (m_cursorGrab != GHOST_kGrabWrap) {
return GHOST_kFailure;
}
bounds = m_cursorGrabBounds;
return (bounds.m_l == -1 && bounds.m_r == -1) ? GHOST_kFailure : GHOST_kSuccess;
}
void GHOST_Window::getCursorGrabState(GHOST_TGrabCursorMode &mode,
GHOST_TAxisFlag &wrap_axis,
GHOST_Rect &bounds)
{
mode = m_cursorGrab;
if (m_cursorGrab == GHOST_kGrabWrap) {
bounds = m_cursorGrabBounds;
wrap_axis = m_cursorGrabAxis;
}
else {
bounds.m_l = -1;
bounds.m_r = -1;
bounds.m_t = -1;
bounds.m_b = -1;
wrap_axis = GHOST_kGrabAxisNone;
}
}
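/* Hypothetical caller sketch (an assumption for illustration, not part of this change):
*
*   GHOST_TGrabCursorMode mode;
*   GHOST_TAxisFlag wrap_axis;
*   GHOST_Rect bounds;
*   window->getCursorGrabState(mode, wrap_axis, bounds);
*   // All members of `bounds` are -1 when `mode` is not GHOST_kGrabWrap.
*
* Here `window` stands for any valid GHOST_Window pointer. */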
GHOST_TSuccess GHOST_Window::setCursorShape(GHOST_TStandardCursor cursorShape)
{
if (setWindowCursorShape(cursorShape)) {

View File

@ -152,6 +152,10 @@ class GHOST_Window : public GHOST_IWindow {
*/
GHOST_TSuccess getCursorGrabBounds(GHOST_Rect &bounds);
void getCursorGrabState(GHOST_TGrabCursorMode &mode,
GHOST_TAxisFlag &axis_flag,
GHOST_Rect &bounds);
/**
* Sets the progress bar value displayed in the window/application icon
* \param progress: The progress percentage (0.0 to 1.0).

View File

@ -15,28 +15,56 @@
#include <wayland-egl.h>
#include <algorithm> /* For `std::find`. */
static constexpr size_t base_dpi = 96;
struct window_t {
GHOST_WindowWayland *w;
wl_surface *surface;
/* Outputs on which the window is currently shown. */
std::unordered_set<const output_t *> outputs;
uint16_t dpi = 0;
int scale = 1;
/**
* Outputs on which the window is currently shown.
*
* This is an ordered set (whoever adds to this is responsible for keeping members unique).
* In practice this is rarely manipulated and is limited by the number of physical displays.
*/
std::vector<const output_t *> outputs;
/** The scale value written to #wl_surface_set_buffer_scale. */
int scale;
/** The DPI (currently always `scale * base_dpi`). */
uint16_t dpi;
struct xdg_surface *xdg_surface;
struct xdg_toplevel *xdg_toplevel;
struct zxdg_toplevel_decoration_v1 *xdg_toplevel_decoration = nullptr;
enum zxdg_toplevel_decoration_v1_mode decoration_mode;
wl_egl_window *egl_window;
int32_t pending_width, pending_height;
bool is_maximised;
bool is_fullscreen;
bool is_active;
bool is_dialog;
int32_t width, height;
int32_t size[2];
int32_t size_pending[2];
};
/* -------------------------------------------------------------------- */
/** \name Internal Utilities
* \{ */
static int outputs_max_scale_or_default(const std::vector<output_t *> &outputs,
const int scale_default)
{
int scale_max = 0;
for (const output_t *reg_output : outputs) {
scale_max = std::max(scale_max, reg_output->scale);
}
return scale_max ? scale_max : scale_default;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Wayland Interface Callbacks
*
@ -48,8 +76,8 @@ static void toplevel_configure(
void *data, xdg_toplevel * /*xdg_toplevel*/, int32_t width, int32_t height, wl_array *states)
{
window_t *win = static_cast<window_t *>(data);
win->pending_width = width;
win->pending_height = height;
win->size_pending[0] = win->scale * width;
win->size_pending[1] = win->scale * height;
win->is_maximised = false;
win->is_fullscreen = false;
@ -107,12 +135,12 @@ static void surface_configure(void *data, xdg_surface *xdg_surface, uint32_t ser
return;
}
if (win->pending_width != 0 && win->pending_height != 0) {
win->width = win->scale * win->pending_width;
win->height = win->scale * win->pending_height;
wl_egl_window_resize(win->egl_window, win->width, win->height, 0, 0);
win->pending_width = 0;
win->pending_height = 0;
if (win->size_pending[0] != 0 && win->size_pending[1] != 0) {
win->size[0] = win->size_pending[0];
win->size[1] = win->size_pending[1];
wl_egl_window_resize(win->egl_window, win->size[0], win->size[1], 0, 0);
win->size_pending[0] = 0;
win->size_pending[1] = 0;
win->w->notify_size();
}
@ -130,46 +158,38 @@ static const xdg_surface_listener surface_listener = {
surface_configure,
};
static bool update_scale(GHOST_WindowWayland *window)
{
int scale = 0;
for (const output_t *output : window->outputs_active()) {
if (output->scale > scale) {
scale = output->scale;
}
}
if (scale > 0 && window->scale() != scale) {
window->scale() = scale;
/* Using the real DPI would cause wrong scaling of the UI;
* use a multiplier for the default DPI as a workaround. */
window->dpi() = scale * base_dpi;
wl_surface_set_buffer_scale(window->surface(), scale);
return true;
}
return false;
}
static void surface_enter(void *data, struct wl_surface * /*wl_surface*/, struct wl_output *output)
{
GHOST_WindowWayland *w = static_cast<GHOST_WindowWayland *>(data);
for (const output_t *reg_output : w->outputs()) {
if (reg_output->output == output) {
w->outputs_active().insert(reg_output);
}
output_t *reg_output = w->output_find_by_wl(output);
if (reg_output == nullptr) {
return;
}
update_scale(w);
std::vector<const output_t *> &outputs = w->outputs();
auto it = std::find(outputs.begin(), outputs.end(), reg_output);
if (it != outputs.end()) {
return;
}
outputs.push_back(reg_output);
w->outputs_changed_update_scale();
}
static void surface_leave(void *data, struct wl_surface * /*wl_surface*/, struct wl_output *output)
{
GHOST_WindowWayland *w = static_cast<GHOST_WindowWayland *>(data);
for (const output_t *reg_output : w->outputs()) {
if (reg_output->output == output) {
w->outputs_active().erase(reg_output);
}
output_t *reg_output = w->output_find_by_wl(output);
if (reg_output == nullptr) {
return;
}
update_scale(w);
std::vector<const output_t *> &outputs = w->outputs();
auto it = std::find(outputs.begin(), outputs.end(), reg_output);
if (it == outputs.end()) {
return;
}
outputs.erase(it);
w->outputs_changed_update_scale();
}
struct wl_surface_listener wl_surface_listener = {
@ -208,20 +228,40 @@ GHOST_WindowWayland::GHOST_WindowWayland(GHOST_SystemWayland *system,
{
w->w = this;
w->width = int32_t(width);
w->height = int32_t(height);
w->size[0] = int32_t(width);
w->size[1] = int32_t(height);
w->is_dialog = is_dialog;
/* NOTE(@campbellbarton): The scale is set here to avoid flickering on startup.
* When all monitors use the same scale (which is quite common) there aren't any problems.
*
* When monitors have different scales there may still be a visible window resize on startup.
* Ideally it would be possible to know the scale this window will use, however that's only
* known once the #surface_enter callback runs (which isn't guaranteed to run at all).
*
* Using the maximum scale is best as it results in the window first being smaller,
* avoiding a large window flashing before it's made smaller. */
w->scale = outputs_max_scale_or_default(this->m_system->outputs(), 1);
w->dpi = w->scale * base_dpi;
/* Window surfaces. */
w->surface = wl_compositor_create_surface(m_system->compositor());
wl_surface_set_buffer_scale(this->surface(), w->scale);
wl_surface_add_listener(w->surface, &wl_surface_listener, this);
w->egl_window = wl_egl_window_create(w->surface, int(width), int(height));
w->egl_window = wl_egl_window_create(w->surface, int(w->size[0]), int(w->size[1]));
w->xdg_surface = xdg_wm_base_get_xdg_surface(m_system->shell(), w->surface);
w->xdg_toplevel = xdg_surface_get_toplevel(w->xdg_surface);
/* NOTE: The limit is in points (not pixels) so Hi-DPI will limit to a larger number of pixels.
* This has the advantage that the size limit is the same when moving the window between monitors
* with different scales set. If it was important to limit in pixels it could be re-calculated
* when the `w->scale` changed. */
xdg_toplevel_set_min_size(w->xdg_toplevel, 320, 240);
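/* Illustrative example (not part of this change): with `w->scale == 2` the 320x240 point
* limit above corresponds to a 640x480 pixel minimum, while at scale 1 it stays 320x240
* pixels, so the minimum is the same number of points on every monitor. */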
if (m_system->decoration_manager()) {
w->xdg_toplevel_decoration = zxdg_decoration_manager_v1_get_toplevel_decoration(
m_system->decoration_manager(), w->xdg_toplevel);
@ -299,22 +339,53 @@ wl_surface *GHOST_WindowWayland::surface() const
return w->surface;
}
const std::vector<output_t *> &GHOST_WindowWayland::outputs() const
{
return m_system->outputs();
}
std::unordered_set<const output_t *> &GHOST_WindowWayland::outputs_active()
std::vector<const output_t *> &GHOST_WindowWayland::outputs()
{
return w->outputs;
}
uint16_t &GHOST_WindowWayland::dpi()
output_t *GHOST_WindowWayland::output_find_by_wl(struct wl_output *output)
{
for (output_t *reg_output : this->m_system->outputs()) {
if (reg_output->output == output) {
return reg_output;
}
}
return nullptr;
}
bool GHOST_WindowWayland::outputs_changed_update_scale()
{
const int scale_next = outputs_max_scale_or_default(this->m_system->outputs(), 0);
if (scale_next == 0) {
return false;
}
window_t *win = this->w;
const int scale_curr = win->scale;
if (scale_next == scale_curr) {
return false;
}
/* Unlikely, but it's possible a pending size change is set. */
win->size_pending[0] = (win->size_pending[0] / scale_curr) * scale_next;
win->size_pending[1] = (win->size_pending[1] / scale_curr) * scale_next;
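/* Worked example (illustrative): a pending size of 960x540 stored while `scale_curr == 1`
* becomes 1920x1080 when `scale_next == 2`, keeping the pending size in pixels consistent
* with the buffer scale applied below. */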
win->scale = scale_next;
wl_surface_set_buffer_scale(this->surface(), scale_next);
/* Using the real DPI would cause wrong scaling of the UI;
* use a multiplier for the default DPI as a workaround. */
win->dpi = scale_next * base_dpi;
return true;
}
uint16_t GHOST_WindowWayland::dpi()
{
return w->dpi;
}
int &GHOST_WindowWayland::scale()
int GHOST_WindowWayland::scale()
{
return w->scale;
}
@ -356,22 +427,32 @@ void GHOST_WindowWayland::getWindowBounds(GHOST_Rect &bounds) const
void GHOST_WindowWayland::getClientBounds(GHOST_Rect &bounds) const
{
bounds.set(0, 0, w->width, w->height);
bounds.set(0, 0, w->size[0], w->size[1]);
}
GHOST_TSuccess GHOST_WindowWayland::setClientWidth(uint32_t width)
{
return setClientSize(width, uint32_t(w->height));
return setClientSize(width, uint32_t(w->size[1]));
}
GHOST_TSuccess GHOST_WindowWayland::setClientHeight(uint32_t height)
{
return setClientSize(uint32_t(w->width), height);
return setClientSize(uint32_t(w->size[0]), height);
}
GHOST_TSuccess GHOST_WindowWayland::setClientSize(uint32_t width, uint32_t height)
{
wl_egl_window_resize(w->egl_window, int(width), int(height), 0, 0);
/* Override any pending size that may be set. */
w->size_pending[0] = 0;
w->size_pending[1] = 0;
w->size[0] = width;
w->size[1] = height;
notify_size();
return GHOST_kSuccess;
}
@ -494,7 +575,7 @@ void GHOST_WindowWayland::setOpaque() const
/* Make the window opaque. */
region = wl_compositor_create_region(m_system->compositor());
wl_region_add(region, 0, 0, w->width, w->height);
wl_region_add(region, 0, 0, w->size[0], w->size[1]);
wl_surface_set_opaque_region(w->surface, region);
wl_region_destroy(region);
}

View File

@ -50,13 +50,15 @@ class GHOST_WindowWayland : public GHOST_Window {
wl_surface *surface() const;
const std::vector<output_t *> &outputs() const;
std::vector<const output_t *> &outputs();
std::unordered_set<const output_t *> &outputs_active();
output_t *output_find_by_wl(struct wl_output *output);
uint16_t &dpi();
bool outputs_changed_update_scale();
int &scale();
uint16_t dpi();
int scale();
protected:
GHOST_TSuccess setWindowCursorGrab(GHOST_TGrabCursorMode mode) override;

View File

@ -1711,7 +1711,7 @@ uint16_t GHOST_WindowX11::getDPIHint()
XrmDestroyDatabase(xrdb);
}
/* Fallback to calculating DPI using X reported DPI, set using xrandr --dpi */
/* Fallback to calculating DPI using X reported DPI, set using `xrandr --dpi`. */
XWindowAttributes attr;
if (!XGetWindowAttributes(m_display, m_window, &attr)) {
/* Failed to get window attributes, return X11 default DPI */

View File

@ -332,7 +332,7 @@ void GHOST_Wintab::getInput(std::vector<GHOST_WintabInfoWin32> &outWintabInfo)
outWintabInfo.reserve(numPackets);
for (int i = 0; i < numPackets; i++) {
PACKET pkt = m_pkts[i];
const PACKET pkt = m_pkts[i];
GHOST_WintabInfoWin32 out;
/* % 3 for multiple devices ("DualTrack"). */
@ -389,11 +389,12 @@ void GHOST_Wintab::getInput(std::vector<GHOST_WintabInfoWin32> &outWintabInfo)
/* Some Wintab libraries don't handle relative button input, so we track button presses
* manually. */
DWORD buttonsChanged = m_buttons ^ pkt.pkButtons;
WORD buttonIndex = 0;
/* We only needed the prior button state to compare to current, so we can overwrite it now. */
m_buttons = pkt.pkButtons;
while (buttonsChanged) {
/* Iterate over button flag indices until all flags are clear. */
for (WORD buttonIndex = 0; buttonsChanged; buttonIndex++, buttonsChanged >>= 1) {
if (buttonsChanged & 1) {
/* Find the index for the changed button from the button map. */
GHOST_TButtonMask button = mapWintabToGhostButton(pkt.pkCursor, buttonIndex);
if (button != GHOST_kButtonMaskNone) {
@ -403,15 +404,11 @@ void GHOST_Wintab::getInput(std::vector<GHOST_WintabInfoWin32> &outWintabInfo)
}
out.button = button;
out.type = buttonsChanged & pkt.pkButtons ? GHOST_kEventButtonDown :
GHOST_kEventButtonUp;
DWORD buttonFlag = 1 << buttonIndex;
out.type = pkt.pkButtons & buttonFlag ? GHOST_kEventButtonDown : GHOST_kEventButtonUp;
}
m_buttons ^= 1 << buttonIndex;
}
buttonsChanged >>= 1;
buttonIndex++;
}
outWintabInfo.push_back(out);

View File

@ -187,7 +187,7 @@ class GHOST_Wintab {
bool m_focused = false;
/** Pressed button map. */
uint8_t m_buttons = 0;
DWORD m_buttons = 0;
/** Range of a coordinate space. */
struct Range {

View File

@ -197,7 +197,7 @@ colorspaces:
equalitygroup:
bitdepth: 32f
description: |
sRGB displace space with Filmic view transform
sRGB display space with Filmic view transform
isdata: false
from_reference: !<GroupTransform>
children:

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-or-later
"""
Utilities relating to text mode console interations.
Utilities relating to text mode console interactions.
"""

View File

@ -56,7 +56,7 @@ LANGUAGES = (
(21, "Arabic (ﺔﻴﺑﺮﻌﻟﺍ)", "ar_EG"),
(22, "Bulgarian (Български)", "bg_BG"),
(23, "Greek (Ελληνικά)", "el_GR"),
(24, "Korean (한국어)", "ko_KR"),
(24, "Korean (한국어)", "ko_KR"),
(25, "Nepali (नेपाली)", "ne_NP"),
# Using the utf8 flipped form of Persian (فارسی).
(26, "Persian (ﯽﺳﺭﺎﻓ)", "fa_IR"),

View File

@ -88,6 +88,10 @@ _km_hierarchy = [
_km_expand_from_toolsystem('VIEW_3D', 'SCULPT'),
]),
('Sculpt Curves', 'EMPTY', 'WINDOW', [
_km_expand_from_toolsystem('VIEW_3D', 'CURVES_SCULPT'),
]),
('Particle', 'EMPTY', 'WINDOW', [
_km_expand_from_toolsystem('VIEW_3D', 'PARTICLE'),
]),

View File

@ -12,24 +12,65 @@
# int SDNAnr, nr;
# } BHead;
__all__ = (
"read_blend_rend_chunk",
)
def read_blend_rend_chunk(path):
class RawBlendFileReader:
"""
Return a file handle to the raw blend file data (abstracting compressed formats).
"""
__slots__ = (
# The path to load.
"_filepath",
# The file base file handler or None (only set for compressed formats).
"_blendfile_base",
# The file handler to return to the caller (always uncompressed data).
"_blendfile",
)
def __init__(self, filepath):
self._filepath = filepath
self._blendfile_base = None
self._blendfile = None
def __enter__(self):
blendfile = open(self._filepath, "rb")
blendfile_base = None
head = blendfile.read(4)
blendfile.seek(0)
if head[0:2] == b'\x1f\x8b': # GZIP magic.
import gzip
blendfile_base = blendfile
blendfile = gzip.open(blendfile, "rb")
elif head[0:4] == b'\x28\xb5\x2f\xfd': # Z-standard magic.
import zstandard
blendfile_base = blendfile
blendfile = zstandard.open(blendfile, "rb")
self._blendfile_base = blendfile_base
self._blendfile = blendfile
return self._blendfile
def __exit__(self, exc_type, exc_value, exc_traceback):
self._blendfile.close()
if self._blendfile_base is not None:
self._blendfile_base.close()
return False
def _read_blend_rend_chunk_from_file(blendfile, filepath):
import struct
import sys
blendfile = open(path, "rb")
from os import SEEK_CUR
head = blendfile.read(7)
if head[0:2] == b'\x1f\x8b': # gzip magic
import gzip
blendfile.seek(0)
blendfile = gzip.open(blendfile, "rb")
head = blendfile.read(7)
if head != b'BLENDER':
print("not a blend file:", path)
blendfile.close()
sys.stderr.write("Not a blend file: %s\n" % filepath)
return []
is_64_bit = (blendfile.read(1) == b'-')
@ -37,47 +78,52 @@ def read_blend_rend_chunk(path):
# true for PPC, false for X86
is_big_endian = (blendfile.read(1) == b'V')
# Now read the bhead chunk!!!
blendfile.read(3) # skip the version
# Now read the bhead chunk!
blendfile.seek(3, SEEK_CUR) # Skip the version.
scenes = []
sizeof_bhead = 24 if is_64_bit else 20
while blendfile.read(4) == b'REND':
sizeof_bhead_left = sizeof_bhead - 4
while len(bhead_id := blendfile.read(4)) == 4:
sizeof_data_left = struct.unpack('>i' if is_big_endian else '<i', blendfile.read(4))[0]
# 4 from the `bhead_id`, another 4 for the size of the BHEAD.
sizeof_bhead_left = sizeof_bhead - 8
struct.unpack('>i' if is_big_endian else '<i', blendfile.read(4))[0]
sizeof_bhead_left -= 4
# The remainder of the BHEAD struct is not used.
blendfile.seek(sizeof_bhead_left, SEEK_CUR)
# We don't care about the rest of the bhead struct
blendfile.read(sizeof_bhead_left)
if bhead_id == b'REND':
# Now we want the scene name, start and end frame. this is 32bits long.
start_frame, end_frame = struct.unpack('>2i' if is_big_endian else '<2i', blendfile.read(8))
sizeof_data_left -= 8
# Now we want the scene name, start and end frame. this is 32 bits long
start_frame, end_frame = struct.unpack('>2i' if is_big_endian else '<2i', blendfile.read(8))
scene_name = blendfile.read(64)
sizeof_data_left -= 64
scene_name = blendfile.read(64)
scene_name = scene_name[:scene_name.index(b'\0')]
# It's possible old blend files are not UTF8 compliant, use `surrogateescape`.
scene_name = scene_name.decode("utf8", errors='surrogateescape')
scene_name = scene_name[:scene_name.index(b'\0')]
scenes.append((start_frame, end_frame, scene_name))
try:
scene_name = str(scene_name, "utf8")
except TypeError:
pass
scenes.append((start_frame, end_frame, scene_name))
blendfile.close()
if sizeof_data_left != 0:
blendfile.seek(sizeof_data_left, SEEK_CUR)
return scenes
def read_blend_rend_chunk(filepath):
with RawBlendFileReader(filepath) as blendfile:
return _read_blend_rend_chunk_from_file(blendfile, filepath)
def main():
import sys
for arg in sys.argv[1:]:
if arg.lower().endswith('.blend'):
for value in read_blend_rend_chunk(arg):
print("%d %d %s" % value)
for filepath in sys.argv[1:]:
for value in read_blend_rend_chunk(filepath):
print("%d %d %s" % value)
if __name__ == '__main__':

View File

@ -463,6 +463,7 @@ def _template_items_tool_select(
# Always use the cursor operator where possible,
# needed for time-line views where we always want to be able to scrub time.
cursor_prioritize=False,
operator_props=(),
fallback=False,
):
if not params.legacy and not fallback:
@ -479,11 +480,11 @@ def _template_items_tool_select(
if select_passthrough:
return [
(operator, {"type": 'LEFTMOUSE', "value": 'PRESS'},
{"properties": [("deselect_all", True), ("select_passthrough", True)]}),
{"properties": [("deselect_all", True), ("select_passthrough", True), *operator_props]}),
(operator, {"type": 'LEFTMOUSE', "value": 'CLICK'},
{"properties": [("deselect_all", True)]}),
{"properties": [("deselect_all", True), *operator_props]}),
(operator, {"type": 'LEFTMOUSE', "value": 'PRESS', "shift": True},
{"properties": [("deselect_all", False), ("toggle", True)]}),
{"properties": [("deselect_all", False), ("toggle", True), *operator_props]}),
("transform.translate", {"type": 'LEFTMOUSE', "value": 'CLICK_DRAG'},
{"properties": [("release_confirm", True)]}),
]
@ -497,9 +498,9 @@ def _template_items_tool_select(
# the tool without selecting elements under the cursor.
return [
(operator, {"type": 'LEFTMOUSE', "value": 'CLICK' if fallback else 'PRESS'},
{"properties": [("deselect_all", True)]}),
{"properties": [("deselect_all", True), *operator_props]}),
(operator, {"type": 'LEFTMOUSE', "value": 'CLICK' if fallback else 'PRESS', "shift": True},
{"properties": [("toggle", True)]}),
{"properties": [("toggle", True), *operator_props]}),
# Fallback key-map must transform as the primary tool is expected
# to be accessed via gizmos in this case. See: T96885.
@ -5981,6 +5982,8 @@ def km_standard_modal_map(_params):
("APPLY", {"type": 'NUMPAD_ENTER', "value": 'PRESS', "any": True}, None),
("SNAP", {"type": 'LEFT_CTRL', "value": 'PRESS', "any": True}, None),
("SNAP_OFF", {"type": 'LEFT_CTRL', "value": 'RELEASE', "any": True}, None),
("SNAP", {"type": 'RIGHT_CTRL', "value": 'PRESS', "any": True}, None),
("SNAP_OFF", {"type": 'RIGHT_CTRL', "value": 'RELEASE', "any": True}, None),
])
return keymap
@ -6127,10 +6130,16 @@ def km_view3d_fly_modal(_params):
("AXIS_LOCK_Z", {"type": 'Z', "value": 'PRESS'}, None),
("PRECISION_ENABLE", {"type": 'LEFT_ALT', "value": 'PRESS', "any": True}, None),
("PRECISION_DISABLE", {"type": 'LEFT_ALT', "value": 'RELEASE', "any": True}, None),
("PRECISION_ENABLE", {"type": 'RIGHT_ALT', "value": 'PRESS', "any": True}, None),
("PRECISION_DISABLE", {"type": 'RIGHT_ALT', "value": 'RELEASE', "any": True}, None),
("PRECISION_ENABLE", {"type": 'LEFT_SHIFT', "value": 'PRESS', "any": True}, None),
("PRECISION_DISABLE", {"type": 'LEFT_SHIFT', "value": 'RELEASE', "any": True}, None),
("PRECISION_ENABLE", {"type": 'RIGHT_SHIFT', "value": 'PRESS', "any": True}, None),
("PRECISION_DISABLE", {"type": 'RIGHT_SHIFT', "value": 'RELEASE', "any": True}, None),
("FREELOOK_ENABLE", {"type": 'LEFT_CTRL', "value": 'PRESS', "any": True}, None),
("FREELOOK_DISABLE", {"type": 'LEFT_CTRL', "value": 'RELEASE', "any": True}, None),
("FREELOOK_ENABLE", {"type": 'RIGHT_CTRL', "value": 'PRESS', "any": True}, None),
("FREELOOK_DISABLE", {"type": 'RIGHT_CTRL', "value": 'RELEASE', "any": True}, None),
])
return keymap
@ -6152,8 +6161,12 @@ def km_view3d_walk_modal(_params):
("CONFIRM", {"type": 'NUMPAD_ENTER', "value": 'PRESS', "any": True}, None),
("FAST_ENABLE", {"type": 'LEFT_SHIFT', "value": 'PRESS', "any": True}, None),
("FAST_DISABLE", {"type": 'LEFT_SHIFT', "value": 'RELEASE', "any": True}, None),
("FAST_ENABLE", {"type": 'RIGHT_SHIFT', "value": 'PRESS', "any": True}, None),
("FAST_DISABLE", {"type": 'RIGHT_SHIFT', "value": 'RELEASE', "any": True}, None),
("SLOW_ENABLE", {"type": 'LEFT_ALT', "value": 'PRESS', "any": True}, None),
("SLOW_DISABLE", {"type": 'LEFT_ALT', "value": 'RELEASE', "any": True}, None),
("SLOW_ENABLE", {"type": 'RIGHT_ALT', "value": 'PRESS', "any": True}, None),
("SLOW_DISABLE", {"type": 'RIGHT_ALT', "value": 'RELEASE', "any": True}, None),
("FORWARD", {"type": 'W', "value": 'PRESS', "any": True}, None),
("BACKWARD", {"type": 'S', "value": 'PRESS', "any": True}, None),
("LEFT", {"type": 'A', "value": 'PRESS', "any": True}, None),
@ -6203,6 +6216,8 @@ def km_view3d_rotate_modal(_params):
("CONFIRM", {"type": 'ESC', "value": 'PRESS', "any": True}, None),
("AXIS_SNAP_ENABLE", {"type": 'LEFT_ALT', "value": 'PRESS', "any": True}, None),
("AXIS_SNAP_DISABLE", {"type": 'LEFT_ALT', "value": 'RELEASE', "any": True}, None),
("AXIS_SNAP_ENABLE", {"type": 'RIGHT_ALT', "value": 'PRESS', "any": True}, None),
("AXIS_SNAP_DISABLE", {"type": 'RIGHT_ALT', "value": 'RELEASE', "any": True}, None),
])
return keymap
@ -6292,6 +6307,7 @@ def km_sculpt_expand_modal(_params):
*((e, {"type": NUMBERS_1[i], "value": 'PRESS', "any": True}, None) for i, e in enumerate(
("FALLOFF_GEODESICS", "FALLOFF_TOPOLOGY", "FALLOFF_TOPOLOGY_DIAGONALS", "FALLOFF_SPHERICAL"))),
("SNAP_TOGGLE", {"type": 'LEFT_CTRL', "value": 'ANY'}, None),
("SNAP_TOGGLE", {"type": 'RIGHT_CTRL', "value": 'ANY'}, None),
("LOOP_COUNT_INCREASE", {"type": 'W', "value": 'PRESS', "any": True, "repeat": True}, None),
("LOOP_COUNT_DECREASE", {"type": 'Q', "value": 'PRESS', "any": True, "repeat": True}, None),
("BRUSH_GRADIENT_TOGGLE", {"type": 'B', "value": 'PRESS', "any": True}, None),
@ -6713,12 +6729,17 @@ def km_3d_view_tool_cursor(params):
def km_3d_view_tool_select(params, *, fallback):
if params.use_tweak_select_passthrough:
operator_props = (("vert_without_handles", True),)
else:
operator_props = ()
return (
_fallback_id("3D View Tool: Tweak", fallback),
{"space_type": 'VIEW_3D', "region_type": 'WINDOW'},
{"items": [
*([] if (fallback and (params.select_mouse == 'RIGHTMOUSE')) else _template_items_tool_select(
params, "view3d.select", "view3d.cursor3d", fallback=fallback)),
params, "view3d.select", "view3d.cursor3d", operator_props=operator_props, fallback=fallback)),
*([] if (not params.use_fallback_tool_rmb) else _template_view3d_select(
type=params.select_mouse,
value=params.select_mouse_value,

View File

@ -129,6 +129,15 @@ class NodeAddOperator:
return result
@classmethod
def description(cls, context, properties):
nodetype = properties["type"]
bl_rna = bpy.types.Node.bl_rna_get_subclass(nodetype)
if bl_rna is not None:
return bl_rna.description
else:
return ""
# Simple basic operator for adding a node
class NODE_OT_add_node(NodeAddOperator, Operator):

View File

@ -855,6 +855,37 @@ class DupliOffsetFromCursor(Operator):
return {'FINISHED'}
class DupliOffsetToCursor(Operator):
"""Set cursor position to the offset used for collection instances"""
bl_idname = "object.instance_offset_to_cursor"
bl_label = "Set Cursor to Offset"
bl_options = {'INTERNAL', 'UNDO'}
def execute(self, context):
scene = context.scene
collection = context.collection
scene.cursor.location = collection.instance_offset
return {'FINISHED'}
class DupliOffsetFromObject(Operator):
"""Set offset used for collection instances based on the active object position"""
bl_idname = "object.instance_offset_from_object"
bl_label = "Set Offset from Object"
bl_options = {'INTERNAL', 'UNDO'}
@classmethod
def poll(cls, context):
return (context.active_object is not None)
def execute(self, context):
ob_eval = context.active_object.evaluated_get(context.view_layer.depsgraph)
world_loc = ob_eval.matrix_world.to_translation()
collection = context.collection
collection.instance_offset = world_loc
return {'FINISHED'}
class LoadImageAsEmpty:
bl_options = {'REGISTER', 'UNDO'}
@ -976,6 +1007,8 @@ class OBJECT_OT_assign_property_defaults(Operator):
classes = (
ClearAllRestrictRender,
DupliOffsetFromCursor,
DupliOffsetToCursor,
DupliOffsetFromObject,
IsolateTypeRender,
JoinUVs,
LoadBackgroundImage,

View File

@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-or-later
from bpy.types import Panel
from bpy.types import Panel, Menu
class CollectionButtonsPanel:
@ -42,6 +42,16 @@ class COLLECTION_PT_collection_flags(CollectionButtonsPanel, Panel):
col.prop(vlc, "indirect_only", toggle=False)
class COLLECTION_MT_context_menu_instance_offset(Menu):
bl_label = "Instance Offset"
def draw(self, _context):
layout = self.layout
layout.operator("object.instance_offset_from_cursor")
layout.operator("object.instance_offset_from_object")
layout.operator("object.instance_offset_to_cursor")
class COLLECTION_PT_instancing(CollectionButtonsPanel, Panel):
bl_label = "Instancing"
@ -51,8 +61,9 @@ class COLLECTION_PT_instancing(CollectionButtonsPanel, Panel):
layout.use_property_decorate = False
collection = context.collection
row = layout.row()
row = layout.row(align=True)
row.prop(collection, "instance_offset")
row.menu("COLLECTION_MT_context_menu_instance_offset", icon='DOWNARROW_HLT', text="")
class COLLECTION_PT_lineart_collection(CollectionButtonsPanel, Panel):
@ -80,6 +91,7 @@ class COLLECTION_PT_lineart_collection(CollectionButtonsPanel, Panel):
classes = (
COLLECTION_MT_context_menu_instance_offset,
COLLECTION_PT_collection_flags,
COLLECTION_PT_instancing,
COLLECTION_PT_lineart_collection,

View File

@ -44,6 +44,7 @@ class DATA_PT_curves_surface(DataButtonsPanel, Panel):
layout.use_property_split = True
layout.prop(ob.data, "surface")
layout.prop(ob.data, "surface_uv_map", text="UV Map")
class CURVES_MT_add_attribute(Menu):

View File

@ -64,6 +64,18 @@ class MESH_MT_shape_key_context_menu(Menu):
layout.operator("object.shape_key_move", icon='TRIA_DOWN_BAR', text="Move to Bottom").type = 'BOTTOM'
class MESH_MT_color_attribute_context_menu(Menu):
bl_label = "Color Attribute Specials"
def draw(self, _context):
layout = self.layout
props = layout.operator(
"geometry.color_attribute_duplicate",
icon='DUPLICATE',
)
class MESH_MT_attribute_context_menu(Menu):
bl_label = "Attribute Specials"
@ -661,12 +673,17 @@ class DATA_PT_vertex_colors(DATA_PT_mesh_attributes, Panel):
col.operator("geometry.color_attribute_add", icon='ADD', text="")
col.operator("geometry.color_attribute_remove", icon='REMOVE', text="")
col.separator()
col.menu("MESH_MT_color_attribute_context_menu", icon='DOWNARROW_HLT', text="")
self.draw_attribute_warnings(context, layout)
classes = (
MESH_MT_vertex_group_context_menu,
MESH_MT_shape_key_context_menu,
MESH_MT_color_attribute_context_menu,
MESH_MT_attribute_context_menu,
MESH_UL_vgroups,
MESH_UL_fmaps,

View File

@ -2087,10 +2087,9 @@ class SEQUENCER_PT_adjust_transform(SequencerButtonsPanel, Panel):
col = layout.column(align=True)
col.prop(strip.transform, "origin")
row = layout.row(heading="Mirror")
sub = row.row(align=True)
sub.prop(strip, "use_flip_x", text="X", toggle=True)
sub.prop(strip, "use_flip_y", text="Y", toggle=True)
col = layout.column(heading="Mirror", align=True)
col.prop(strip, "use_flip_x", text="X", toggle=True)
col.prop(strip, "use_flip_y", text="Y", toggle=True)
class SEQUENCER_PT_adjust_video(SequencerButtonsPanel, Panel):

View File

@ -453,8 +453,13 @@ class TOPBAR_MT_file_import(Menu):
self.layout.operator(
"wm.usd_import", text="Universal Scene Description (.usd, .usdc, .usda)")
self.layout.operator("wm.gpencil_import_svg", text="SVG as Grease Pencil")
self.layout.operator("wm.obj_import", text="Wavefront (.obj) (experimental)")
if bpy.app.build_options.io_gpencil:
self.layout.operator("wm.gpencil_import_svg", text="SVG as Grease Pencil")
if bpy.app.build_options.io_wavefront_obj:
self.layout.operator("wm.obj_import", text="Wavefront (.obj) (experimental)")
if bpy.app.build_options.io_stl:
self.layout.operator("wm.stl_import", text="STL (.stl) (experimental)")
class TOPBAR_MT_file_export(Menu):
@ -471,14 +476,16 @@ class TOPBAR_MT_file_export(Menu):
self.layout.operator(
"wm.usd_export", text="Universal Scene Description (.usd, .usdc, .usda)")
# Pugixml lib dependency
if bpy.app.build_options.pugixml:
self.layout.operator("wm.gpencil_export_svg", text="Grease Pencil as SVG")
# Haru lib dependency
if bpy.app.build_options.haru:
self.layout.operator("wm.gpencil_export_pdf", text="Grease Pencil as PDF")
if bpy.app.build_options.io_gpencil:
# Pugixml lib dependency
if bpy.app.build_options.pugixml:
self.layout.operator("wm.gpencil_export_svg", text="Grease Pencil as SVG")
# Haru lib dependency
if bpy.app.build_options.haru:
self.layout.operator("wm.gpencil_export_pdf", text="Grease Pencil as PDF")
self.layout.operator("wm.obj_export", text="Wavefront (.obj) (experimental)")
if bpy.app.build_options.io_wavefront_obj:
self.layout.operator("wm.obj_export", text="Wavefront (.obj) (experimental)")
class TOPBAR_MT_file_external_data(Menu):

View File

@ -183,6 +183,24 @@ def geometry_input_node_items(context):
yield NodeItem("GeometryNodeInputSceneTime")
# Custom Menu for Geometry Node Instance Nodes.
def geometry_instance_node_items(context):
if context is None:
return
space = context.space_data
if not space:
return
yield NodeItem("GeometryNodeInstanceOnPoints")
yield NodeItem("GeometryNodeInstancesToPoints")
yield NodeItem("GeometryNodeRealizeInstances")
yield NodeItem("GeometryNodeRotateInstances")
yield NodeItem("GeometryNodeScaleInstances")
yield NodeItem("GeometryNodeTranslateInstances")
yield NodeItemCustom(draw=lambda self, layout, context: layout.separator())
yield NodeItem("GeometryNodeInputInstanceRotation")
yield NodeItem("GeometryNodeInputInstanceScale")
# Custom Menu for Material Nodes.
def geometry_material_node_items(context):
if context is None:
@ -635,14 +653,7 @@ geometry_node_categories = [
]),
GeometryNodeCategory("GEO_GEOMETRY", "Geometry", items=geometry_node_items),
GeometryNodeCategory("GEO_INPUT", "Input", items=geometry_input_node_items),
GeometryNodeCategory("GEO_INSTANCE", "Instances", items=[
NodeItem("GeometryNodeInstanceOnPoints"),
NodeItem("GeometryNodeInstancesToPoints"),
NodeItem("GeometryNodeRealizeInstances"),
NodeItem("GeometryNodeRotateInstances"),
NodeItem("GeometryNodeScaleInstances"),
NodeItem("GeometryNodeTranslateInstances"),
]),
GeometryNodeCategory("GEO_INSTANCE", "Instances", items=geometry_instance_node_items),
GeometryNodeCategory("GEO_MATERIAL", "Material", items=geometry_material_node_items),
GeometryNodeCategory("GEO_MESH", "Mesh", items=mesh_node_items),
GeometryNodeCategory("GEO_PRIMITIVES_MESH", "Mesh Primitives", items=[

View File

@ -50,16 +50,21 @@ typedef enum eAttrDomainMask {
/* Attributes. */
bool BKE_id_attributes_supported(struct ID *id);
bool BKE_id_attributes_supported(const struct ID *id);
bool BKE_attribute_allow_procedural_access(const char *attribute_name);
/**
* Create a new attribute layer.
*/
struct CustomDataLayer *BKE_id_attribute_new(
struct ID *id, const char *name, int type, eAttrDomain domain, struct ReportList *reports);
bool BKE_id_attribute_remove(struct ID *id,
struct CustomDataLayer *layer,
struct ReportList *reports);
bool BKE_id_attribute_remove(struct ID *id, const char *name, struct ReportList *reports);
/**
* Creates a duplicate attribute layer.
*/
struct CustomDataLayer *BKE_id_attribute_duplicate(struct ID *id,
struct CustomDataLayer *layer,
struct ReportList *reports);
struct CustomDataLayer *BKE_id_attribute_find(const struct ID *id,
const char *name,
@ -73,9 +78,9 @@ struct CustomDataLayer *BKE_id_attribute_search(const struct ID *id,
eAttrDomain BKE_id_attribute_domain(const struct ID *id, const struct CustomDataLayer *layer);
int BKE_id_attribute_data_length(struct ID *id, struct CustomDataLayer *layer);
bool BKE_id_attribute_required(struct ID *id, struct CustomDataLayer *layer);
bool BKE_id_attribute_required(const struct ID *id, const char *name);
bool BKE_id_attribute_rename(struct ID *id,
struct CustomDataLayer *layer,
const char *old_name,
const char *new_name,
struct ReportList *reports);

View File

@ -100,6 +100,9 @@ typedef enum {
BKE_CB_EVT_OBJECT_BAKE_PRE,
BKE_CB_EVT_OBJECT_BAKE_COMPLETE,
BKE_CB_EVT_OBJECT_BAKE_CANCEL,
BKE_CB_EVT_COMPOSITE_PRE,
BKE_CB_EVT_COMPOSITE_POST,
BKE_CB_EVT_COMPOSITE_CANCEL,
BKE_CB_EVT_TOT,
} eCbEvent;

View File

@ -119,7 +119,7 @@ bool BKE_collection_object_add(struct Main *bmain,
/**
* Add object to given collection, similar to #BKE_collection_object_add.
*
* However, it additionnally ensures that the selected collection is also part of the given
* However, it additionally ensures that the selected collection is also part of the given
* `view_layer`, if non-NULL. Otherwise, the object is not added to any collection.
*/
bool BKE_collection_viewlayer_object_add(struct Main *bmain,

View File

@ -307,6 +307,25 @@ void BKE_constraint_target_matrix_get(struct Depsgraph *depsgraph,
void *ownerdata,
float mat[4][4],
float ctime);
/**
* Retrieves the list of all constraint targets, including the custom space target.
* Must be followed by a call to BKE_constraint_targets_flush to free memory.
*
* \param r_targets Pointer to the list to be initialized with target data.
* \returns the number of targets stored in the list.
*/
int BKE_constraint_targets_get(struct bConstraint *con, struct ListBase *r_targets);
/**
* Copies changed data from the list produced by BKE_constraint_targets_get back to the constraint
* data structures and frees memory.
*
* \param targets List of targets filled by BKE_constraint_targets_get.
* \param no_copy Only free memory without copying changes (read-only mode).
*/
void BKE_constraint_targets_flush(struct bConstraint *con, struct ListBase *targets, bool no_copy);
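A hedged usage sketch of this paired API, matching the pattern the pose-channel code later in this diff is converted to (`con` and `ob` stand for a constraint and an object supplied by the caller):
ListBase targets = {NULL, NULL};
if (BKE_constraint_targets_get(con, &targets)) {
  LISTBASE_FOREACH (bConstraintTarget *, ct, &targets) {
    if (ct->tar == ob) {
      /* Inspect or adjust the target here. */
    }
  }
  /* Copy changes back and free the list; pass true to only free (read-only use). */
  BKE_constraint_targets_flush(con, &targets, false);
}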
/**
* Get the list of targets required for solving a constraint.
*/

View File

@ -222,9 +222,10 @@ void CTX_wm_operator_poll_msg_clear(struct bContext *C);
/* Data Context
*
* - listbases consist of CollectionPointerLink items and must be
* freed with BLI_freelistN!
* - the dir listbase consists of LinkData items */
* - #ListBase consists of #CollectionPointerLink items and must be
* freed with #BLI_freelistN!
* - The dir #ListBase consists of #LinkData items.
*/
/* data type, needed so we can tell between a NULL pointer and an empty list */
enum {

View File

@ -25,7 +25,7 @@ void *BKE_curves_add(struct Main *bmain, const char *name);
struct BoundBox *BKE_curves_boundbox_get(struct Object *ob);
bool BKE_curves_customdata_required(struct Curves *curves, struct CustomDataLayer *layer);
bool BKE_curves_customdata_required(const struct Curves *curves, const char *name);
/* Depsgraph */

View File

@ -182,6 +182,7 @@ class CurvesGeometry : public ::CurvesGeometry {
void update_curve_types();
bool has_curve_with_type(CurveType type) const;
bool has_curve_with_type(Span<CurveType> types) const;
/** Return true if all of the curves have the provided type. */
bool is_single_type(CurveType type) const;
/** Return the number of curves with each type. */
@ -264,22 +265,10 @@ class CurvesGeometry : public ::CurvesGeometry {
MutableSpan<float> nurbs_weights_for_write();
/**
* The index of a triangle (#MLoopTri) that a curve is attached to.
* The index is -1, if the curve is not attached.
* UV coordinate for each curve that encodes where the curve is attached to the surface mesh.
*/
VArray<int> surface_triangle_indices() const;
MutableSpan<int> surface_triangle_indices_for_write();
/**
* Barycentric coordinates of the attachment point within a triangle.
* Only the first two coordinates are stored. The third coordinate can be derived because the sum
* of the three coordinates is 1.
*
* When the triangle index is -1, this coordinate should be ignored.
* The span can be empty, when all triangle indices are -1.
*/
Span<float2> surface_triangle_coords() const;
MutableSpan<float2> surface_triangle_coords_for_write();
Span<float2> surface_uv_coords() const;
MutableSpan<float2> surface_uv_coords_for_write();
VArray<float> selection_point_float() const;
MutableSpan<float> selection_point_float_for_write();
@ -398,6 +387,7 @@ class CurvesGeometry : public ::CurvesGeometry {
void update_customdata_pointers();
void remove_points(IndexMask points_to_delete);
void remove_curves(IndexMask curves_to_delete);
/**
@ -406,6 +396,11 @@ class CurvesGeometry : public ::CurvesGeometry {
*/
void reverse_curves(IndexMask curves_to_reverse);
/**
* Remove any attributes that are unused based on the types in the curves.
*/
void remove_attributes_based_on_types();
/* --------------------------------------------------------------------
* Attributes.
*/
@ -722,6 +717,12 @@ inline bool CurvesGeometry::has_curve_with_type(const CurveType type) const
return this->curve_type_counts()[type] > 0;
}
inline bool CurvesGeometry::has_curve_with_type(const Span<CurveType> types) const
{
return std::any_of(
types.begin(), types.end(), [&](CurveType type) { return this->has_curve_with_type(type); });
}
inline const std::array<int, CURVE_TYPES_NUM> &CurvesGeometry::curve_type_counts() const
{
BLI_assert(this->runtime->type_counts == calculate_type_counts(this->curve_types()));

View File

@ -9,8 +9,52 @@
* \brief Low-level operations for curves.
*/
#include "BLI_function_ref.hh"
#include "BLI_generic_pointer.hh"
namespace blender::bke::curves {
/**
* Copy the provided point attribute values between all curves in the #curve_ranges index
* ranges, assuming that all curves have the same number of control points in #src_curves
* and #dst_curves.
*/
void copy_point_data(const CurvesGeometry &src_curves,
const CurvesGeometry &dst_curves,
Span<IndexRange> curve_ranges,
GSpan src,
GMutableSpan dst);
void copy_point_data(const CurvesGeometry &src_curves,
const CurvesGeometry &dst_curves,
IndexMask src_curve_selection,
GSpan src,
GMutableSpan dst);
template<typename T>
void copy_point_data(const CurvesGeometry &src_curves,
const CurvesGeometry &dst_curves,
const IndexMask src_curve_selection,
const Span<T> src,
MutableSpan<T> dst)
{
copy_point_data(src_curves, dst_curves, src_curve_selection, GSpan(src), GMutableSpan(dst));
}
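A minimal sketch of the typed wrapper, assuming the caller already holds matching per-point spans; the helper name and attribute are hypothetical:
/* Copy per-point radii for the selected curves; both geometries are assumed to
 * have the same number of control points in every selected curve. */
void copy_selected_radii(const blender::bke::CurvesGeometry &src_curves,
                         const blender::bke::CurvesGeometry &dst_curves,
                         const blender::IndexMask selection,
                         const blender::Span<float> src_radii,
                         blender::MutableSpan<float> dst_radii)
{
  blender::bke::curves::copy_point_data(src_curves, dst_curves, selection, src_radii, dst_radii);
}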
void fill_points(const CurvesGeometry &curves,
IndexMask curve_selection,
GPointer value,
GMutableSpan dst);
template<typename T>
void fill_points(const CurvesGeometry &curves,
const IndexMask curve_selection,
const T &value,
MutableSpan<T> dst)
{
fill_points(curves, curve_selection, &value, dst);
}
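A corresponding sketch for fill_points, resetting a per-point value on the selected curves (the helper and constant are illustrative only):
void reset_selected_radii(const blender::bke::CurvesGeometry &curves,
                          const blender::IndexMask selection,
                          blender::MutableSpan<float> radii)
{
  /* Every point of every selected curve gets the same radius. */
  blender::bke::curves::fill_points(curves, selection, 0.01f, radii);
}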
/**
* Copy the size of every curve in #curve_ranges to the corresponding index in #counts.
*/
@ -23,4 +67,18 @@ void fill_curve_counts(const bke::CurvesGeometry &curves,
*/
void accumulate_counts_to_offsets(MutableSpan<int> counts_to_offsets, int start_offset = 0);
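A small worked sketch of the counts-to-offsets pattern, assuming (as the implementation later in this diff suggests) that the span carries one trailing element for the total:
/* Hypothetical sizes: three new curves with 4, 2 and 3 points. */
blender::Array<int> offsets(3 + 1);
offsets.as_mutable_span().drop_back(1).copy_from({4, 2, 3});
blender::bke::curves::accumulate_counts_to_offsets(offsets.as_mutable_span());
/* offsets is now {0, 4, 6, 9}; offsets.last() holds the total point count. */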
IndexMask indices_for_type(const VArray<int8_t> &types,
const std::array<int, CURVE_TYPES_NUM> &type_counts,
const CurveType type,
const IndexMask selection,
Vector<int64_t> &r_indices);
void foreach_curve_by_type(const VArray<int8_t> &types,
const std::array<int, CURVE_TYPES_NUM> &type_counts,
IndexMask selection,
FunctionRef<void(IndexMask)> catmull_rom_fn,
FunctionRef<void(IndexMask)> poly_fn,
FunctionRef<void(IndexMask)> bezier_fn,
FunctionRef<void(IndexMask)> nurbs_fn);
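A hedged usage sketch of the dispatcher, with one handler per supported curve type (the handler bodies are placeholders):
void process_by_type(const blender::bke::CurvesGeometry &curves,
                     const blender::IndexMask selection)
{
  blender::bke::curves::foreach_curve_by_type(
      curves.curve_types(),
      curves.curve_type_counts(),
      selection,
      [](blender::IndexMask /*catmull_rom*/) { /* Handle Catmull-Rom curves. */ },
      [](blender::IndexMask /*poly*/) { /* Handle poly curves. */ },
      [](blender::IndexMask /*bezier*/) { /* Handle Bezier curves. */ },
      [](blender::IndexMask /*nurbs*/) { /* Handle NURBS curves. */ });
}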
} // namespace blender::bke::curves

View File

@ -231,6 +231,7 @@ void *CustomData_add_layer_anonymous(struct CustomData *data,
* In edit-mode, use #EDBM_data_layer_free instead of this function.
*/
bool CustomData_free_layer(struct CustomData *data, int type, int totelem, int index);
bool CustomData_free_layer_named(struct CustomData *data, const char *name, const int totelem);
/**
* Frees the layer index with the give type.

View File

@ -267,14 +267,14 @@ struct FCurve *BKE_fcurve_iter_step(struct FCurve *fcu_iter, const char rna_path
* If there is an action assigned to the `id`'s #AnimData, it will be searched for a matching
* F-curve first. Drivers are searched only if no valid action F-curve could be found.
*
* \note: Return pointer parameter (`r_driven`) is optional and may be NULL.
* \note Return pointer parameter (`r_driven`) is optional and may be NULL.
*
* \warning: In case no animation (from an Action) F-curve is found, returned value is always NULL.
* \warning In case no animation (from an Action) F-curve is found, returned value is always NULL.
* This means that this function will set `r_driven` to True in case a valid driver F-curve is
* found, but will not return said F-curve. In other words:
* - Animated with FCurve: returns the `FCurve*` and `*r_driven = false`.
* - Animated with driver: returns `NULL` and `*r_driven = true`.
* - Not animated: returns `NULL` and `*r_driven = false`.
* - Animated with FCurve: returns the `FCurve*` and `*r_driven = false`.
* - Animated with driver: returns `NULL` and `*r_driven = true`.
* - Not animated: returns `NULL` and `*r_driven = false`.
*/
struct FCurve *id_data_find_fcurve(
ID *id, void *data, struct StructRNA *type, const char *prop_name, int index, bool *r_driven);
@ -300,11 +300,11 @@ int BKE_fcurves_filter(ListBase *dst, ListBase *src, const char *dataPrefix, con
* If there is an action assigned to the `animdata`, it will be searched for a matching F-curve
* first. Drivers are searched only if no valid action F-curve could be found.
*
* \note: Typically, indices in RNA arrays are stored separately in F-curves, so the rna_path
* \note Typically, indices in RNA arrays are stored separately in F-curves, so the rna_path
* should not include them (e.g. `rna_path='location[0]'` will not match any F-Curve on an Object,
* but `rna_path='location', rna_index=0` will if it exists).
*
* \note: Return pointer parameters (`r_action`, `r_driven` and `r_special`) are all optional and
* \note Return pointer parameters (`r_action`, `r_driven` and `r_special`) are all optional and
* may be NULL.
*/
struct FCurve *BKE_animadata_fcurve_find_by_rna_path(struct AnimData *animdata,

View File

@ -76,13 +76,13 @@ struct bGPdata;
void BKE_gpencil_free_point_weights(struct MDeformVert *dvert);
void BKE_gpencil_free_stroke_weights(struct bGPDstroke *gps);
void BKE_gpencil_free_stroke_editcurve(struct bGPDstroke *gps);
/* free stroke, doesn't unlink from any listbase */
/** Free stroke, doesn't unlink from any #ListBase. */
void BKE_gpencil_free_stroke(struct bGPDstroke *gps);
/* Free strokes belonging to a gp-frame */
/** Free strokes belonging to a gp-frame. */
bool BKE_gpencil_free_strokes(struct bGPDframe *gpf);
/* Free all of a gp-layer's frames */
/** Free all of a gp-layer's frames. */
void BKE_gpencil_free_frames(struct bGPDlayer *gpl);
/* Free all of the gp-layers for a viewport (list should be &gpd->layers or so) */
/** Free all of the gp-layers for a viewport (list should be `&gpd->layers` or so). */
void BKE_gpencil_free_layers(struct ListBase *list);
/** Free (or release) any data used by this grease pencil (does not free the gpencil itself). */
void BKE_gpencil_free_data(struct bGPdata *gpd, bool free_all);
@ -108,9 +108,9 @@ void BKE_gpencil_batch_cache_free(struct bGPdata *gpd);
*/
void BKE_gpencil_stroke_sync_selection(struct bGPdata *gpd, struct bGPDstroke *gps);
void BKE_gpencil_curve_sync_selection(struct bGPdata *gpd, struct bGPDstroke *gps);
/* Assign unique stroke ID for selection. */
/** Assign unique stroke ID for selection. */
void BKE_gpencil_stroke_select_index_set(struct bGPdata *gpd, struct bGPDstroke *gps);
/* Reset unique stroke ID for selection. */
/** Reset unique stroke ID for selection. */
void BKE_gpencil_stroke_select_index_reset(struct bGPDstroke *gps);
/**

View File

@ -69,6 +69,19 @@ bool BKE_lib_override_library_is_user_edited(const struct ID *id);
*/
bool BKE_lib_override_library_is_system_defined(const struct Main *bmain, const struct ID *id);
/**
* Check if given Override Property for given ID is animated (through a F-Curve in an Action, or
* from a driver).
*
* \param override_rna_prop: if not NULL, the RNA property matching the given path in
* `override_prop`.
* \param rnaprop_index: Array index in the RNA property, 0 if unknown or irrelevant.
*/
bool BKE_lib_override_library_property_is_animated(const ID *id,
const IDOverrideLibraryProperty *override_prop,
const struct PropertyRNA *override_rna_prop,
const int rnaprop_index);
/**
* Check if given ID is a leaf in its liboverride hierarchy (i.e. if it does not use any other
* override ID).
@ -281,11 +294,14 @@ void BKE_lib_override_library_property_delete(struct IDOverrideLibrary *override
*
* \param idpoin: Pointer to the override ID.
* \param library_prop: The library override property to find the matching RNA property for.
* \param r_index: The RNA array flat index (i.e. flattened index in case of multi-dimensional array
* properties). See #RNA_path_resolve_full family of functions for details.
*/
bool BKE_lib_override_rna_property_find(struct PointerRNA *idpoin,
const struct IDOverrideLibraryProperty *library_prop,
struct PointerRNA *r_override_poin,
struct PropertyRNA **r_override_prop);
struct PropertyRNA **r_override_prop,
int *r_index);
/**
* Find override property operation from given sub-item(s), if it exists.

View File

@ -1493,6 +1493,8 @@ struct TexResult;
#define GEO_NODE_STORE_NAMED_ATTRIBUTE 1156
#define GEO_NODE_INPUT_NAMED_ATTRIBUTE 1157
#define GEO_NODE_REMOVE_ATTRIBUTE 1158
#define GEO_NODE_INPUT_INSTANCE_ROTATION 1159
#define GEO_NODE_INPUT_INSTANCE_SCALE 1160
/** \} */

View File

@ -450,7 +450,8 @@ void BKE_pbvh_draw_cb(PBVH *pbvh,
PBVHFrustumPlanes *update_frustum,
PBVHFrustumPlanes *draw_frustum,
void (*draw_fn)(void *user_data, struct GPU_PBVH_Buffers *buffers),
void *user_data);
void *user_data,
bool full_render);
void BKE_pbvh_draw_debug_cb(PBVH *pbvh,
void (*draw_fn)(void *user_data,
@ -1200,3 +1201,5 @@ SculptPMap *BKE_pbvh_make_pmap(const struct Mesh *me);
void BKE_pbvh_pmap_aquire(SculptPMap *pmap);
bool BKE_pbvh_pmap_release(SculptPMap *pmap);
void BKE_pbvh_clear_cache(PBVH *preserve);
void BKE_pbvh_need_full_render_set(PBVH *pbvh, bool state);

View File

@ -30,8 +30,7 @@ struct BoundBox *BKE_pointcloud_boundbox_get(struct Object *ob);
bool BKE_pointcloud_minmax(const struct PointCloud *pointcloud, float r_min[3], float r_max[3]);
void BKE_pointcloud_update_customdata_pointers(struct PointCloud *pointcloud);
bool BKE_pointcloud_customdata_required(struct PointCloud *pointcloud,
struct CustomDataLayer *layer);
bool BKE_pointcloud_customdata_required(const struct PointCloud *pointcloud, const char *name);
/* Dependency Graph */

View File

@ -19,6 +19,10 @@ struct UnitSettings;
*/
size_t BKE_unit_value_as_string_adaptive(
char *str, int len_max, double value, int prec, int system, int type, bool split, bool pad);
/**
* Representation of a value in units. Negative precision is used to disable stripping of zeroes.
* This reduces text jumping when changing values.
*/
size_t BKE_unit_value_as_string(char *str,
int len_max,
double value,

View File

@ -628,14 +628,14 @@ static void mesh_calc_modifier_final_normals(const Mesh *mesh_input,
/* without this, drawing ngon tri's faces will show ugly tessellated face
* normals and will also have to calculate normals on the fly, try avoid
* this where possible since calculating polygon normals isn't fast,
* note that this isn't a problem for subsurf (only quads) or editmode
* note that this isn't a problem for subsurf (only quads) or edit-mode
* which deals with drawing differently. */
BKE_mesh_ensure_normals_for_display(mesh_final);
}
/* Some modifiers, like data-transfer, may generate those data as temp layer,
* we do not want to keep them, as they are used by display code when available
* (i.e. even if autosmooth is disabled). */
* (i.e. even if auto-smooth is disabled). */
if (CustomData_has_layer(&mesh_final->ldata, CD_NORMAL)) {
CustomData_free_layers(&mesh_final->ldata, CD_NORMAL, mesh_final->totloop);
}

View File

@ -980,13 +980,10 @@ void BKE_pose_channels_remove(Object *ob,
else {
/* Maybe something the bone references is being removed instead? */
for (con = pchan->constraints.first; con; con = con->next) {
const bConstraintTypeInfo *cti = BKE_constraint_typeinfo_get(con);
ListBase targets = {NULL, NULL};
bConstraintTarget *ct;
if (cti && cti->get_constraint_targets) {
cti->get_constraint_targets(con, &targets);
if (BKE_constraint_targets_get(con, &targets)) {
for (ct = targets.first; ct; ct = ct->next) {
if (ct->tar == ob) {
if (ct->subtarget[0]) {
@ -998,9 +995,7 @@ void BKE_pose_channels_remove(Object *ob,
}
}
if (cti->flush_constraint_targets) {
cti->flush_constraint_targets(con, &targets, 0);
}
BKE_constraint_targets_flush(con, &targets, 0);
}
}

View File

@ -4074,7 +4074,7 @@ void BKE_animsys_evaluate_all_animation(Main *main, Depsgraph *depsgraph, float
/* objects */
/* ADT_RECALC_ANIM doesn't need to be supplied here, since object AnimData gets
* this tagged by Depsgraph on framechange. This optimization means that objects
* this tagged by Depsgraph on frame-change. This optimization means that objects
* linked from other (not-visible) scenes will not need their data calculated.
*/
EVAL_ANIM_IDS(main->objects.first, 0);

View File

@ -89,23 +89,7 @@ static void get_domains(const ID *id, DomainInfo info[ATTR_DOMAIN_NUM])
}
}
static CustomData *attribute_customdata_find(ID *id, CustomDataLayer *layer)
{
DomainInfo info[ATTR_DOMAIN_NUM];
get_domains(id, info);
for (const int domain : IndexRange(ATTR_DOMAIN_NUM)) {
CustomData *customdata = info[domain].customdata;
if (customdata &&
ARRAY_HAS_ITEM((CustomDataLayer *)layer, customdata->layers, customdata->totlayer)) {
return customdata;
}
}
return nullptr;
}
bool BKE_id_attributes_supported(ID *id)
bool BKE_id_attributes_supported(const ID *id)
{
DomainInfo info[ATTR_DOMAIN_NUM];
get_domains(id, info);
@ -123,23 +107,26 @@ bool BKE_attribute_allow_procedural_access(const char *attribute_name)
}
bool BKE_id_attribute_rename(ID *id,
CustomDataLayer *layer,
const char *old_name,
const char *new_name,
ReportList *reports)
{
if (BKE_id_attribute_required(id, layer)) {
if (BKE_id_attribute_required(id, old_name)) {
BLI_assert_msg(0, "Required attribute name is not editable");
return false;
}
CustomData *customdata = attribute_customdata_find(id, layer);
if (customdata == nullptr) {
CustomDataLayer *layer = BKE_id_attribute_search(
id, old_name, CD_MASK_PROP_ALL, ATTR_DOMAIN_MASK_ALL);
if (layer == nullptr) {
BKE_report(reports, RPT_ERROR, "Attribute is not part of this geometry");
return false;
}
BLI_strncpy_utf8(layer->name, new_name, sizeof(layer->name));
CustomData_set_layer_unique_name(customdata, layer - customdata->layers);
char result_name[MAX_CUSTOMDATA_LAYER_NAME];
BKE_id_attribute_calc_unique_name(id, new_name, result_name);
BLI_strncpy_utf8(layer->name, result_name, sizeof(layer->name));
return true;
}
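A hypothetical usage sketch of the name-based signature; `mesh`, `reports` and the attribute names are stand-ins:
if (!BKE_id_attribute_rename(&mesh->id, "old_name", "new_name", reports)) {
  /* No attribute named "old_name" exists on this geometry, or the name is required. */
}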
@ -221,44 +208,90 @@ CustomDataLayer *BKE_id_attribute_new(
return (index == -1) ? nullptr : &(customdata->layers[index]);
}
bool BKE_id_attribute_remove(ID *id, CustomDataLayer *layer, ReportList *reports)
CustomDataLayer *BKE_id_attribute_duplicate(ID *id, CustomDataLayer *layer, ReportList *reports)
{
CustomData *customdata = attribute_customdata_find(id, layer);
const int index = (customdata) ?
CustomData_get_named_layer_index(customdata, layer->type, layer->name) :
-1;
DomainInfo info[ATTR_DOMAIN_NUM];
get_domains(id, info);
if (index == -1) {
BKE_report(reports, RPT_ERROR, "Attribute is not part of this geometry");
return false;
eCustomDataType type = (eCustomDataType)layer->type;
eAttrDomain domain = BKE_id_attribute_domain(id, layer);
CustomData *customdata = info[domain].customdata;
if (customdata == nullptr) {
BKE_report(reports, RPT_ERROR, "Attribute domain not supported by this geometry type");
return nullptr;
}
if (BKE_id_attribute_required(id, layer)) {
BKE_report(reports, RPT_ERROR, "Attribute is required and can't be removed");
return false;
}
char name[MAX_CUSTOMDATA_LAYER_NAME];
char uniquename[MAX_CUSTOMDATA_LAYER_NAME];
/* Make a copy of name in case CustomData API reallocates the layers. */
BLI_strncpy(name, layer->name, MAX_CUSTOMDATA_LAYER_NAME);
BKE_id_attribute_calc_unique_name(id, layer->name, uniquename);
switch (GS(id->name)) {
case ID_ME: {
Mesh *me = (Mesh *)id;
BMEditMesh *em = me->edit_mesh;
if (em != nullptr) {
BM_data_layer_free(em->bm, customdata, layer->type);
BM_data_layer_add_named(em->bm, customdata, type, uniquename);
}
else {
const int length = BKE_id_attribute_data_length(id, layer);
CustomData_free_layer(customdata, layer->type, length, index);
CustomData_add_layer_named(
customdata, type, CD_DEFAULT, nullptr, info[domain].length, uniquename);
}
break;
}
default: {
const int length = BKE_id_attribute_data_length(id, layer);
CustomData_free_layer(customdata, layer->type, length, index);
CustomData_add_layer_named(
customdata, type, CD_DEFAULT, nullptr, info[domain].length, uniquename);
break;
}
}
return true;
int from_index = CustomData_get_named_layer_index(customdata, type, name);
int to_index = CustomData_get_named_layer_index(customdata, type, uniquename);
CustomData_copy_data_layer(
customdata, customdata, from_index, to_index, 0, 0, info[domain].length);
return (to_index == -1) ? nullptr : &(customdata->layers[to_index]);
}
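A hedged sketch of duplicating an attribute layer found by name (the attribute name is illustrative):
CustomDataLayer *layer = BKE_id_attribute_search(
    id, "my_attribute", CD_MASK_PROP_ALL, ATTR_DOMAIN_MASK_ALL);
if (layer != nullptr) {
  /* The copy receives a unique name derived from the original. */
  CustomDataLayer *copy = BKE_id_attribute_duplicate(id, layer, reports);
  UNUSED_VARS(copy);
}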
bool BKE_id_attribute_remove(ID *id, const char *name, ReportList *reports)
{
if (BKE_id_attribute_required(id, name)) {
BKE_report(reports, RPT_ERROR, "Attribute is required and can't be removed");
return false;
}
DomainInfo info[ATTR_DOMAIN_NUM];
get_domains(id, info);
switch (GS(id->name)) {
case ID_ME: {
Mesh *mesh = reinterpret_cast<Mesh *>(id);
if (BMEditMesh *em = mesh->edit_mesh) {
for (const int domain : IndexRange(ATTR_DOMAIN_NUM)) {
if (CustomData *data = info[domain].customdata) {
if (BM_data_layer_free_named(em->bm, data, name)) {
return true;
}
}
}
return false;
}
ATTR_FALLTHROUGH;
}
default:
for (const int domain : IndexRange(ATTR_DOMAIN_NUM)) {
if (CustomData *data = info[domain].customdata) {
if (CustomData_free_layer_named(data, name, info[domain].length)) {
return true;
}
}
}
return false;
}
}
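And a matching sketch for removal by name (the attribute name is a stand-in; required names such as the position layer are refused):
if (!BKE_id_attribute_remove(id, "temp_attribute", reports)) {
  /* The attribute is required, or no layer with that name exists on any domain. */
}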
CustomDataLayer *BKE_id_attribute_find(const ID *id,
@ -299,8 +332,8 @@ CustomDataLayer *BKE_id_attribute_search(const ID *id,
}
CustomData *customdata = info[domain].customdata;
if (customdata == NULL) {
return NULL;
if (customdata == nullptr) {
continue;
}
for (int i = 0; i < customdata->totlayer; i++) {
@ -311,7 +344,7 @@ CustomDataLayer *BKE_id_attribute_search(const ID *id,
}
}
return NULL;
return nullptr;
}
int BKE_id_attributes_length(const ID *id,
@ -383,14 +416,14 @@ int BKE_id_attribute_data_length(ID *id, CustomDataLayer *layer)
return 0;
}
bool BKE_id_attribute_required(ID *id, CustomDataLayer *layer)
bool BKE_id_attribute_required(const ID *id, const char *name)
{
switch (GS(id->name)) {
case ID_PT: {
return BKE_pointcloud_customdata_required((PointCloud *)id, layer);
return BKE_pointcloud_customdata_required((const PointCloud *)id, name);
}
case ID_CV: {
return BKE_curves_customdata_required((Curves *)id, layer);
return BKE_curves_customdata_required((const Curves *)id, name);
}
default:
return false;
@ -556,9 +589,9 @@ int BKE_id_attribute_to_index(const ID *id,
continue;
}
CustomData *cdata = info[domains[i]].customdata;
const CustomData *cdata = info[domains[i]].customdata;
for (int j = 0; j < cdata->totlayer; j++) {
CustomDataLayer *layer_iter = cdata->layers + j;
const CustomDataLayer *layer_iter = cdata->layers + j;
if (!(CD_TYPE_AS_MASK(layer_iter->type) & layer_mask) ||
(layer_iter->flag & CD_FLAG_TEMPORARY)) {

View File

@ -6191,6 +6191,40 @@ bool BKE_constraint_is_nonlocal_in_liboverride(const Object *ob, const bConstrai
/* -------- Target-Matrix Stuff ------- */
int BKE_constraint_targets_get(struct bConstraint *con, struct ListBase *r_targets)
{
BLI_listbase_clear(r_targets);
const bConstraintTypeInfo *cti = BKE_constraint_typeinfo_get(con);
if (!cti) {
return 0;
}
int count = 0;
/* Constraint-specific targets. */
if (cti->get_constraint_targets) {
count = cti->get_constraint_targets(con, r_targets);
}
return count;
}
void BKE_constraint_targets_flush(struct bConstraint *con, struct ListBase *targets, bool no_copy)
{
const bConstraintTypeInfo *cti = BKE_constraint_typeinfo_get(con);
if (!cti) {
return;
}
/* Release the constraint-specific targets. */
if (cti->flush_constraint_targets) {
cti->flush_constraint_targets(con, targets, no_copy);
}
}
void BKE_constraint_target_matrix_get(struct Depsgraph *depsgraph,
Scene *scene,
bConstraint *con,

View File

@ -44,7 +44,6 @@
#include "BKE_lib_query.h"
#include "BKE_main.h"
#include "BKE_object.h"
#include "BKE_spline.hh"
#include "BKE_vfont.h"
#include "DEG_depsgraph.h"

View File

@ -89,6 +89,10 @@ static void curves_copy_data(Main *UNUSED(bmain), ID *id_dst, const ID *id_src,
dst.curve_offsets = static_cast<int *>(MEM_dupallocN(src.curve_offsets));
if (curves_src->surface_uv_map != nullptr) {
curves_dst->surface_uv_map = BLI_strdup(curves_src->surface_uv_map);
}
dst.runtime = MEM_new<bke::CurvesGeometryRuntime>(__func__);
dst.runtime->type_counts = src.runtime->type_counts;
@ -108,6 +112,7 @@ static void curves_free_data(ID *id)
BKE_curves_batch_cache_free(curves);
MEM_SAFE_FREE(curves->mat);
MEM_SAFE_FREE(curves->surface_uv_map);
}
static void curves_foreach_id(ID *id, LibraryForeachIDData *data)
@ -148,6 +153,8 @@ static void curves_blend_write(BlendWriter *writer, ID *id, const void *id_addre
BLO_write_int32_array(writer, curves->geometry.curve_num + 1, curves->geometry.curve_offsets);
BLO_write_string(writer, curves->surface_uv_map);
BLO_write_pointer_array(writer, curves->totcol, curves->mat);
if (curves->adt) {
BKE_animdata_blend_write(writer, curves->adt);
@ -167,6 +174,8 @@ static void curves_blend_read_data(BlendDataReader *reader, ID *id)
BLO_read_int32_array(reader, curves->geometry.curve_num + 1, &curves->geometry.curve_offsets);
BLO_read_data_address(reader, &curves->surface_uv_map);
curves->geometry.runtime = MEM_new<blender::bke::CurvesGeometryRuntime>(__func__);
/* Recalculate curve type count cache that isn't saved in files. */
@ -199,8 +208,8 @@ IDTypeInfo IDType_ID_CV = {
/*id_filter */ FILTER_ID_CV,
/*main_listbase_index */ INDEX_ID_CV,
/*struct_size */ sizeof(Curves),
/*name */ "Hair Curves",
/*name_plural */ "Hair Curves",
/*name */ "Curves",
/*name_plural */ "curves",
/*translation_context */ BLT_I18NCONTEXT_ID_CURVES,
/*flags */ IDTYPE_FLAGS_APPEND_IS_REUSABLE,
/*asset_type_info */ nullptr,
@ -264,9 +273,9 @@ BoundBox *BKE_curves_boundbox_get(Object *ob)
return ob->runtime.bb;
}
bool BKE_curves_customdata_required(Curves *UNUSED(curves), CustomDataLayer *layer)
bool BKE_curves_customdata_required(const Curves *UNUSED(curves), const char *name)
{
return layer->type == CD_PROP_FLOAT3 && STREQ(layer->name, ATTR_POSITION);
return STREQ(name, ATTR_POSITION);
}
Curves *BKE_curves_copy_for_eval(Curves *curves_src, bool reference)

View File

@ -18,6 +18,7 @@
#include "BKE_attribute_math.hh"
#include "BKE_curves.hh"
#include "BKE_curves_utils.hh"
namespace blender::bke {
@ -35,10 +36,9 @@ static const std::string ATTR_HANDLE_POSITION_RIGHT = "handle_right";
static const std::string ATTR_NURBS_ORDER = "nurbs_order";
static const std::string ATTR_NURBS_WEIGHT = "nurbs_weight";
static const std::string ATTR_NURBS_KNOTS_MODE = "knots_mode";
static const std::string ATTR_SURFACE_TRIANGLE_INDEX = "surface_triangle_index";
static const std::string ATTR_SURFACE_TRIANGLE_COORDINATE = "surface_triangle_coordinate";
static const std::string ATTR_SELECTION_POINT_FLOAT = ".selection_point_float";
static const std::string ATTR_SELECTION_CURVE_FLOAT = ".selection_curve_float";
static const std::string ATTR_SURFACE_UV_COORDINATE = "surface_uv_coordinate";
/* -------------------------------------------------------------------- */
/** \name Constructors/Destructor
@ -419,24 +419,14 @@ MutableSpan<int8_t> CurvesGeometry::nurbs_knots_modes_for_write()
return get_mutable_attribute<int8_t>(*this, ATTR_DOMAIN_CURVE, ATTR_NURBS_KNOTS_MODE, 0);
}
VArray<int> CurvesGeometry::surface_triangle_indices() const
Span<float2> CurvesGeometry::surface_uv_coords() const
{
return get_varray_attribute<int>(*this, ATTR_DOMAIN_CURVE, ATTR_SURFACE_TRIANGLE_INDEX, -1);
return get_span_attribute<float2>(*this, ATTR_DOMAIN_CURVE, ATTR_SURFACE_UV_COORDINATE);
}
MutableSpan<int> CurvesGeometry::surface_triangle_indices_for_write()
MutableSpan<float2> CurvesGeometry::surface_uv_coords_for_write()
{
return get_mutable_attribute<int>(*this, ATTR_DOMAIN_CURVE, ATTR_SURFACE_TRIANGLE_INDEX, -1);
}
Span<float2> CurvesGeometry::surface_triangle_coords() const
{
return get_span_attribute<float2>(*this, ATTR_DOMAIN_CURVE, ATTR_SURFACE_TRIANGLE_COORDINATE);
}
MutableSpan<float2> CurvesGeometry::surface_triangle_coords_for_write()
{
return get_mutable_attribute<float2>(*this, ATTR_DOMAIN_CURVE, ATTR_SURFACE_TRIANGLE_COORDINATE);
return get_mutable_attribute<float2>(*this, ATTR_DOMAIN_CURVE, ATTR_SURFACE_UV_COORDINATE);
}
VArray<float> CurvesGeometry::selection_point_float() const
@ -561,16 +551,8 @@ IndexMask CurvesGeometry::indices_for_curve_type(const CurveType type,
const IndexMask selection,
Vector<int64_t> &r_indices) const
{
if (this->curve_type_counts()[type] == this->curves_num()) {
return selection;
}
const VArray<int8_t> types = this->curve_types();
if (types.is_single()) {
return types.get_internal_single() == type ? IndexMask(this->curves_num()) : IndexMask(0);
}
Span<int8_t> types_span = types.get_internal_span();
return index_mask_ops::find_indices_based_on_predicate(
selection, 1024, r_indices, [&](const int index) { return types_span[index] == type; });
return curves::indices_for_type(
this->curve_types(), this->curve_type_counts(), type, selection, r_indices);
}
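A short sketch of the public wrapper, gathering the Bezier curves inside a selection (the helper name is hypothetical; the returned mask may reference `r_indices`, which must outlive it):
static blender::IndexMask selected_bezier_curves(const blender::bke::CurvesGeometry &curves,
                                                 const blender::IndexMask selection,
                                                 blender::Vector<int64_t> &r_indices)
{
  return curves.indices_for_curve_type(CURVE_TYPE_BEZIER, selection, r_indices);
}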
void CurvesGeometry::ensure_nurbs_basis_cache() const
@ -1118,6 +1100,165 @@ static void *ensure_customdata_layer(CustomData &custom_data,
&custom_data, data_type, CD_DEFAULT, nullptr, tot_elements, name.c_str());
}
static void copy_between_buffers(const CPPType &type,
const void *src_buffer,
void *dst_buffer,
const IndexRange src_range,
const IndexRange dst_range)
{
BLI_assert(src_range.size() == dst_range.size());
type.copy_construct_n(POINTER_OFFSET(src_buffer, type.size() * src_range.start()),
POINTER_OFFSET(dst_buffer, type.size() * dst_range.start()),
src_range.size());
}
template<typename T>
static void copy_with_map(const Span<T> src, const Span<int> map, MutableSpan<T> dst)
{
threading::parallel_for(map.index_range(), 1024, [&](const IndexRange range) {
for (const int i : range) {
dst[i] = src[map[i]];
}
});
}
static void copy_with_map(const GSpan src, const Span<int> map, GMutableSpan dst)
{
attribute_math::convert_to_static_type(src.type(), [&](auto dummy) {
using T = decltype(dummy);
copy_with_map(src.typed<T>(), map, dst.typed<T>());
});
}
/**
* Builds an array that for every point, contains the corresponding curve index.
*/
static Array<int> build_point_to_curve_map(const CurvesGeometry &curves)
{
Array<int> point_to_curve_map(curves.points_num());
threading::parallel_for(curves.curves_range(), 1024, [&](const IndexRange curves_range) {
for (const int i_curve : curves_range) {
point_to_curve_map.as_mutable_span().slice(curves.points_for_curve(i_curve)).fill(i_curve);
}
});
return point_to_curve_map;
}
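As a worked example with hypothetical sizes, a geometry whose curve offsets are {0, 3, 5} has two curves, and the resulting map is:
/* point index:        0  1  2  3  4
 * point_to_curve_map: 0  0  0  1  1   (points 0-2 belong to curve 0, points 3-4 to curve 1) */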
static CurvesGeometry copy_with_removed_points(const CurvesGeometry &curves,
const IndexMask points_to_delete)
{
/* Use a map from points to curves to facilitate using an #IndexMask input. */
const Array<int> point_to_curve_map = build_point_to_curve_map(curves);
const Vector<IndexRange> copy_point_ranges = points_to_delete.extract_ranges_invert(
curves.points_range());
/* For every range of points to copy, find the offset in the result curves point layers. */
int new_point_count = 0;
Array<int> copy_point_range_dst_offsets(copy_point_ranges.size());
for (const int i : copy_point_ranges.index_range()) {
copy_point_range_dst_offsets[i] = new_point_count;
new_point_count += copy_point_ranges[i].size();
}
BLI_assert(new_point_count == (curves.points_num() - points_to_delete.size()));
/* Find out how many non-deleted points there are in every curve. */
Array<int> curve_point_counts(curves.curves_num(), 0);
for (const IndexRange range : copy_point_ranges) {
for (const int point_i : range) {
curve_point_counts[point_to_curve_map[point_i]]++;
}
}
/* Build the offsets for the new curve points, skipping curves that had all points deleted.
* Also store the original indices of the corresponding input curves, to facilitate parallel
* copying of curve domain data. */
int new_curve_count = 0;
int curve_point_offset = 0;
Vector<int> new_curve_offsets;
Vector<int> new_curve_orig_indices;
new_curve_offsets.append(0);
for (const int i : curve_point_counts.index_range()) {
if (curve_point_counts[i] > 0) {
curve_point_offset += curve_point_counts[i];
new_curve_offsets.append(curve_point_offset);
new_curve_count++;
new_curve_orig_indices.append(i);
}
}
CurvesGeometry new_curves{new_point_count, new_curve_count};
threading::parallel_invoke(
/* Initialize curve offsets. */
[&]() { new_curves.offsets_for_write().copy_from(new_curve_offsets); },
/* Copy over point attributes. */
[&]() {
const CustomData &old_point_data = curves.point_data;
CustomData &new_point_data = new_curves.point_data;
for (const int layer_i : IndexRange(old_point_data.totlayer)) {
const CustomDataLayer &old_layer = old_point_data.layers[layer_i];
const eCustomDataType data_type = static_cast<eCustomDataType>(old_layer.type);
const CPPType &type = *bke::custom_data_type_to_cpp_type(data_type);
void *dst_buffer = ensure_customdata_layer(
new_point_data, old_layer.name, data_type, new_point_count);
threading::parallel_for(
copy_point_ranges.index_range(), 128, [&](const IndexRange ranges_range) {
for (const int range_i : ranges_range) {
const IndexRange src_range = copy_point_ranges[range_i];
copy_between_buffers(type,
old_layer.data,
dst_buffer,
src_range,
{copy_point_range_dst_offsets[range_i], src_range.size()});
}
});
}
},
/* Copy over curve attributes.
* In some cases points are just dissolved, so the number of
* curves will be the same. That could be optimized in the future. */
[&]() {
const CustomData &old_curve_data = curves.curve_data;
CustomData &new_curve_data = new_curves.curve_data;
for (const int layer_i : IndexRange(old_curve_data.totlayer)) {
const CustomDataLayer &old_layer = old_curve_data.layers[layer_i];
const eCustomDataType data_type = static_cast<eCustomDataType>(old_layer.type);
const CPPType &type = *bke::custom_data_type_to_cpp_type(data_type);
void *dst_buffer = ensure_customdata_layer(
new_curve_data, old_layer.name, data_type, new_curve_count);
if (new_curves.curves_num() == curves.curves_num()) {
type.copy_construct_n(old_layer.data, dst_buffer, new_curves.curves_num());
}
else {
copy_with_map({type, old_layer.data, curves.curves_num()},
new_curve_orig_indices,
{type, dst_buffer, new_curves.curves_num()});
}
}
});
new_curves.update_curve_types();
return new_curves;
}
void CurvesGeometry::remove_points(const IndexMask points_to_delete)
{
if (points_to_delete.is_empty()) {
return;
}
if (points_to_delete.size() == this->points_num()) {
*this = {};
return;
}
*this = copy_with_removed_points(*this, points_to_delete);
}
static CurvesGeometry copy_with_removed_curves(const CurvesGeometry &curves,
const IndexMask curves_to_delete)
{
@ -1177,20 +1318,17 @@ static CurvesGeometry copy_with_removed_curves(const CurvesGeometry &curves,
const eCustomDataType data_type = static_cast<eCustomDataType>(old_layer.type);
const CPPType &type = *bke::custom_data_type_to_cpp_type(data_type);
const void *src_buffer = old_layer.data;
void *dst_buffer = ensure_customdata_layer(
new_point_data, old_layer.name, data_type, new_tot_points);
threading::parallel_for(
old_curve_ranges.index_range(), 128, [&](const IndexRange ranges_range) {
for (const int range_i : ranges_range) {
const IndexRange old_point_range = old_point_ranges[range_i];
const IndexRange new_point_range = new_point_ranges[range_i];
type.copy_construct_n(
POINTER_OFFSET(src_buffer, type.size() * old_point_range.start()),
POINTER_OFFSET(dst_buffer, type.size() * new_point_range.start()),
old_point_range.size());
copy_between_buffers(type,
old_layer.data,
dst_buffer,
old_point_ranges[range_i],
new_point_ranges[range_i]);
}
});
}
@ -1204,20 +1342,17 @@ static CurvesGeometry copy_with_removed_curves(const CurvesGeometry &curves,
const eCustomDataType data_type = static_cast<eCustomDataType>(old_layer.type);
const CPPType &type = *bke::custom_data_type_to_cpp_type(data_type);
const void *src_buffer = old_layer.data;
void *dst_buffer = ensure_customdata_layer(
new_curve_data, old_layer.name, data_type, new_tot_points);
new_curve_data, old_layer.name, data_type, new_tot_curves);
threading::parallel_for(
old_curve_ranges.index_range(), 128, [&](const IndexRange ranges_range) {
for (const int range_i : ranges_range) {
const IndexRange old_curve_range = old_curve_ranges[range_i];
const IndexRange new_curve_range = new_curve_ranges[range_i];
type.copy_construct_n(
POINTER_OFFSET(src_buffer, type.size() * old_curve_range.start()),
POINTER_OFFSET(dst_buffer, type.size() * new_curve_range.start()),
old_curve_range.size());
copy_between_buffers(type,
old_layer.data,
dst_buffer,
old_curve_ranges[range_i],
new_curve_ranges[range_i]);
}
});
}
@ -1230,6 +1365,12 @@ static CurvesGeometry copy_with_removed_curves(const CurvesGeometry &curves,
void CurvesGeometry::remove_curves(const IndexMask curves_to_delete)
{
if (curves_to_delete.is_empty()) {
return;
}
if (curves_to_delete.size() == this->curves_num()) {
*this = {};
return;
}
*this = copy_with_removed_curves(*this, curves_to_delete);
}
@ -1333,6 +1474,27 @@ void CurvesGeometry::reverse_curves(const IndexMask curves_to_reverse)
this->tag_topology_changed();
}
void CurvesGeometry::remove_attributes_based_on_types()
{
const int points_num = this->points_num();
const int curves_num = this->curves_num();
if (!this->has_curve_with_type(CURVE_TYPE_BEZIER)) {
CustomData_free_layer_named(&this->point_data, ATTR_HANDLE_TYPE_LEFT.c_str(), points_num);
CustomData_free_layer_named(&this->point_data, ATTR_HANDLE_TYPE_RIGHT.c_str(), points_num);
CustomData_free_layer_named(&this->point_data, ATTR_HANDLE_POSITION_LEFT.c_str(), points_num);
CustomData_free_layer_named(&this->point_data, ATTR_HANDLE_POSITION_RIGHT.c_str(), points_num);
}
if (!this->has_curve_with_type(CURVE_TYPE_NURBS)) {
CustomData_free_layer_named(&this->point_data, ATTR_NURBS_WEIGHT.c_str(), points_num);
CustomData_free_layer_named(&this->curve_data, ATTR_NURBS_ORDER.c_str(), curves_num);
CustomData_free_layer_named(&this->curve_data, ATTR_NURBS_KNOTS_MODE.c_str(), curves_num);
}
if (!this->has_curve_with_type({CURVE_TYPE_BEZIER, CURVE_TYPE_CATMULL_ROM, CURVE_TYPE_NURBS})) {
CustomData_free_layer_named(&this->curve_data, ATTR_RESOLUTION.c_str(), curves_num);
}
this->update_customdata_pointers();
}
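A hedged sketch of how curve deletion and this cleanup compose (whether a particular caller also strips the unused layers is its own choice):
void delete_curves_and_cleanup(blender::bke::CurvesGeometry &curves,
                               const blender::IndexMask curves_to_delete)
{
  curves.remove_curves(curves_to_delete);
  /* Drop handle/NURBS/resolution layers that no remaining curve type needs. */
  curves.remove_attributes_based_on_types();
}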
/** \} */
/* -------------------------------------------------------------------- */

View File

@ -4,6 +4,8 @@
* \ingroup bke
*/
#include "BLI_index_mask_ops.hh"
#include "BKE_curves_utils.hh"
namespace blender::bke::curves {
@ -35,4 +37,86 @@ void accumulate_counts_to_offsets(MutableSpan<int> counts_to_offsets, const int
counts_to_offsets.last() = offset;
}
void copy_point_data(const CurvesGeometry &src_curves,
const CurvesGeometry &dst_curves,
const Span<IndexRange> curve_ranges,
const GSpan src,
GMutableSpan dst)
{
threading::parallel_for(curve_ranges.index_range(), 512, [&](IndexRange range) {
for (const IndexRange range : curve_ranges.slice(range)) {
const IndexRange src_points = src_curves.points_for_curves(range);
const IndexRange dst_points = dst_curves.points_for_curves(range);
/* The arrays might be large, so a threaded copy might make sense here too. */
dst.slice(dst_points).copy_from(src.slice(src_points));
}
});
}
void copy_point_data(const CurvesGeometry &src_curves,
const CurvesGeometry &dst_curves,
const IndexMask src_curve_selection,
const GSpan src,
GMutableSpan dst)
{
threading::parallel_for(src_curve_selection.index_range(), 512, [&](IndexRange range) {
for (const int i : src_curve_selection.slice(range)) {
const IndexRange src_points = src_curves.points_for_curve(i);
const IndexRange dst_points = dst_curves.points_for_curve(i);
/* The arrays might be large, so a threaded copy might make sense here too. */
dst.slice(dst_points).copy_from(src.slice(src_points));
}
});
}
void fill_points(const CurvesGeometry &curves,
const IndexMask curve_selection,
const GPointer value,
GMutableSpan dst)
{
BLI_assert(*value.type() == dst.type());
const CPPType &type = dst.type();
threading::parallel_for(curve_selection.index_range(), 512, [&](IndexRange range) {
for (const int i : curve_selection.slice(range)) {
const IndexRange points = curves.points_for_curve(i);
type.fill_assign_n(value.get(), dst.slice(curves.points_for_curve(i)).data(), points.size());
}
});
}
IndexMask indices_for_type(const VArray<int8_t> &types,
const std::array<int, CURVE_TYPES_NUM> &type_counts,
const CurveType type,
const IndexMask selection,
Vector<int64_t> &r_indices)
{
if (type_counts[type] == types.size()) {
return selection;
}
if (types.is_single()) {
return types.get_internal_single() == type ? IndexMask(types.size()) : IndexMask(0);
}
Span<int8_t> types_span = types.get_internal_span();
return index_mask_ops::find_indices_based_on_predicate(
selection, 4096, r_indices, [&](const int index) { return types_span[index] == type; });
}
void foreach_curve_by_type(const VArray<int8_t> &types,
const std::array<int, CURVE_TYPES_NUM> &counts,
const IndexMask selection,
FunctionRef<void(IndexMask)> catmull_rom_fn,
FunctionRef<void(IndexMask)> poly_fn,
FunctionRef<void(IndexMask)> bezier_fn,
FunctionRef<void(IndexMask)> nurbs_fn)
{
Vector<int64_t> catmull_rom;
Vector<int64_t> poly;
Vector<int64_t> bezier;
Vector<int64_t> nurbs;
catmull_rom_fn(indices_for_type(types, counts, CURVE_TYPE_CATMULL_ROM, selection, catmull_rom));
poly_fn(indices_for_type(types, counts, CURVE_TYPE_POLY, selection, poly));
bezier_fn(indices_for_type(types, counts, CURVE_TYPE_BEZIER, selection, bezier));
nurbs_fn(indices_for_type(types, counts, CURVE_TYPE_NURBS, selection, nurbs));
}
} // namespace blender::bke::curves

View File

@ -22,6 +22,7 @@
#include "BLI_color.hh"
#include "BLI_compiler_attrs.h"
#include "BLI_endian_switch.h"
#include "BLI_index_range.hh"
#include "BLI_math.h"
#include "BLI_math_color_blend.h"
#include "BLI_math_vector.hh"
@ -29,6 +30,7 @@
#include "BLI_path_util.h"
#include "BLI_span.hh"
#include "BLI_string.h"
#include "BLI_string_ref.hh"
#include "BLI_string_utils.h"
#include "BLI_utildefines.h"
@ -59,6 +61,7 @@
using blender::IndexRange;
using blender::Span;
using blender::StringRef;
using blender::Vector;
/* number of layers to add when growing a CustomData object */
@ -291,8 +294,8 @@ static void layerInterp_mdeformvert(const void **sources,
};
MDeformVert *dvert = static_cast<MDeformVert *>(dest);
struct MDeformWeight_Link *dest_dwlink = nullptr;
struct MDeformWeight_Link *node;
MDeformWeight_Link *dest_dwlink = nullptr;
MDeformWeight_Link *node;
/* build a list of unique def_nrs for dest */
int totweight = 0;
@ -319,7 +322,7 @@ static void layerInterp_mdeformvert(const void **sources,
/* if this def_nr is not in the list, add it */
if (!node) {
struct MDeformWeight_Link *tmp_dwlink = static_cast<MDeformWeight_Link *>(
MDeformWeight_Link *tmp_dwlink = static_cast<MDeformWeight_Link *>(
alloca(sizeof(*tmp_dwlink)));
tmp_dwlink->dw.def_nr = dw->def_nr;
tmp_dwlink->dw.weight = weight;
@ -2515,8 +2518,8 @@ void CustomData_copy_all_layout(const struct CustomData *source, struct CustomDa
CustomData_regen_active_refs(dest);
}
bool CustomData_merge(const struct CustomData *source,
struct CustomData *dest,
bool CustomData_merge(const CustomData *source,
CustomData *dest,
eCustomDataMask mask,
eCDAllocType alloctype,
int totelem)
@ -2623,8 +2626,8 @@ void CustomData_realloc(CustomData *data, int totelem)
}
}
void CustomData_copy(const struct CustomData *source,
struct CustomData *dest,
void CustomData_copy(const CustomData *source,
CustomData *dest,
eCustomDataMask mask,
eCDAllocType alloctype,
int totelem)
@ -2687,7 +2690,7 @@ void CustomData_free(CustomData *data, int totelem)
CustomData_reset(data);
}
void CustomData_free_typemask(struct CustomData *data, int totelem, eCustomDataMask mask)
void CustomData_free_typemask(CustomData *data, int totelem, eCustomDataMask mask)
{
for (int i = 0; i < data->totlayer; i++) {
CustomDataLayer *layer = &data->layers[i];
@ -2823,7 +2826,7 @@ int CustomData_get_layer_index(const CustomData *data, int type)
return data->typemap[type];
}
int CustomData_get_layer_index_n(const struct CustomData *data, int type, int n)
int CustomData_get_layer_index_n(const CustomData *data, int type, int n)
{
BLI_assert(n >= 0);
int i = CustomData_get_layer_index(data, type);
@ -2880,7 +2883,7 @@ int CustomData_get_stencil_layer_index(const CustomData *data, int type)
/* -------------------------------------------------------------------- */
/* index values per layer type */
int CustomData_get_named_layer(const struct CustomData *data, int type, const char *name)
int CustomData_get_named_layer(const CustomData *data, int type, const char *name)
{
const int named_index = CustomData_get_named_layer_index(data, type, name);
const int layer_index = data->typemap[type];
@ -2916,7 +2919,7 @@ int CustomData_get_stencil_layer(const CustomData *data, int type)
return (layer_index != -1) ? data->layers[layer_index].active_mask : -1;
}
const char *CustomData_get_active_layer_name(const struct CustomData *data, const int type)
const char *CustomData_get_active_layer_name(const CustomData *data, const int type)
{
/* Get the layer index of the active layer of this type. */
const int layer_index = CustomData_get_active_layer_index(data, type);
@ -3007,7 +3010,7 @@ void CustomData_set_layer_stencil_index(CustomData *data, int type, int n)
}
}
void CustomData_set_layer_flag(struct CustomData *data, int type, int flag)
void CustomData_set_layer_flag(CustomData *data, int type, int flag)
{
for (int i = 0; i < data->totlayer; i++) {
if (data->layers[i].type == type) {
@ -3016,7 +3019,7 @@ void CustomData_set_layer_flag(struct CustomData *data, int type, int flag)
}
}
void CustomData_clear_layer_flag(struct CustomData *data, int type, int flag)
void CustomData_clear_layer_flag(CustomData *data, int type, int flag)
{
const int nflag = ~flag;
@ -3191,7 +3194,7 @@ void *CustomData_add_layer_named(CustomData *data,
return nullptr;
}
void *CustomData_add_layer_anonymous(struct CustomData *data,
void *CustomData_add_layer_anonymous(CustomData *data,
int type,
eCDAllocType alloctype,
void *layerdata,
@ -3264,6 +3267,18 @@ bool CustomData_free_layer(CustomData *data, int type, int totelem, int index)
return true;
}
bool CustomData_free_layer_named(CustomData *data, const char *name, const int totelem)
{
for (const int i : IndexRange(data->totlayer)) {
const CustomDataLayer &layer = data->layers[i];
if (StringRef(layer.name) == name) {
CustomData_free_layer(data, layer.type, totelem, i);
return true;
}
}
return false;
}
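A minimal sketch mirroring the curves cleanup above: freeing a point-domain layer by name with that domain's element count (the geometry variable is assumed to be in scope, and the layer name is illustrative):
if (CustomData_free_layer_named(&curves.point_data, "nurbs_weight", curves.points_num())) {
  /* A layer with that name existed and has been freed. */
}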
bool CustomData_free_layer_active(CustomData *data, int type, int totelem)
{
const int index = CustomData_get_active_layer_index(data, type);
@ -3402,7 +3417,7 @@ void CustomData_duplicate_referenced_layers(CustomData *data, int totelem)
}
}
bool CustomData_is_referenced_layer(struct CustomData *data, int type)
bool CustomData_is_referenced_layer(CustomData *data, int type)
{
/* get the layer index of the first layer of type */
int layer_index = CustomData_get_active_layer_index(data, type);
@ -3472,7 +3487,7 @@ void CustomData_free_temporary(CustomData *data, int totelem)
}
}
void CustomData_set_only_copy(const struct CustomData *data, eCustomDataMask mask)
void CustomData_set_only_copy(const CustomData *data, eCustomDataMask mask)
{
for (int i = 0; i < data->totlayer; i++) {
if (!(mask & CD_TYPE_AS_MASK(data->layers[i].type))) {
@ -3727,7 +3742,7 @@ void CustomData_interp(const CustomData *source,
}
}
void CustomData_swap_corners(struct CustomData *data, int index, const int *corner_indices)
void CustomData_swap_corners(CustomData *data, int index, const int *corner_indices)
{
for (int i = 0; i < data->totlayer; i++) {
const LayerTypeInfo *typeInfo = layerType_getInfo(data->layers[i].type);
@ -3740,7 +3755,7 @@ void CustomData_swap_corners(struct CustomData *data, int index, const int *corn
}
}
void CustomData_swap(struct CustomData *data, const int index_a, const int index_b)
void CustomData_swap(CustomData *data, const int index_a, const int index_b)
{
char buff_static[256];
@ -3819,7 +3834,7 @@ void *CustomData_get_layer_n(const CustomData *data, int type, int n)
return data->layers[layer_index].data;
}
void *CustomData_get_layer_named(const struct CustomData *data, int type, const char *name)
void *CustomData_get_layer_named(const CustomData *data, int type, const char *name)
{
int layer_index = CustomData_get_named_layer_index(data, type, name);
if (layer_index == -1) {
@ -3893,7 +3908,7 @@ void *CustomData_set_layer(const CustomData *data, int type, void *ptr)
return ptr;
}
void *CustomData_set_layer_n(const struct CustomData *data, int type, int n, void *ptr)
void *CustomData_set_layer_n(const CustomData *data, int type, int n, void *ptr)
{
/* get the layer index of the first layer of type */
int layer_index = CustomData_get_layer_index_n(data, type, n);
@ -4593,7 +4608,7 @@ void *CustomData_bmesh_get_layer_n(const CustomData *data, void *block, int n)
return POINTER_OFFSET(block, data->layers[n].offset);
}
bool CustomData_layer_has_math(const struct CustomData *data, int layer_n)
bool CustomData_layer_has_math(const CustomData *data, int layer_n)
{
const LayerTypeInfo *typeInfo = layerType_getInfo(data->layers[layer_n].type);
@ -4605,7 +4620,7 @@ bool CustomData_layer_has_math(const struct CustomData *data, int layer_n)
return false;
}
bool CustomData_layer_has_interp(const struct CustomData *data, int layer_n)
bool CustomData_layer_has_interp(const CustomData *data, int layer_n)
{
const LayerTypeInfo *typeInfo = layerType_getInfo(data->layers[layer_n].type);
@ -4616,7 +4631,7 @@ bool CustomData_layer_has_interp(const struct CustomData *data, int layer_n)
return false;
}
bool CustomData_has_math(const struct CustomData *data)
bool CustomData_has_math(const CustomData *data)
{
/* interpolates a layer at a time */
for (int i = 0; i < data->totlayer; i++) {
@ -4628,7 +4643,7 @@ bool CustomData_has_math(const struct CustomData *data)
return false;
}
bool CustomData_bmesh_has_free(const struct CustomData *data)
bool CustomData_bmesh_has_free(const CustomData *data)
{
for (int i = 0; i < data->totlayer; i++) {
if (!(data->layers[i].flag & CD_FLAG_NOFREE)) {
@ -4641,7 +4656,7 @@ bool CustomData_bmesh_has_free(const struct CustomData *data)
return false;
}
bool CustomData_has_interp(const struct CustomData *data)
bool CustomData_has_interp(const CustomData *data)
{
/* interpolates a layer at a time */
for (int i = 0; i < data->totlayer; i++) {
@ -4653,7 +4668,7 @@ bool CustomData_has_interp(const struct CustomData *data)
return false;
}
bool CustomData_has_referenced(const struct CustomData *data)
bool CustomData_has_referenced(const CustomData *data)
{
for (int i = 0; i < data->totlayer; i++) {
if (data->layers[i].flag & CD_FLAG_NOFREE) {
@ -5163,7 +5178,7 @@ void CustomData_validate_layer_name(const CustomData *data,
}
}
bool CustomData_verify_versions(struct CustomData *data, int index)
bool CustomData_verify_versions(CustomData *data, int index)
{
const LayerTypeInfo *typeInfo;
CustomDataLayer *layer = &data->layers[index];

View File

@ -2024,13 +2024,13 @@ static Mesh *dynamicPaint_Modifier_apply(DynamicPaintModifierData *pmd, Object *
settings.use_threading = (sData->total_points > 1000);
BLI_task_parallel_range(
0, sData->total_points, &data, dynamic_paint_apply_surface_wave_cb, &settings);
BKE_mesh_normals_tag_dirty(mesh);
BKE_mesh_normals_tag_dirty(result);
}
/* displace */
if (surface->type == MOD_DPAINT_SURFACE_T_DISPLACE) {
dynamicPaint_applySurfaceDisplace(surface, result);
BKE_mesh_normals_tag_dirty(mesh);
BKE_mesh_normals_tag_dirty(result);
}
}
}

View File

@ -256,12 +256,11 @@ FCurve *BKE_fcurve_find(ListBase *list, const char rna_path[], const int array_i
/* Check paths of curves, then array indices... */
for (fcu = list->first; fcu; fcu = fcu->next) {
/* Check indices first, much cheaper than a string comparison. */
/* Simple string-compare (this assumes that they have the same root...) */
if (fcu->rna_path && STREQ(fcu->rna_path, rna_path)) {
/* Now check indices. */
if (fcu->array_index == array_index) {
return fcu;
}
if (UNLIKELY(fcu->array_index == array_index && fcu->rna_path &&
fcu->rna_path[0] == rna_path[0] && STREQ(fcu->rna_path, rna_path))) {
return fcu;
}
}
@ -1025,9 +1024,8 @@ static void UNUSED_FUNCTION(bezt_add_to_cfra_elem)(ListBase *lb, BezTriple *bezt
* \{ */
/* Some utilities for working with FPoints (i.e. 'sampled' animation curve data, such as
* data imported from BVH/Mocap files), which are specialized for use with high density datasets,
* which BezTriples/Keyframe data are ill equipped to do.
*/
* data imported from BVH/motion-capture files), which are specialized for use with high density
* datasets, which BezTriples/Keyframe data are ill equipped to do. */
float fcurve_samplingcb_evalcurve(FCurve *fcu, void *UNUSED(data), float evaltime)
{

View File

@ -12,7 +12,6 @@
#include "BKE_geometry_fields.hh"
#include "BKE_geometry_set.hh"
#include "BKE_lib_id.h"
#include "BKE_spline.hh"
#include "attribute_access_intern.hh"

View File

@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include "BLI_bounds.hh"
#include "BLI_map.hh"
#include "BLI_task.hh"
@ -15,7 +16,6 @@
#include "BKE_mesh_wrapper.h"
#include "BKE_modifier.h"
#include "BKE_pointcloud.h"
#include "BKE_spline.hh"
#include "BKE_volume.h"
#include "DNA_collection_types.h"
@ -175,6 +175,7 @@ Vector<const GeometryComponent *> GeometrySet::get_components_for_read() const
bool GeometrySet::compute_boundbox_without_instances(float3 *r_min, float3 *r_max) const
{
using namespace blender;
bool have_minmax = false;
if (const PointCloud *pointcloud = this->get_pointcloud_for_read()) {
have_minmax |= BKE_pointcloud_minmax(pointcloud, *r_min, *r_max);
@ -185,10 +186,16 @@ bool GeometrySet::compute_boundbox_without_instances(float3 *r_min, float3 *r_ma
if (const Volume *volume = this->get_volume_for_read()) {
have_minmax |= BKE_volume_min_max(volume, *r_min, *r_max);
}
if (const Curves *curves = this->get_curves_for_read()) {
std::unique_ptr<CurveEval> curve = curves_to_curve_eval(*curves);
if (const Curves *curves_id = this->get_curves_for_read()) {
const bke::CurvesGeometry &curves = bke::CurvesGeometry::wrap(curves_id->geometry);
/* Using the evaluated positions is somewhat arbitrary, but it is probably expected. */
have_minmax |= curve->bounds_min_max(*r_min, *r_max, true);
std::optional<bounds::MinMaxResult<float3>> min_max = bounds::min_max(
curves.evaluated_positions());
if (min_max) {
have_minmax = true;
*r_min = math::min(*r_min, min_max->min);
*r_max = math::max(*r_max, min_max->max);
}
}
return have_minmax;
}

View File

@ -168,7 +168,7 @@ void IDP_ResizeIDPArray(IDProperty *prop, int newlen)
/* NOTE: This code comes from python, here's the corresponding comment. */
/* This over-allocates proportional to the list size, making room
* for additional growth. The over-allocation is mild, but is
* for additional growth. The over-allocation is mild, but is
* enough to give linear-time amortized behavior over a long
* sequence of appends() in the presence of a poorly-performing
* system realloc().

View File

@ -3895,7 +3895,7 @@ static ImBuf *image_load_movie_file(Image *ima, ImageUser *iuser, int frame)
}
if (BKE_image_is_stereo(ima) && ima->views_format == R_IMF_VIEWS_STEREO_3D) {
IMB_ImBufFromStereo3d(ima->stereo3d_format, ibuf_arr[0], &ibuf_arr[0], &ibuf_arr[1]);
IMB_ImBufFromStereo3d(ima->stereo3d_format, ibuf_arr[0], ibuf_arr.data(), &ibuf_arr[1]);
}
for (int i = 0; i < totviews; i++) {
@ -4064,7 +4064,7 @@ static ImBuf *image_load_image_file(
/* multi-views/multi-layers OpenEXR files directly populate ima, and return null ibuf... */
if (BKE_image_is_stereo(ima) && ima->views_format == R_IMF_VIEWS_STEREO_3D && ibuf_arr[0] &&
tot_viewfiles == 1 && totviews >= 2) {
IMB_ImBufFromStereo3d(ima->stereo3d_format, ibuf_arr[0], &ibuf_arr[0], &ibuf_arr[1]);
IMB_ImBufFromStereo3d(ima->stereo3d_format, ibuf_arr[0], ibuf_arr.data(), &ibuf_arr[1]);
}
/* return the original requested ImBuf */

View File

@ -718,12 +718,31 @@ static void gpu_texture_update_from_ibuf(
int tex_offset = ibuf->channels * (y * ibuf->x + x);
const bool store_premultiplied = BKE_image_has_gpu_texture_premultiplied_alpha(ima, ibuf);
if (rect_float == nullptr) {
/* Byte pixels. */
if (!IMB_colormanagement_space_is_data(ibuf->rect_colorspace)) {
const bool compress_as_srgb = !IMB_colormanagement_space_is_scene_linear(
ibuf->rect_colorspace);
if (rect_float) {
/* Float image is already in scene linear colorspace or non-color data by
* convention, no colorspace conversion needed. But we do require 4 channels
* currently. */
if (ibuf->channels != 4 || scaled || !store_premultiplied) {
rect_float = (float *)MEM_mallocN(sizeof(float[4]) * w * h, __func__);
if (rect_float == nullptr) {
return;
}
tex_stride = w;
tex_offset = 0;
IMB_colormanagement_imbuf_to_float_texture(
rect_float, x, y, w, h, ibuf, store_premultiplied);
}
}
else {
/* Byte image is in original colorspace from the file, and may need conversion. */
if (IMB_colormanagement_space_is_data(ibuf->rect_colorspace) ||
IMB_colormanagement_space_is_scene_linear(ibuf->rect_colorspace)) {
/* Non-color data, just store buffer as is. */
}
else if (IMB_colormanagement_space_is_srgb(ibuf->rect_colorspace)) {
/* sRGB or scene linear, store as byte texture that the GPU can decode directly. */
rect = (uchar *)MEM_mallocN(sizeof(uchar[4]) * w * h, __func__);
if (rect == nullptr) {
return;
@ -734,13 +753,10 @@ static void gpu_texture_update_from_ibuf(
/* Convert to scene linear with sRGB compression, and premultiplied for
* correct texture interpolation. */
IMB_colormanagement_imbuf_to_byte_texture(
rect, x, y, w, h, ibuf, compress_as_srgb, store_premultiplied);
IMB_colormanagement_imbuf_to_byte_texture(rect, x, y, w, h, ibuf, store_premultiplied);
}
}
else {
/* Float pixels. */
if (ibuf->channels != 4 || scaled || !store_premultiplied) {
else {
/* Other colorspace, store as float texture to avoid precision loss. */
rect_float = (float *)MEM_mallocN(sizeof(float[4]) * w * h, __func__);
if (rect_float == nullptr) {
return;

View File

@ -274,6 +274,12 @@ static void image_save_post(ReportList *reports,
if (opts->do_newpath) {
BLI_strncpy(ibuf->name, filepath, sizeof(ibuf->name));
BLI_strncpy(ima->filepath, filepath, sizeof(ima->filepath));
/* only image path, never ibuf */
if (opts->relative) {
const char *relbase = ID_BLEND_PATH(opts->bmain, &ima->id);
BLI_path_rel(ima->filepath, relbase); /* only after saving */
}
}
ibuf->userflags &= ~IB_BITMAPDIRTY;
@ -303,12 +309,6 @@ static void image_save_post(ReportList *reports,
ima->type = IMA_TYPE_IMAGE;
}
/* only image path, never ibuf */
if (opts->relative) {
const char *relbase = ID_BLEND_PATH(opts->bmain, &ima->id);
BLI_path_rel(ima->filepath, relbase); /* only after saving */
}
/* Update image file color space when saving to another color space. */
const bool linear_float_output = BKE_imtype_requires_linear_float(opts->im_format.imtype);

View File

@ -21,8 +21,10 @@
#include "DEG_depsgraph.h"
#include "DEG_depsgraph_build.h"
#include "BKE_anim_data.h"
#include "BKE_armature.h"
#include "BKE_collection.h"
#include "BKE_fcurve.h"
#include "BKE_global.h"
#include "BKE_idtype.h"
#include "BKE_key.h"
@ -312,7 +314,6 @@ bool BKE_lib_override_library_is_user_edited(const ID *id)
bool BKE_lib_override_library_is_system_defined(const Main *bmain, const ID *id)
{
if (ID_IS_OVERRIDE_LIBRARY(id)) {
const ID *override_owner_id;
lib_override_get(bmain, id, &override_owner_id);
@ -322,6 +323,34 @@ bool BKE_lib_override_library_is_system_defined(const Main *bmain, const ID *id)
return false;
}
bool BKE_lib_override_library_property_is_animated(const ID *id,
const IDOverrideLibraryProperty *override_prop,
const PropertyRNA *override_rna_prop,
const int rnaprop_index)
{
AnimData *anim_data = BKE_animdata_from_id(id);
if (anim_data != nullptr) {
struct FCurve *fcurve;
char *index_token_start = const_cast<char *>(
RNA_path_array_index_token_find(override_prop->rna_path, override_rna_prop));
if (index_token_start != nullptr) {
const char index_token_start_backup = *index_token_start;
*index_token_start = '\0';
fcurve = BKE_animadata_fcurve_find_by_rna_path(
anim_data, override_prop->rna_path, rnaprop_index, nullptr, nullptr);
*index_token_start = index_token_start_backup;
}
else {
fcurve = BKE_animadata_fcurve_find_by_rna_path(
anim_data, override_prop->rna_path, 0, nullptr, nullptr);
}
if (fcurve != nullptr) {
return true;
}
}
return false;
}
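
Aside (not part of the commit): the new function above temporarily NUL-terminates the RNA path at the array-index token before looking up the F-Curve, then restores the character. A minimal standalone illustration of that truncate-search-restore trick; the sample path and lookup below are made up, not Blender code.

#include <cstdio>
#include <cstring>

/* Stand-in for the F-Curve lookup: only the base path (no index) matches. */
static bool base_path_exists(const char *path)
{
  return std::strcmp(path, "location") == 0;
}

static bool is_animated(char *rna_path, char *index_token_start)
{
  bool found;
  if (index_token_start != nullptr) {
    const char backup = *index_token_start;
    *index_token_start = '\0'; /* "location[1]" temporarily becomes "location". */
    found = base_path_exists(rna_path);
    *index_token_start = backup; /* Restore the original path in place. */
  }
  else {
    found = base_path_exists(rna_path);
  }
  return found;
}

int main()
{
  char path[] = "location[1]";
  /* In Blender, RNA_path_array_index_token_find() would locate this '['. */
  printf("%d\n", is_animated(path, std::strchr(path, '[')));
  return 0;
}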
static int foreachid_is_hierarchy_leaf_fn(LibraryIDLinkCallbackData *cb_data)
{
ID *id_owner = cb_data->id_owner;
@ -2423,7 +2452,7 @@ static int lib_override_sort_libraries_func(LibraryIDLinkCallbackData *cb_data)
if (id != nullptr && ID_IS_LINKED(id) && id->lib != id_owner->lib) {
const int owner_library_indirect_level = ID_IS_LINKED(id_owner) ? id_owner->lib->temp_index :
0;
if (owner_library_indirect_level > 200) {
if (owner_library_indirect_level > 100) {
CLOG_ERROR(&LOG,
"Levels of indirect usages of libraries is way too high, there are most likely "
"dependency loops, skipping further building loops (involves at least '%s' from "
@ -2434,6 +2463,16 @@ static int lib_override_sort_libraries_func(LibraryIDLinkCallbackData *cb_data)
id->lib->filepath);
return IDWALK_RET_NOP;
}
if (owner_library_indirect_level > 90) {
CLOG_WARN(
&LOG,
"Levels of indirect usages of libraries is suspiciously too high, there are most likely "
"dependency loops (involves at least '%s' from '%s' and '%s' from '%s')",
id_owner->name,
id_owner->lib->filepath,
id->name,
id->lib->filepath);
}
if (owner_library_indirect_level >= id->lib->temp_index) {
id->lib->temp_index = owner_library_indirect_level + 1;
@ -2667,11 +2706,12 @@ IDOverrideLibraryProperty *BKE_lib_override_library_property_get(IDOverrideLibra
bool BKE_lib_override_rna_property_find(PointerRNA *idpoin,
const IDOverrideLibraryProperty *library_prop,
PointerRNA *r_override_poin,
PropertyRNA **r_override_prop)
PropertyRNA **r_override_prop,
int *r_index)
{
BLI_assert(RNA_struct_is_ID(idpoin->type) && ID_IS_OVERRIDE_LIBRARY(idpoin->data));
return RNA_path_resolve_property(
idpoin, library_prop->rna_path, r_override_poin, r_override_prop);
return RNA_path_resolve_property_full(
idpoin, library_prop->rna_path, r_override_poin, r_override_prop, r_index);
}
void lib_override_library_property_copy(IDOverrideLibraryProperty *op_dst,

View File

@ -454,7 +454,7 @@ static void libblock_remap_reset_remapping_status_callback(ID *old_id,
* \param old_id: the data-block to dereference (may be NULL if \a id is non-NULL).
* \param new_id: the new data-block to replace \a old_id references with (may be NULL).
* \param r_id_remap_data: if non-NULL, the IDRemap struct to use
* (uselful to retrieve info about remapping process).
* (useful to retrieve info about remapping process).
*/
ATTR_NONNULL(1)
static void libblock_remap_data(Main *bmain,

View File

@ -55,7 +55,7 @@ static void linestyle_copy_data(Main *bmain, ID *id_dst, const ID *id_src, const
FreestyleLineStyle *linestyle_dst = (FreestyleLineStyle *)id_dst;
const FreestyleLineStyle *linestyle_src = (const FreestyleLineStyle *)id_src;
/* We never handle usercount here for own data. */
/* We never handle user-count here for own data. */
const int flag_subdata = flag | LIB_ID_CREATE_NO_USER_REFCOUNT;
/* We always need allocation of our private ID data. */
const int flag_private_id_data = flag & ~LIB_ID_CREATE_NO_ALLOCATE;

View File

@ -1448,7 +1448,7 @@ static bool fill_texpaint_slots_cb(bNode *node, void *userdata)
slot->attribute_name = storage->name;
if (storage->type == SHD_ATTRIBUTE_GEOMETRY) {
const Mesh *mesh = (const Mesh *)fill_data->ob->data;
CustomDataLayer *layer = BKE_id_attributes_color_find(&mesh->id, storage->name);
const CustomDataLayer *layer = BKE_id_attributes_color_find(&mesh->id, storage->name);
slot->valid = layer != NULL;
}

View File

@ -81,7 +81,7 @@ static void add_existing_edges_to_hash_maps(Mesh *mesh,
{
/* Assume existing edges are valid. */
threading::parallel_for_each(edge_maps, [&](EdgeMap &edge_map) {
const int task_index = &edge_map - &edge_maps[0];
const int task_index = &edge_map - edge_maps.data();
for (const MEdge &edge : Span(mesh->medge, mesh->totedge)) {
OrderedEdge ordered_edge{edge.v1, edge.v2};
/* Only add the edge when it belongs into this map. */
@ -98,7 +98,7 @@ static void add_polygon_edges_to_hash_maps(Mesh *mesh,
{
const Span<MLoop> loops{mesh->mloop, mesh->totloop};
threading::parallel_for_each(edge_maps, [&](EdgeMap &edge_map) {
const int task_index = &edge_map - &edge_maps[0];
const int task_index = &edge_map - edge_maps.data();
for (const MPoly &poly : Span(mesh->mpoly, mesh->totpoly)) {
Span<MLoop> poly_loops = loops.slice(poly.loopstart, poly.totloop);
const MLoop *prev_loop = &poly_loops.last();
@ -131,7 +131,7 @@ static void serialize_and_initialize_deduplicated_edges(MutableSpan<EdgeMap> edg
}
threading::parallel_for_each(edge_maps, [&](EdgeMap &edge_map) {
const int task_index = &edge_map - &edge_maps[0];
const int task_index = &edge_map - edge_maps.data();
int new_edge_index = edge_index_offsets[task_index];
for (EdgeMap::MutableItem item : edge_map.items()) {
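
Aside (not part of the commit): the three hunks above all switch from &edge_maps[0] to edge_maps.data() when deriving a task index from an element's address. A minimal standalone sketch of that pattern with a plain std::vector instead of Blender's spans, including why .data() is the safer spelling.

#include <cstdio>
#include <vector>

int main()
{
  std::vector<int> maps(4, 0);
  for (const int &map : maps) {
    /* Pointer difference recovers the element's index without a counter. */
    const int task_index = int(&map - maps.data());
    printf("task %d\n", task_index);
  }

  std::vector<int> empty;
  /* empty.data() may be null but is always valid to evaluate; &empty[0] would
   * index out of bounds on an empty container. */
  printf("%p\n", static_cast<const void *>(empty.data()));
  return 0;
}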

View File

@ -41,7 +41,6 @@
#include "BKE_mesh_runtime.h"
#include "BKE_mesh_wrapper.h"
#include "BKE_modifier.h"
#include "BKE_spline.hh"
/* these 2 are only used by conversion functions */
#include "BKE_curve.h"
/* -- */

View File

@ -345,7 +345,7 @@ void BKE_mesh_calc_normals_poly_and_vertex(const MVert *mvert,
const float (*BKE_mesh_vertex_normals_ensure(const Mesh *mesh))[3]
{
if (!(BKE_mesh_vertex_normals_are_dirty(mesh) || BKE_mesh_poly_normals_are_dirty(mesh))) {
if (!BKE_mesh_vertex_normals_are_dirty(mesh)) {
BLI_assert(mesh->runtime.vert_normals != nullptr || mesh->totvert == 0);
return mesh->runtime.vert_normals;
}
@ -356,7 +356,7 @@ const float (*BKE_mesh_vertex_normals_ensure(const Mesh *mesh))[3]
ThreadMutex *normals_mutex = (ThreadMutex *)mesh->runtime.normals_mutex;
BLI_mutex_lock(normals_mutex);
if (!(BKE_mesh_vertex_normals_are_dirty(mesh) || BKE_mesh_poly_normals_are_dirty(mesh))) {
if (!BKE_mesh_vertex_normals_are_dirty(mesh)) {
BLI_assert(mesh->runtime.vert_normals != nullptr);
BLI_mutex_unlock(normals_mutex);
return mesh->runtime.vert_normals;
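
Aside (not part of the commit): the hunks above keep the existing lazy pattern, an unlocked dirty check, then the mutex, then a second check before computing. A minimal standalone sketch of that double-checked lazy cache, using std::mutex and std::atomic rather than Blender's ThreadMutex and runtime flags.

#include <atomic>
#include <mutex>
#include <vector>

struct NormalsCache {
  std::atomic<bool> dirty{true};
  std::mutex mutex;
  std::vector<float> normals; /* Stand-in for mesh->runtime.vert_normals. */

  const std::vector<float> &ensure()
  {
    /* Fast path: no lock taken when the cache is already valid. */
    if (!dirty.load(std::memory_order_acquire)) {
      return normals;
    }
    std::lock_guard<std::mutex> lock(mutex);
    /* Another thread may have filled the cache while we waited on the lock. */
    if (!dirty.load(std::memory_order_relaxed)) {
      return normals;
    }
    normals.assign(3, 0.0f); /* Placeholder for the actual normal computation. */
    dirty.store(false, std::memory_order_release);
    return normals;
  }
};

int main()
{
  NormalsCache cache;
  (void)cache.ensure(); /* Computes once. */
  (void)cache.ensure(); /* Returns the cached data. */
  return 0;
}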

View File

@ -4763,6 +4763,8 @@ static void registerGeometryNodes()
register_node_type_geo_input_curve_tilt();
register_node_type_geo_input_id();
register_node_type_geo_input_index();
register_node_type_geo_input_instance_rotation();
register_node_type_geo_input_instance_scale();
register_node_type_geo_input_material_index();
register_node_type_geo_input_material();
register_node_type_geo_input_mesh_edge_angle();

View File

@ -2492,21 +2492,16 @@ static void copy_object_pose(Object *obn, const Object *ob, const int flag)
* BKE_library_remap stuff, but...
* the flush_constraint_targets callback I am not sure about, so will delay that for now. */
LISTBASE_FOREACH (bConstraint *, con, &chan->constraints) {
const bConstraintTypeInfo *cti = BKE_constraint_typeinfo_get(con);
ListBase targets = {nullptr, nullptr};
if (cti && cti->get_constraint_targets) {
cti->get_constraint_targets(con, &targets);
if (BKE_constraint_targets_get(con, &targets)) {
LISTBASE_FOREACH (bConstraintTarget *, ct, &targets) {
if (ct->tar == ob) {
ct->tar = obn;
}
}
if (cti->flush_constraint_targets) {
cti->flush_constraint_targets(con, &targets, false);
}
BKE_constraint_targets_flush(con, &targets, false);
}
}
}
@ -5499,11 +5494,9 @@ bool BKE_object_modifier_update_subframe(Depsgraph *depsgraph,
/* also update constraint targets */
LISTBASE_FOREACH (bConstraint *, con, &ob->constraints) {
const bConstraintTypeInfo *cti = BKE_constraint_typeinfo_get(con);
ListBase targets = {nullptr, nullptr};
if (cti && cti->get_constraint_targets) {
cti->get_constraint_targets(con, &targets);
if (BKE_constraint_targets_get(con, &targets)) {
LISTBASE_FOREACH (bConstraintTarget *, ct, &targets) {
if (ct->tar) {
BKE_object_modifier_update_subframe(
@ -5511,9 +5504,7 @@ bool BKE_object_modifier_update_subframe(Depsgraph *depsgraph,
}
}
/* free temp targets */
if (cti->flush_constraint_targets) {
cti->flush_constraint_targets(con, &targets, false);
}
BKE_constraint_targets_flush(con, &targets, false);
}
}
}
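
Aside (not part of the commit): both hunks above replace the per-type cti->get_constraint_targets / cti->flush_constraint_targets callbacks with the BKE_constraint_targets_get() / BKE_constraint_targets_flush() wrappers. A hypothetical usage sketch of the new pair, assuming only the calls visible in this diff; it is not compilable outside the Blender tree.

#include "BKE_constraint.h"
#include "BLI_listbase.h"
#include "DNA_constraint_types.h"
#include "DNA_object_types.h"

/* Hypothetical helper: retarget every constraint in a list from one object to
 * another, using the wrapper API shown above. */
static void retarget_constraints(ListBase *constraints, Object *old_ob, Object *new_ob)
{
  LISTBASE_FOREACH (bConstraint *, con, constraints) {
    ListBase targets = {nullptr, nullptr};
    if (BKE_constraint_targets_get(con, &targets)) {
      LISTBASE_FOREACH (bConstraintTarget *, ct, &targets) {
        if (ct->tar == old_ob) {
          ct->tar = new_ob;
        }
      }
      /* Hand the (possibly modified) targets back and free the temporary list. */
      BKE_constraint_targets_flush(con, &targets, false);
    }
  }
}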

View File

@ -2411,7 +2411,7 @@ void BKE_sculpt_ensure_orig_mesh_data(Scene *scene, Object *object)
/* Copy the current mesh visibility to the Face Sets. */
BKE_sculpt_face_sets_ensure_from_base_mesh_visibility(mesh);
if (object->sculpt != NULL) {
/* If a sculpt session is active, ensure we have its faceset data porperly up-to-date. */
/* If a sculpt session is active, ensure we have its face-set data properly up-to-date. */
object->sculpt->face_sets = CustomData_get_layer(&mesh->pdata, CD_SCULPT_FACE_SETS);
/* NOTE: In theory we could add that on the fly when required by sculpt code.
@ -2836,7 +2836,7 @@ void BKE_sculpt_bvh_update_from_ccg(PBVH *pbvh, SubdivCCG *subdiv_ccg)
subdiv_ccg->grid_hidden);
}
bool BKE_sculptsession_use_pbvh_draw(const Object *ob, const View3D *v3d)
bool BKE_sculptsession_use_pbvh_draw(const Object *ob, const View3D *UNUSED(v3d))
{
SculptSession *ss = ob->sculpt;
if (ss == NULL || ss->pbvh == NULL || ss->mode_type != OB_MODE_SCULPT) {
@ -2851,6 +2851,7 @@ bool BKE_sculptsession_use_pbvh_draw(const Object *ob, const View3D *v3d)
if (BKE_pbvh_type(ss->pbvh) == PBVH_FACES) {
/* Regular mesh only draws from PBVH without modifiers and shape keys. */
return !(ss->shapekey_active || ss->deform_modifiers_active);
}

View File

@ -2276,7 +2276,7 @@ void psys_emitter_customdata_mask(ParticleSystem *psys, CustomData_MeshMasks *r_
r_cddata_masks->fmask |= CD_MASK_MTFACE;
}
/* ask for vertexgroups if we need them */
/* Ask for vertex-groups if we need them. */
for (i = 0; i < PSYS_TOT_VG; i++) {
if (psys->vgroup[i]) {
r_cddata_masks->vmask |= CD_MASK_MDEFORMVERT;

Some files were not shown because too many files have changed in this diff.