Merge branch 'master' into sculpt-dev

Joseph Eagar 2021-11-14 02:35:23 -08:00
commit e1cf0657d8
310 changed files with 3904 additions and 2168 deletions


@ -440,7 +440,11 @@ mark_as_advanced(WITH_CYCLES_CUDA_BUILD_SERIAL)
mark_as_advanced(WITH_CUDA_DYNLOAD)
# AMD HIP
option(WITH_CYCLES_DEVICE_HIP "Enable Cycles AMD HIP support" OFF)
if(WIN32)
option(WITH_CYCLES_DEVICE_HIP "Enable Cycles AMD HIP support" ON)
else()
option(WITH_CYCLES_DEVICE_HIP "Enable Cycles AMD HIP support" OFF)
endif()
option(WITH_CYCLES_HIP_BINARIES "Build Cycles AMD HIP binaries" OFF)
set(CYCLES_HIP_BINARIES_ARCH gfx1010 gfx1011 gfx1012 gfx1030 gfx1031 gfx1032 gfx1034 CACHE STRING "AMD HIP architectures to build binaries for")
mark_as_advanced(WITH_CYCLES_DEVICE_HIP)


@ -81,4 +81,5 @@ if(NOT APPLE)
set(WITH_CYCLES_DEVICE_OPTIX ON CACHE BOOL "" FORCE)
set(WITH_CYCLES_CUDA_BINARIES ON CACHE BOOL "" FORCE)
set(WITH_CYCLES_CUBIN_COMPILER OFF CACHE BOOL "" FORCE)
set(WITH_CYCLES_HIP_BINARIES ON CACHE BOOL "" FORCE)
endif()


@ -325,6 +325,13 @@ class CyclesRenderSettings(bpy.types.PropertyGroup):
default=1024,
)
sample_offset: IntProperty(
name="Sample Offset",
description="Number of samples to skip when starting render",
min=0, max=(1 << 24),
default=0,
)
time_limit: FloatProperty(
name="Time Limit",
description="Limit the render time (excluding synchronization time)."
@ -1419,10 +1426,9 @@ class CyclesPreferences(bpy.types.AddonPreferences):
col.label(text="and NVIDIA driver version 470 or newer", icon='BLANK1')
elif device_type == 'HIP':
import sys
col.label(text="Requires discrete AMD GPU with RDNA2 architecture", icon='BLANK1')
# TODO: provide driver version info.
#if sys.platform[:3] == "win":
# col.label(text="and AMD driver version ??? or newer", icon='BLANK1')
col.label(text="Requires discrete AMD GPU with RDNA architecture", icon='BLANK1')
if sys.platform[:3] == "win":
col.label(text="and AMD Radeon Pro 21.Q4 driver or newer", icon='BLANK1')
return
for device in devices:


@ -290,6 +290,9 @@ class CYCLES_RENDER_PT_sampling_advanced(CyclesButtonsPanel, Panel):
col.active = not (cscene.use_adaptive_sampling and cscene.use_preview_adaptive_sampling)
col.prop(cscene, "sampling_pattern", text="Pattern")
col = layout.column(align=True)
col.prop(cscene, "sample_offset")
layout.separator()
col = layout.column(align=True)
@ -1051,7 +1054,7 @@ class CYCLES_OBJECT_PT_motion_blur(CyclesButtonsPanel, Panel):
def has_geometry_visibility(ob):
return ob and ((ob.type in {'MESH', 'CURVE', 'SURFACE', 'FONT', 'META', 'LIGHT'}) or
return ob and ((ob.type in {'MESH', 'CURVE', 'SURFACE', 'FONT', 'META', 'LIGHT', 'VOLUME', 'POINTCLOUD', 'HAIR'}) or
(ob.instance_type == 'COLLECTION' and ob.instance_collection))


@ -62,15 +62,15 @@ bool BlenderSync::BKE_object_is_modified(BL::Object &b_ob)
return false;
}
bool BlenderSync::object_is_geometry(BL::Object &b_ob)
bool BlenderSync::object_is_geometry(BObjectInfo &b_ob_info)
{
BL::ID b_ob_data = b_ob.data();
BL::ID b_ob_data = b_ob_info.object_data;
if (!b_ob_data) {
return false;
}
BL::Object::type_enum type = b_ob.type();
BL::Object::type_enum type = b_ob_info.iter_object.type();
if (type == BL::Object::type_VOLUME || type == BL::Object::type_HAIR) {
/* Will be exported attached to mesh. */
@ -87,6 +87,24 @@ bool BlenderSync::object_is_geometry(BL::Object &b_ob)
return b_ob_data.is_a(&RNA_Mesh);
}
bool BlenderSync::object_can_have_geometry(BL::Object &b_ob)
{
BL::Object::type_enum type = b_ob.type();
switch (type) {
case BL::Object::type_MESH:
case BL::Object::type_CURVE:
case BL::Object::type_SURFACE:
case BL::Object::type_META:
case BL::Object::type_FONT:
case BL::Object::type_HAIR:
case BL::Object::type_POINTCLOUD:
case BL::Object::type_VOLUME:
return true;
default:
return false;
}
}
bool BlenderSync::object_is_light(BL::Object &b_ob)
{
BL::ID b_ob_data = b_ob.data();
@ -189,7 +207,7 @@ Object *BlenderSync::sync_object(BL::Depsgraph &b_depsgraph,
}
/* only interested in object that we can create meshes from */
if (!object_is_geometry(b_ob)) {
if (!object_is_geometry(b_ob_info)) {
return NULL;
}
@ -276,7 +294,7 @@ Object *BlenderSync::sync_object(BL::Depsgraph &b_depsgraph,
object->set_visibility(visibility);
object->set_is_shadow_catcher(b_ob.is_shadow_catcher());
object->set_is_shadow_catcher(b_ob.is_shadow_catcher() || b_parent.is_shadow_catcher());
float shadow_terminator_shading_offset = get_float(cobject, "shadow_terminator_offset");
object->set_shadow_terminator_shading_offset(shadow_terminator_shading_offset);


@ -606,6 +606,19 @@ void BlenderSession::bake(BL::Depsgraph &b_depsgraph_,
pass->set_type(bake_type_to_pass(bake_type, bake_filter));
pass->set_include_albedo((bake_filter & BL::BakeSettings::pass_filter_COLOR));
if (pass->get_type() == PASS_COMBINED) {
/* Filtering settings for combined pass. */
Integrator *integrator = scene->integrator;
integrator->set_use_direct_light((bake_filter & BL::BakeSettings::pass_filter_DIRECT) != 0);
integrator->set_use_indirect_light((bake_filter & BL::BakeSettings::pass_filter_INDIRECT) !=
0);
integrator->set_use_diffuse((bake_filter & BL::BakeSettings::pass_filter_DIFFUSE) != 0);
integrator->set_use_glossy((bake_filter & BL::BakeSettings::pass_filter_GLOSSY) != 0);
integrator->set_use_transmission((bake_filter & BL::BakeSettings::pass_filter_TRANSMISSION) !=
0);
integrator->set_use_emission((bake_filter & BL::BakeSettings::pass_filter_EMIT) != 0);
}
session->set_display_driver(nullptr);
session->set_output_driver(make_unique<BlenderOutputDriver>(b_engine));


@ -162,19 +162,19 @@ void BlenderSync::sync_recalc(BL::Depsgraph &b_depsgraph, BL::SpaceView3D &b_v3d
/* Object */
else if (b_id.is_a(&RNA_Object)) {
BL::Object b_ob(b_id);
const bool is_geometry = object_is_geometry(b_ob);
const bool is_light = !is_geometry && object_is_light(b_ob);
const bool can_have_geometry = object_can_have_geometry(b_ob);
const bool is_light = !can_have_geometry && object_is_light(b_ob);
if (b_ob.is_instancer() && b_update.is_updated_shading()) {
/* Needed for e.g. object color updates on instancer. */
object_map.set_recalc(b_ob);
}
if (is_geometry || is_light) {
if (can_have_geometry || is_light) {
const bool updated_geometry = b_update.is_updated_geometry();
/* Geometry (mesh, hair, volume). */
if (is_geometry) {
if (can_have_geometry) {
if (b_update.is_updated_transform() || b_update.is_updated_shading()) {
object_map.set_recalc(b_ob);
}
@ -835,18 +835,25 @@ SessionParams BlenderSync::get_session_params(BL::RenderEngine &b_engine,
/* samples */
int samples = get_int(cscene, "samples");
int preview_samples = get_int(cscene, "preview_samples");
int sample_offset = get_int(cscene, "sample_offset");
if (background) {
params.samples = samples;
params.sample_offset = sample_offset;
}
else {
params.samples = preview_samples;
if (params.samples == 0)
if (params.samples == 0) {
params.samples = INT_MAX;
}
params.sample_offset = 0;
}
/* Clamp sample offset. */
params.sample_offset = clamp(params.sample_offset, 0, Integrator::MAX_SAMPLES);
/* Clamp samples. */
params.samples = min(params.samples, Integrator::MAX_SAMPLES);
params.samples = clamp(params.samples, 0, Integrator::MAX_SAMPLES - params.sample_offset);
/* Viewport Performance */
params.pixel_size = b_engine.get_preview_pixel_size(b_scene);
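
A small host-side sketch of the clamping above, with hypothetical values (clamp_int stands in for Cycles' clamp):

#include <algorithm>
#include <cstdio>

static int clamp_int(int x, int lo, int hi)
{
  return std::max(lo, std::min(x, hi));
}

int main()
{
  const int MAX_SAMPLES = 16; /* stand-in for Integrator::MAX_SAMPLES */
  const int sample_offset = clamp_int(12, 0, MAX_SAMPLES);          /* 12 */
  const int samples = clamp_int(8, 0, MAX_SAMPLES - sample_offset); /*  4 */
  /* offset + samples never exceeds the cap: 12 + 4 <= 16. */
  printf("offset=%d samples=%d\n", sample_offset, samples);
  return 0;
}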


@ -208,7 +208,8 @@ class BlenderSync {
/* util */
void find_shader(BL::ID &id, array<Node *> &used_shaders, Shader *default_shader);
bool BKE_object_is_modified(BL::Object &b_ob);
bool object_is_geometry(BL::Object &b_ob);
bool object_is_geometry(BObjectInfo &b_ob_info);
bool object_can_have_geometry(BL::Object &b_ob);
bool object_is_light(BL::Object &b_ob);
/* variables */


@ -42,7 +42,7 @@ class CPUKernels {
IntegratorInitFunction integrator_init_from_camera;
IntegratorInitFunction integrator_init_from_bake;
IntegratorFunction integrator_intersect_closest;
IntegratorShadeFunction integrator_intersect_closest;
IntegratorFunction integrator_intersect_shadow;
IntegratorFunction integrator_intersect_subsurface;
IntegratorFunction integrator_intersect_volume_stack;


@ -931,7 +931,6 @@ void CUDADevice::tex_alloc(device_texture &mem)
{
CUDAContextScope scope(this);
/* General variables for both architectures */
string bind_name = mem.name;
size_t dsize = datatype_size(mem.data_type);
size_t size = mem.memory_size();
@ -1094,7 +1093,6 @@ void CUDADevice::tex_alloc(device_texture &mem)
if (mem.info.data_type != IMAGE_DATA_TYPE_NANOVDB_FLOAT &&
mem.info.data_type != IMAGE_DATA_TYPE_NANOVDB_FLOAT3) {
/* Kepler+, bindless textures. */
CUDA_RESOURCE_DESC resDesc;
memset(&resDesc, 0, sizeof(resDesc));


@ -154,7 +154,7 @@ bool HIPDevice::support_device(const uint /*kernel_features*/)
hipDeviceProp_t props;
hipGetDeviceProperties(&props, hipDevId);
set_error(string_printf("HIP backend requires AMD RDNA2 graphics card or up, but found %s.",
set_error(string_printf("HIP backend requires AMD RDNA graphics card or up, but found %s.",
props.name));
return false;
}
@ -222,7 +222,6 @@ string HIPDevice::compile_kernel_get_common_cflags(const uint kernel_features)
const string include_path = source_path;
string cflags = string_printf(
"-m%d "
"--ptxas-options=\"-v\" "
"--use_fast_math "
"-DHIPCC "
"-I\"%s\"",
@ -234,10 +233,7 @@ string HIPDevice::compile_kernel_get_common_cflags(const uint kernel_features)
return cflags;
}
string HIPDevice::compile_kernel(const uint kernel_features,
const char *name,
const char *base,
bool force_ptx)
string HIPDevice::compile_kernel(const uint kernel_features, const char *name, const char *base)
{
/* Compute kernel name. */
int major, minor;
@ -247,7 +243,7 @@ string HIPDevice::compile_kernel(const uint kernel_features,
hipGetDeviceProperties(&props, hipDevId);
/* gcnArchName can contain tokens after the arch name with features, ie.
"gfx1010:sramecc-:xnack-" so we tokenize it to get the first part. */
* `gfx1010:sramecc-:xnack-` so we tokenize it to get the first part. */
char *arch = strtok(props.gcnArchName, ":");
if (arch == NULL) {
arch = props.gcnArchName;
@ -255,13 +251,11 @@ string HIPDevice::compile_kernel(const uint kernel_features,
/* Attempt to use kernel provided with Blender. */
if (!use_adaptive_compilation()) {
if (!force_ptx) {
const string fatbin = path_get(string_printf("lib/%s_%s.fatbin", name, arch));
VLOG(1) << "Testing for pre-compiled kernel " << fatbin << ".";
if (path_exists(fatbin)) {
VLOG(1) << "Using precompiled kernel.";
return fatbin;
}
const string fatbin = path_get(string_printf("lib/%s_%s.fatbin", name, arch));
VLOG(1) << "Testing for pre-compiled kernel " << fatbin << ".";
if (path_exists(fatbin)) {
VLOG(1) << "Using precompiled kernel.";
return fatbin;
}
}
@ -298,9 +292,9 @@ string HIPDevice::compile_kernel(const uint kernel_features,
# ifdef _WIN32
if (!use_adaptive_compilation() && have_precompiled_kernels()) {
if (major < 3) {
if (!hipSupportsDevice(hipDevId)) {
set_error(
string_printf("HIP backend requires compute capability 3.0 or up, but found %d.%d. "
string_printf("HIP backend requires compute capability 10.1 or up, but found %d.%d. "
"Your GPU is not supported.",
major,
minor));
@ -380,10 +374,9 @@ string HIPDevice::compile_kernel(const uint kernel_features,
bool HIPDevice::load_kernels(const uint kernel_features)
{
/* TODO(sergey): Support kernels re-load for CUDA devices adaptive compile.
/* TODO(sergey): Support kernels re-load for HIP devices adaptive compile.
*
* Currently re-loading kernel will invalidate memory pointers,
* causing problems in cuCtxSynchronize.
* Currently re-loading kernels will invalidate memory pointers.
*/
if (hipModule) {
if (use_adaptive_compilation()) {
@ -904,7 +897,6 @@ void HIPDevice::tex_alloc(device_texture &mem)
{
HIPContextScope scope(this);
/* General variables for both architectures */
string bind_name = mem.name;
size_t dsize = datatype_size(mem.data_type);
size_t size = mem.memory_size();
@ -1069,7 +1061,6 @@ void HIPDevice::tex_alloc(device_texture &mem)
if (mem.info.data_type != IMAGE_DATA_TYPE_NANOVDB_FLOAT &&
mem.info.data_type != IMAGE_DATA_TYPE_NANOVDB_FLOAT3) {
/* Kepler+, bindless textures. */
hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
@ -1160,6 +1151,8 @@ bool HIPDevice::should_use_graphics_interop()
* possible, but from the empiric measurements it can be considerably slower than using naive
* pixels copy. */
/* Disable graphics interop for now, because of driver bug in 21.40. See T92972 */
# if 0
HIPContextScope scope(this);
int num_all_devices = 0;
@ -1178,6 +1171,7 @@ bool HIPDevice::should_use_graphics_interop()
return true;
}
}
# endif
return false;
}


@ -93,10 +93,7 @@ class HIPDevice : public Device {
virtual string compile_kernel_get_common_cflags(const uint kernel_features);
string compile_kernel(const uint kernel_features,
const char *name,
const char *base = "hip",
bool force_ptx = false);
string compile_kernel(const uint kernel_features, const char *name, const char *base = "hip");
virtual bool load_kernels(const uint kernel_features) override;
void reserve_local_memory(const uint kernel_features);


@ -48,7 +48,7 @@ class HIPDeviceGraphicsInterop : public DeviceGraphicsInterop {
HIPDeviceQueue *queue_ = nullptr;
HIPDevice *device_ = nullptr;
/* OpenGL PBO which is currently registered as the destination for the CUDA buffer. */
/* OpenGL PBO which is currently registered as the destination for the HIP buffer. */
uint opengl_pbo_id_ = 0;
/* Buffer area in pixels of the corresponding PBO. */
int64_t buffer_area_ = 0;


@ -64,7 +64,7 @@ static inline bool hipSupportsDevice(const int hipDevId)
hipDeviceGetAttribute(&major, hipDeviceAttributeComputeCapabilityMajor, hipDevId);
hipDeviceGetAttribute(&minor, hipDeviceAttributeComputeCapabilityMinor, hipDevId);
return (major > 10) || (major == 10 && minor >= 3);
return (major > 10) || (major == 10 && minor >= 1);
}
CCL_NAMESPACE_END
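
To illustrate the relaxed check (a host-testable sketch, not part of the patch): RDNA parts such as gfx1010 report compute capability 10.1, which the previous minor >= 3 test rejected and this minor >= 1 test accepts.

#include <cassert>

/* Same predicate as hipSupportsDevice above. */
static bool hip_supports(int major, int minor)
{
  return (major > 10) || (major == 10 && minor >= 1);
}

int main()
{
  assert(hip_supports(10, 1));  /* e.g. gfx1010 (RDNA): newly accepted */
  assert(hip_supports(10, 3));  /* e.g. gfx1030 (RDNA2): still accepted */
  assert(!hip_supports(9, 0));  /* pre-RDNA parts remain unsupported */
  return 0;
}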


@ -48,14 +48,6 @@ OptiXDevice::Denoiser::Denoiser(OptiXDevice *device)
{
}
OptiXDevice::Denoiser::~Denoiser()
{
const CUDAContextScope scope(device);
if (optix_denoiser != nullptr) {
optixDenoiserDestroy(optix_denoiser);
}
}
OptiXDevice::OptiXDevice(const DeviceInfo &info, Stats &stats, Profiler &profiler)
: CUDADevice(info, stats, profiler),
sbt_data(this, "__sbt", MEM_READ_ONLY),
@ -133,6 +125,11 @@ OptiXDevice::~OptiXDevice()
}
}
/* Make sure denoiser is destroyed before device context! */
if (denoiser_.optix_denoiser != nullptr) {
optixDenoiserDestroy(denoiser_.optix_denoiser);
}
optixDeviceContextDestroy(context);
}
@ -884,27 +881,31 @@ bool OptiXDevice::denoise_configure_if_needed(DenoiseContext &context)
optix_assert(optixDenoiserComputeMemoryResources(
denoiser_.optix_denoiser, buffer_params.width, buffer_params.height, &sizes));
denoiser_.scratch_size = sizes.withOverlapScratchSizeInBytes;
/* Denoiser is invoked on whole images only, so no overlap needed (would be used for tiling). */
denoiser_.scratch_size = sizes.withoutOverlapScratchSizeInBytes;
denoiser_.scratch_offset = sizes.stateSizeInBytes;
/* Allocate denoiser state if tile size has changed since last setup. */
denoiser_.state.alloc_to_device(denoiser_.scratch_offset + denoiser_.scratch_size);
/* Initialize denoiser state for the current tile size. */
const OptixResult result = optixDenoiserSetup(denoiser_.optix_denoiser,
denoiser_.queue.stream(),
buffer_params.width,
buffer_params.height,
denoiser_.state.device_pointer,
denoiser_.scratch_offset,
denoiser_.state.device_pointer +
denoiser_.scratch_offset,
denoiser_.scratch_size);
const OptixResult result = optixDenoiserSetup(
denoiser_.optix_denoiser,
0, /* Work around bug in r495 drivers that causes artifacts when denoiser setup is called
on a stream that is not the default stream */
buffer_params.width,
buffer_params.height,
denoiser_.state.device_pointer,
denoiser_.scratch_offset,
denoiser_.state.device_pointer + denoiser_.scratch_offset,
denoiser_.scratch_size);
if (result != OPTIX_SUCCESS) {
set_error("Failed to set up OptiX denoiser");
return false;
}
cuda_assert(cuCtxSynchronize());
denoiser_.is_configured = true;
denoiser_.configured_size.x = buffer_params.width;
denoiser_.configured_size.y = buffer_params.height;
@ -939,8 +940,6 @@ bool OptiXDevice::denoise_run(DenoiseContext &context, const DenoisePass &pass)
color_layer.format = OPTIX_PIXEL_FORMAT_FLOAT3;
}
device_vector<float> fake_albedo(this, "fake_albedo", MEM_READ_WRITE);
/* Optional albedo and color passes. */
if (context.num_input_passes > 1) {
const device_ptr d_guiding_buffer = context.guiding_params.device_pointer;
@ -971,6 +970,7 @@ bool OptiXDevice::denoise_run(DenoiseContext &context, const DenoisePass &pass)
/* Finally run denoising. */
OptixDenoiserParams params = {}; /* All parameters are disabled/zero. */
OptixDenoiserLayer image_layers = {};
image_layers.input = color_layer;
image_layers.output = output_layer;
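
The setup hunk above implies a single-allocation layout for the denoiser working memory; this sketch (inferred from the pointer arithmetic, not code from the patch) spells it out:

/* [ state : scratch_offset bytes ][ scratch : scratch_size bytes ]
 *   scratch_offset = sizes.stateSizeInBytes
 *   scratch_size   = sizes.withoutOverlapScratchSizeInBytes
 * (whole-image denoising, so the cheaper no-overlap scratch size is enough) */
struct DenoiserBufferLayout {
  size_t scratch_offset; /* also the size of the state region */
  size_t scratch_size;
  size_t total_bytes() const { return scratch_offset + scratch_size; }
};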


@ -82,7 +82,6 @@ class OptiXDevice : public CUDADevice {
class Denoiser {
public:
explicit Denoiser(OptiXDevice *device);
~Denoiser();
OptiXDevice *device;
OptiXDeviceQueue queue;


@ -73,7 +73,8 @@ bool OptiXDeviceQueue::enqueue(DeviceKernel kernel, const int work_size, void *a
sizeof(device_ptr),
cuda_stream_));
if (kernel == DEVICE_KERNEL_INTEGRATOR_SHADE_SURFACE_RAYTRACE) {
if (kernel == DEVICE_KERNEL_INTEGRATOR_INTERSECT_CLOSEST ||
kernel == DEVICE_KERNEL_INTEGRATOR_SHADE_SURFACE_RAYTRACE) {
cuda_device_assert(
cuda_device_,
cuMemcpyHtoDAsync(launch_params_ptr + offsetof(KernelParamsOptiX, render_buffer),


@ -33,7 +33,10 @@ unique_ptr<Denoiser> Denoiser::create(Device *path_trace_device, const DenoisePa
return make_unique<OptiXDenoiser>(path_trace_device, params);
}
return make_unique<OIDNDenoiser>(path_trace_device, params);
/* Always fallback to OIDN. */
DenoiseParams oidn_params = params;
oidn_params.type = DENOISER_OPENIMAGEDENOISE;
return make_unique<OIDNDenoiser>(path_trace_device, oidn_params);
}
Denoiser::Denoiser(Device *path_trace_device, const DenoiseParams &params)


@ -380,7 +380,10 @@ void PathTrace::path_trace(RenderWork &render_work)
PathTraceWork *path_trace_work = path_trace_works_[i].get();
PathTraceWork::RenderStatistics statistics;
path_trace_work->render_samples(statistics, render_work.path_trace.start_sample, num_samples);
path_trace_work->render_samples(statistics,
render_work.path_trace.start_sample,
num_samples,
render_work.path_trace.sample_offset);
const double work_time = time_dt() - work_start_time;
work_balance_infos_[i].time_spent += work_time;
@ -849,7 +852,8 @@ void PathTrace::progress_update_if_needed(const RenderWork &render_work)
const int2 tile_size = get_render_tile_size();
const int num_samples_added = tile_size.x * tile_size.y * render_work.path_trace.num_samples;
const int current_sample = render_work.path_trace.start_sample +
render_work.path_trace.num_samples;
render_work.path_trace.num_samples -
render_work.path_trace.sample_offset;
progress_->add_samples(num_samples_added, current_sample);
}
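
A worked example of the progress arithmetic above, with hypothetical numbers: for sample_offset = 100 the scheduler starts at sample 100 (reset() passes the offset to set_start_sample()), so after num_samples = 10 have been traced, current_sample = 100 + 10 - 100 = 10. The progress counter therefore reports samples actually rendered rather than the absolute sample index.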


@ -75,7 +75,10 @@ class PathTraceWork {
/* Render given number of samples as a synchronous blocking call.
* The samples are added to the render buffer associated with this work. */
virtual void render_samples(RenderStatistics &statistics, int start_sample, int samples_num) = 0;
virtual void render_samples(RenderStatistics &statistics,
int start_sample,
int samples_num,
int sample_offset) = 0;
/* Copy render result from this work to the corresponding place of the GPU display.
*


@ -71,14 +71,17 @@ void PathTraceWorkCPU::init_execution()
void PathTraceWorkCPU::render_samples(RenderStatistics &statistics,
int start_sample,
int samples_num)
int samples_num,
int sample_offset)
{
const int64_t image_width = effective_buffer_params_.width;
const int64_t image_height = effective_buffer_params_.height;
const int64_t total_pixels_num = image_width * image_height;
for (CPUKernelThreadGlobals &kernel_globals : kernel_thread_globals_) {
kernel_globals.start_profiling();
if (device_->profiler.active()) {
for (CPUKernelThreadGlobals &kernel_globals : kernel_thread_globals_) {
kernel_globals.start_profiling();
}
}
tbb::task_arena local_arena = local_tbb_arena_create(device_);
@ -97,6 +100,7 @@ void PathTraceWorkCPU::render_samples(RenderStatistics &statistics,
work_tile.w = 1;
work_tile.h = 1;
work_tile.start_sample = start_sample;
work_tile.sample_offset = sample_offset;
work_tile.num_samples = 1;
work_tile.offset = effective_buffer_params_.offset;
work_tile.stride = effective_buffer_params_.stride;
@ -106,9 +110,10 @@ void PathTraceWorkCPU::render_samples(RenderStatistics &statistics,
render_samples_full_pipeline(kernel_globals, work_tile, samples_num);
});
});
for (CPUKernelThreadGlobals &kernel_globals : kernel_thread_globals_) {
kernel_globals.stop_profiling();
if (device_->profiler.active()) {
for (CPUKernelThreadGlobals &kernel_globals : kernel_thread_globals_) {
kernel_globals.stop_profiling();
}
}
statistics.occupancy = 1.0f;


@ -48,7 +48,8 @@ class PathTraceWorkCPU : public PathTraceWork {
virtual void render_samples(RenderStatistics &statistics,
int start_sample,
int samples_num) override;
int samples_num,
int sample_offset) override;
virtual void copy_to_display(PathTraceDisplay *display,
PassMode pass_mode,


@ -250,7 +250,8 @@ void PathTraceWorkGPU::init_execution()
void PathTraceWorkGPU::render_samples(RenderStatistics &statistics,
int start_sample,
int samples_num)
int samples_num,
int sample_offset)
{
/* Limit number of states for the tile and rely on a greedy scheduling of tiles. This allows to
* add more work (because tiles are smaller, so there is higher chance that more paths will
@ -261,6 +262,7 @@ void PathTraceWorkGPU::render_samples(RenderStatistics &statistics,
work_tile_scheduler_.reset(effective_buffer_params_,
start_sample,
samples_num,
sample_offset,
device_scene_->data.integrator.scrambling_distance);
enqueue_reset();
@ -437,7 +439,15 @@ void PathTraceWorkGPU::enqueue_path_iteration(DeviceKernel kernel, const int num
DCHECK_LE(work_size, max_num_paths_);
switch (kernel) {
case DEVICE_KERNEL_INTEGRATOR_INTERSECT_CLOSEST:
case DEVICE_KERNEL_INTEGRATOR_INTERSECT_CLOSEST: {
/* Closest ray intersection kernels with integrator state and render buffer. */
void *d_render_buffer = (void *)buffers_->buffer.device_pointer;
void *args[] = {&d_path_index, &d_render_buffer, const_cast<int *>(&work_size)};
queue_->enqueue(kernel, work_size, args);
break;
}
case DEVICE_KERNEL_INTEGRATOR_INTERSECT_SHADOW:
case DEVICE_KERNEL_INTEGRATOR_INTERSECT_SUBSURFACE:
case DEVICE_KERNEL_INTEGRATOR_INTERSECT_VOLUME_STACK: {


@ -46,7 +46,8 @@ class PathTraceWorkGPU : public PathTraceWork {
virtual void render_samples(RenderStatistics &statistics,
int start_sample,
int samples_num) override;
int samples_num,
int sample_offset) override;
virtual void copy_to_display(PathTraceDisplay *display,
PassMode pass_mode,


@ -88,6 +88,16 @@ int RenderScheduler::get_num_samples() const
return num_samples_;
}
void RenderScheduler::set_sample_offset(int sample_offset)
{
sample_offset_ = sample_offset;
}
int RenderScheduler::get_sample_offset() const
{
return sample_offset_;
}
void RenderScheduler::set_time_limit(double time_limit)
{
time_limit_ = time_limit;
@ -110,13 +120,15 @@ int RenderScheduler::get_num_rendered_samples() const
return state_.num_rendered_samples;
}
void RenderScheduler::reset(const BufferParams &buffer_params, int num_samples)
void RenderScheduler::reset(const BufferParams &buffer_params, int num_samples, int sample_offset)
{
buffer_params_ = buffer_params;
update_start_resolution_divider();
set_num_samples(num_samples);
set_start_sample(sample_offset);
set_sample_offset(sample_offset);
/* In background mode never do lower resolution render preview, as it is not really supported
* by the software. */
@ -171,7 +183,7 @@ void RenderScheduler::reset(const BufferParams &buffer_params, int num_samples)
void RenderScheduler::reset_for_next_tile()
{
reset(buffer_params_, num_samples_);
reset(buffer_params_, num_samples_, sample_offset_);
}
bool RenderScheduler::render_work_reschedule_on_converge(RenderWork &render_work)
@ -317,6 +329,7 @@ RenderWork RenderScheduler::get_render_work()
render_work.path_trace.start_sample = get_start_sample_to_path_trace();
render_work.path_trace.num_samples = get_num_samples_to_path_trace();
render_work.path_trace.sample_offset = get_sample_offset();
render_work.init_render_buffers = (render_work.path_trace.start_sample == get_start_sample());


@ -39,6 +39,7 @@ class RenderWork {
struct {
int start_sample = 0;
int num_samples = 0;
int sample_offset = 0;
} path_trace;
struct {
@ -125,6 +126,9 @@ class RenderScheduler {
void set_num_samples(int num_samples);
int get_num_samples() const;
void set_sample_offset(int sample_offset);
int get_sample_offset() const;
/* Time limit for the path tracing tasks, in minutes.
* Zero disables the limit. */
void set_time_limit(double time_limit);
@ -150,7 +154,7 @@ class RenderScheduler {
/* Reset scheduler, indicating that rendering will happen from scratch.
* Resets current rendered state, as well as scheduling information. */
void reset(const BufferParams &buffer_params, int num_samples);
void reset(const BufferParams &buffer_params, int num_samples, int sample_offset);
/* Reset scheduler upon switching to a next tile.
* Will keep the same number of samples and full-frame render parameters, but will reset progress
@ -419,6 +423,8 @@ class RenderScheduler {
int start_sample_ = 0;
int num_samples_ = 0;
int sample_offset_ = 0;
/* Limit in seconds for how long path tracing is allowed to happen.
* Zero means no limit is applied. */
double time_limit_ = 0.0;


@ -36,6 +36,7 @@ void WorkTileScheduler::set_max_num_path_states(int max_num_path_states)
void WorkTileScheduler::reset(const BufferParams &buffer_params,
int sample_start,
int samples_num,
int sample_offset,
float scrambling_distance)
{
/* Image buffer parameters. */
@ -51,6 +52,7 @@ void WorkTileScheduler::reset(const BufferParams &buffer_params,
/* Samples parameters. */
sample_start_ = sample_start;
samples_num_ = samples_num;
sample_offset_ = sample_offset;
/* Initialize new scheduling. */
reset_scheduler_state();
@ -111,6 +113,7 @@ bool WorkTileScheduler::get_work(KernelWorkTile *work_tile_, const int max_work_
work_tile.h = tile_size_.height;
work_tile.start_sample = sample_start_ + start_sample;
work_tile.num_samples = min(tile_size_.num_samples, samples_num_ - start_sample);
work_tile.sample_offset = sample_offset_;
work_tile.offset = offset_;
work_tile.stride = stride_;


@ -41,6 +41,7 @@ class WorkTileScheduler {
void reset(const BufferParams &buffer_params,
int sample_start,
int samples_num,
int sample_offset,
float scrambling_distance);
/* Get work for a device.
@ -79,6 +80,7 @@ class WorkTileScheduler {
* (splitting into a smaller work tiles). */
int sample_start_ = 0;
int samples_num_ = 0;
int sample_offset_ = 0;
/* Tile size which be scheduled for rendering. */
TileSize tile_size_;


@ -39,6 +39,10 @@ set(SRC_KERNEL_DEVICE_HIP
device/hip/kernel.cpp
)
set(SRC_KERNEL_DEVICE_METAL
device/metal/kernel.metal
)
set(SRC_KERNEL_DEVICE_OPTIX
device/optix/kernel.cu
device/optix/kernel_shader_raytrace.cu
@ -79,6 +83,13 @@ set(SRC_KERNEL_DEVICE_OPTIX_HEADERS
device/optix/globals.h
)
set(SRC_KERNEL_DEVICE_METAL_HEADERS
device/metal/compat.h
device/metal/context_begin.h
device/metal/context_end.h
device/metal/globals.h
)
set(SRC_KERNEL_CLOSURE_HEADERS
closure/alloc.h
closure/bsdf.h
@ -723,12 +734,14 @@ cycles_add_library(cycles_kernel "${LIB}"
${SRC_KERNEL_DEVICE_CUDA}
${SRC_KERNEL_DEVICE_HIP}
${SRC_KERNEL_DEVICE_OPTIX}
${SRC_KERNEL_DEVICE_METAL}
${SRC_KERNEL_HEADERS}
${SRC_KERNEL_DEVICE_CPU_HEADERS}
${SRC_KERNEL_DEVICE_GPU_HEADERS}
${SRC_KERNEL_DEVICE_CUDA_HEADERS}
${SRC_KERNEL_DEVICE_HIP_HEADERS}
${SRC_KERNEL_DEVICE_OPTIX_HEADERS}
${SRC_KERNEL_DEVICE_METAL_HEADERS}
)
source_group("bake" FILES ${SRC_KERNEL_BAKE_HEADERS})
@ -740,6 +753,7 @@ source_group("device\\cuda" FILES ${SRC_KERNEL_DEVICE_CUDA} ${SRC_KERNEL_DEVICE_
source_group("device\\gpu" FILES ${SRC_KERNEL_DEVICE_GPU_HEADERS})
source_group("device\\hip" FILES ${SRC_KERNEL_DEVICE_HIP} ${SRC_KERNEL_DEVICE_HIP_HEADERS})
source_group("device\\optix" FILES ${SRC_KERNEL_DEVICE_OPTIX} ${SRC_KERNEL_DEVICE_OPTIX_HEADERS})
source_group("device\\metal" FILES ${SRC_KERNEL_DEVICE_METAL} ${SRC_KERNEL_DEVICE_METAL_HEADERS})
source_group("film" FILES ${SRC_KERNEL_FILM_HEADERS})
source_group("geom" FILES ${SRC_KERNEL_GEOM_HEADERS})
source_group("integrator" FILES ${SRC_KERNEL_INTEGRATOR_HEADERS})
@ -772,6 +786,8 @@ delayed_install(${CMAKE_CURRENT_SOURCE_DIR} "${SRC_KERNEL_DEVICE_HIP}" ${CYCLES_
delayed_install(${CMAKE_CURRENT_SOURCE_DIR} "${SRC_KERNEL_DEVICE_HIP_HEADERS}" ${CYCLES_INSTALL_PATH}/source/kernel/device/hip)
delayed_install(${CMAKE_CURRENT_SOURCE_DIR} "${SRC_KERNEL_DEVICE_OPTIX}" ${CYCLES_INSTALL_PATH}/source/kernel/device/optix)
delayed_install(${CMAKE_CURRENT_SOURCE_DIR} "${SRC_KERNEL_DEVICE_OPTIX_HEADERS}" ${CYCLES_INSTALL_PATH}/source/kernel/device/optix)
delayed_install(${CMAKE_CURRENT_SOURCE_DIR} "${SRC_KERNEL_DEVICE_METAL}" ${CYCLES_INSTALL_PATH}/source/kernel/device/metal)
delayed_install(${CMAKE_CURRENT_SOURCE_DIR} "${SRC_KERNEL_DEVICE_METAL_HEADERS}" ${CYCLES_INSTALL_PATH}/source/kernel/device/metal)
delayed_install(${CMAKE_CURRENT_SOURCE_DIR} "${SRC_KERNEL_FILM_HEADERS}" ${CYCLES_INSTALL_PATH}/source/kernel/film)
delayed_install(${CMAKE_CURRENT_SOURCE_DIR} "${SRC_KERNEL_GEOM_HEADERS}" ${CYCLES_INSTALL_PATH}/source/kernel/geom)
delayed_install(${CMAKE_CURRENT_SOURCE_DIR} "${SRC_KERNEL_INTEGRATOR_HEADERS}" ${CYCLES_INSTALL_PATH}/source/kernel/integrator)


@ -37,7 +37,7 @@
KERNEL_INTEGRATOR_INIT_FUNCTION(init_from_camera);
KERNEL_INTEGRATOR_INIT_FUNCTION(init_from_bake);
KERNEL_INTEGRATOR_FUNCTION(intersect_closest);
KERNEL_INTEGRATOR_SHADE_FUNCTION(intersect_closest);
KERNEL_INTEGRATOR_FUNCTION(intersect_shadow);
KERNEL_INTEGRATOR_FUNCTION(intersect_subsurface);
KERNEL_INTEGRATOR_FUNCTION(intersect_volume_stack);


@ -112,7 +112,7 @@ CCL_NAMESPACE_BEGIN
DEFINE_INTEGRATOR_INIT_KERNEL(init_from_camera)
DEFINE_INTEGRATOR_INIT_KERNEL(init_from_bake)
DEFINE_INTEGRATOR_KERNEL(intersect_closest)
DEFINE_INTEGRATOR_SHADE_KERNEL(intersect_closest)
DEFINE_INTEGRATOR_KERNEL(intersect_subsurface)
DEFINE_INTEGRATOR_KERNEL(intersect_volume_stack)
DEFINE_INTEGRATOR_SHADE_KERNEL(shade_background)


@ -75,6 +75,7 @@ typedef unsigned long long uint64_t;
#define ccl_gpu_block_idx_x (blockIdx.x)
#define ccl_gpu_grid_dim_x (gridDim.x)
#define ccl_gpu_warp_size (warpSize)
#define ccl_gpu_thread_mask(thread_warp) uint(0xFFFFFFFF >> (ccl_gpu_warp_size - thread_warp))
#define ccl_gpu_global_id_x() (ccl_gpu_block_idx_x * ccl_gpu_block_dim_x + ccl_gpu_thread_idx_x)
#define ccl_gpu_global_size_x() (ccl_gpu_grid_dim_x * ccl_gpu_block_dim_x)
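
The new ccl_gpu_thread_mask builds a mask of all lanes below the calling lane. A host-side check (my sketch) that this shift form agrees with the (1 << n) - 1 form used by the Metal backend later in this commit, for a 32-wide warp:

#include <cassert>
#include <cstdint>

int main()
{
  const uint32_t warp_size = 32;
  /* Lane 0 is skipped: a 32-bit shift by 32 is undefined in host C++,
   * though PTX clamps it to zero, matching the empty mask lane 0 needs. */
  for (uint32_t thread_warp = 1; thread_warp < warp_size; thread_warp++) {
    const uint32_t a = 0xFFFFFFFFu >> (warp_size - thread_warp);
    const uint32_t b = (1u << thread_warp) - 1u;
    assert(a == b); /* both set exactly thread_warp low bits */
  }
  return 0;
}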


@ -92,12 +92,29 @@
/* Compute number of threads per block and minimum blocks per multiprocessor
* given the maximum number of registers per thread. */
#define ccl_gpu_kernel(block_num_threads, thread_num_registers) \
extern "C" __global__ void __launch_bounds__(block_num_threads, \
GPU_MULTIPRESSOR_MAX_REGISTERS / \
(block_num_threads * thread_num_registers))
#define ccl_gpu_kernel_threads(block_num_threads) \
extern "C" __global__ void __launch_bounds__(block_num_threads)
#define ccl_gpu_kernel_signature(name, ...) kernel_gpu_##name(__VA_ARGS__)
#define ccl_gpu_kernel_call(x) x
/* Define a function object where "func" is the lambda body, and additional parameters are used to
* specify captured state */
#define ccl_gpu_kernel_lambda(func, ...) \
struct KernelLambda { \
__VA_ARGS__; \
__device__ int operator()(const int state) \
{ \
return (func); \
} \
} ccl_gpu_kernel_lambda_pass
/* sanity checks */
#if GPU_KERNEL_BLOCK_NUM_THREADS > GPU_BLOCK_MAX_THREADS
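
For reference, a sketch of what the ccl_gpu_kernel_lambda macro above produces for a hypothetical invocation (the name target_state is invented for illustration):

/* ccl_gpu_kernel_lambda(state == target_state, int target_state)
 * expands, roughly, to: */
struct KernelLambda {
  int target_state; /* captured state, from __VA_ARGS__ */
  __device__ int operator()(const int state)
  {
    return (state == target_state); /* the "func" expression */
  }
} ccl_gpu_kernel_lambda_pass;
/* The caller then fills in ccl_gpu_kernel_lambda_pass.target_state and
 * hands the object to a helper expecting a predicate over state indices. */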


@ -65,7 +65,9 @@ ccl_device float cubic_h1(float a)
/* Fast bicubic texture lookup using 4 bilinear lookups, adapted from CUDA samples. */
template<typename T>
ccl_device_noinline T kernel_tex_image_interp_bicubic(const TextureInfo &info, float x, float y)
ccl_device_noinline T kernel_tex_image_interp_bicubic(ccl_global const TextureInfo &info,
float x,
float y)
{
ccl_gpu_tex_object tex = (ccl_gpu_tex_object)info.data;
@ -94,7 +96,7 @@ ccl_device_noinline T kernel_tex_image_interp_bicubic(const TextureInfo &info, f
/* Fast tricubic texture lookup using 8 trilinear lookups. */
template<typename T>
ccl_device_noinline T
kernel_tex_image_interp_tricubic(const TextureInfo &info, float x, float y, float z)
kernel_tex_image_interp_tricubic(ccl_global const TextureInfo &info, float x, float y, float z)
{
ccl_gpu_tex_object tex = (ccl_gpu_tex_object)info.data;
@ -169,7 +171,7 @@ ccl_device T kernel_tex_image_interp_tricubic_nanovdb(S &s, float x, float y, fl
template<typename T>
ccl_device_noinline T kernel_tex_image_interp_nanovdb(
const TextureInfo &info, float x, float y, float z, uint interpolation)
ccl_global const TextureInfo &info, float x, float y, float z, uint interpolation)
{
using namespace nanovdb;
@ -191,7 +193,7 @@ ccl_device_noinline T kernel_tex_image_interp_nanovdb(
ccl_device float4 kernel_tex_image_interp(KernelGlobals kg, int id, float x, float y)
{
const TextureInfo &info = kernel_tex_fetch(__texture_info, id);
ccl_global const TextureInfo &info = kernel_tex_fetch(__texture_info, id);
/* float4, byte4, ushort4 and half4 */
const int texture_type = info.data_type;
@ -226,7 +228,7 @@ ccl_device float4 kernel_tex_image_interp_3d(KernelGlobals kg,
float3 P,
InterpolationType interp)
{
const TextureInfo &info = kernel_tex_fetch(__texture_info, id);
ccl_global const TextureInfo &info = kernel_tex_fetch(__texture_info, id);
if (info.use_transform_3d) {
P = transform_point(&info.transform_3d, P);

File diff suppressed because it is too large.


@ -31,10 +31,43 @@ CCL_NAMESPACE_BEGIN
# define GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE 512
#endif
#ifdef __KERNEL_METAL__
struct ActiveIndexContext {
ActiveIndexContext(int _thread_index,
int _global_index,
int _threadgroup_size,
int _simdgroup_size,
int _simd_lane_index,
int _simd_group_index,
int _num_simd_groups,
threadgroup int *_simdgroup_offset)
: thread_index(_thread_index),
global_index(_global_index),
blocksize(_threadgroup_size),
ccl_gpu_warp_size(_simdgroup_size),
thread_warp(_simd_lane_index),
warp_index(_simd_group_index),
num_warps(_num_simd_groups),
warp_offset(_simdgroup_offset)
{
}
const int thread_index, global_index, blocksize, ccl_gpu_warp_size, thread_warp, warp_index,
num_warps;
threadgroup int *warp_offset;
template<uint blocksizeDummy, typename IsActiveOp>
void active_index_array(const uint num_states,
ccl_global int *indices,
ccl_global int *num_indices,
IsActiveOp is_active_op)
{
const uint state_index = global_index;
#else
template<uint blocksize, typename IsActiveOp>
__device__ void gpu_parallel_active_index_array(const uint num_states,
int *indices,
int *num_indices,
ccl_global int *indices,
ccl_global int *num_indices,
IsActiveOp is_active_op)
{
extern ccl_gpu_shared int warp_offset[];
@ -45,43 +78,62 @@ __device__ void gpu_parallel_active_index_array(const uint num_states,
const uint warp_index = thread_index / ccl_gpu_warp_size;
const uint num_warps = blocksize / ccl_gpu_warp_size;
/* Test if state corresponding to this thread is active. */
const uint state_index = ccl_gpu_block_idx_x * blocksize + thread_index;
const uint is_active = (state_index < num_states) ? is_active_op(state_index) : 0;
#endif
/* For each thread within a warp compute how many other active states precede it. */
const uint thread_mask = 0xFFFFFFFF >> (ccl_gpu_warp_size - thread_warp);
const uint thread_offset = ccl_gpu_popc(ccl_gpu_ballot(is_active) & thread_mask);
/* Test if state corresponding to this thread is active. */
const uint is_active = (state_index < num_states) ? is_active_op(state_index) : 0;
/* Last thread in warp stores number of active states for each warp. */
if (thread_warp == ccl_gpu_warp_size - 1) {
warp_offset[warp_index] = thread_offset + is_active;
}
/* For each thread within a warp compute how many other active states precede it. */
const uint thread_offset = ccl_gpu_popc(ccl_gpu_ballot(is_active) &
ccl_gpu_thread_mask(thread_warp));
ccl_gpu_syncthreads();
/* Last thread in block converts per-warp sizes to offsets, increments global size of
* index array and gets offset to write to. */
if (thread_index == blocksize - 1) {
/* TODO: parallelize this. */
int offset = 0;
for (int i = 0; i < num_warps; i++) {
int num_active = warp_offset[i];
warp_offset[i] = offset;
offset += num_active;
/* Last thread in warp stores number of active states for each warp. */
if (thread_warp == ccl_gpu_warp_size - 1) {
warp_offset[warp_index] = thread_offset + is_active;
}
const uint block_num_active = warp_offset[warp_index] + thread_offset + is_active;
warp_offset[num_warps] = atomic_fetch_and_add_uint32(num_indices, block_num_active);
ccl_gpu_syncthreads();
/* Last thread in block converts per-warp sizes to offsets, increments global size of
* index array and gets offset to write to. */
if (thread_index == blocksize - 1) {
/* TODO: parallelize this. */
int offset = 0;
for (int i = 0; i < num_warps; i++) {
int num_active = warp_offset[i];
warp_offset[i] = offset;
offset += num_active;
}
const uint block_num_active = warp_offset[warp_index] + thread_offset + is_active;
warp_offset[num_warps] = atomic_fetch_and_add_uint32(num_indices, block_num_active);
}
ccl_gpu_syncthreads();
/* Write to index array. */
if (is_active) {
const uint block_offset = warp_offset[num_warps];
indices[block_offset + warp_offset[warp_index] + thread_offset] = state_index;
}
}
ccl_gpu_syncthreads();
#ifdef __KERNEL_METAL__
}; /* end class ActiveIndexContext */
/* Write to index array. */
if (is_active) {
const uint block_offset = warp_offset[num_warps];
indices[block_offset + warp_offset[warp_index] + thread_offset] = state_index;
}
}
/* inject the required thread params into a struct, and redirect to its templated member function
*/
# define gpu_parallel_active_index_array \
ActiveIndexContext(metal_local_id, \
metal_global_id, \
metal_local_size, \
simdgroup_size, \
simd_lane_index, \
simd_group_index, \
num_simd_groups, \
simdgroup_offset) \
.active_index_array
#endif
CCL_NAMESPACE_END
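
The restructured code above is a two-level stream compaction: ballot/popc gives each thread its rank among the active lanes of its warp, the last lane of each warp publishes the warp's count, and the last thread of the block turns counts into offsets and reserves a block-wide slot in the global index array. A serial C++ paraphrase with toy sizes (my illustration, not the kernel):

#include <cstdio>
#include <vector>

int main()
{
  const int warp_size = 4, num_warps = 2; /* toy sizes */
  const int num_states = warp_size * num_warps;
  auto is_active = [](int i) { return i % 3 == 0; };

  /* Per-warp active counts (popc of the warp's ballot). */
  std::vector<int> warp_count(num_warps, 0);
  for (int i = 0; i < num_states; i++)
    if (is_active(i))
      warp_count[i / warp_size]++;

  /* Exclusive prefix sum over warps (the "last thread in block" loop). */
  std::vector<int> warp_offset(num_warps, 0);
  for (int w = 1; w < num_warps; w++)
    warp_offset[w] = warp_offset[w - 1] + warp_count[w - 1];

  /* Scatter: warp offset + rank within the warp. */
  std::vector<int> indices(warp_offset[num_warps - 1] + warp_count[num_warps - 1]);
  std::vector<int> rank(num_warps, 0);
  for (int i = 0; i < num_states; i++)
    if (is_active(i))
      indices[warp_offset[i / warp_size] + rank[i / warp_size]++] = i;

  for (int idx : indices)
    printf("%d ", idx); /* prints: 0 3 6 */
  return 0;
}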


@ -33,10 +33,12 @@ CCL_NAMESPACE_BEGIN
# define GPU_PARALLEL_PREFIX_SUM_DEFAULT_BLOCK_SIZE 512
#endif
template<uint blocksize>
__device__ void gpu_parallel_prefix_sum(int *counter, int *prefix_sum, const int num_values)
__device__ void gpu_parallel_prefix_sum(const int global_id,
ccl_global int *counter,
ccl_global int *prefix_sum,
const int num_values)
{
if (!(ccl_gpu_block_idx_x == 0 && ccl_gpu_thread_idx_x == 0)) {
if (global_id != 0) {
return;
}
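
Only the signature changes here; the early-out still leaves a single thread doing the whole loop. For context, an exclusive prefix sum over per-key counters looks like this (a generic sketch, not the kernel body):

/* prefix_sum[i] = sum of counter[0..i-1]; counter = {2, 0, 3}
 * yields prefix_sum = {0, 2, 2}. */
void prefix_sum_serial(const int *counter, int *prefix_sum, int num_values)
{
  int offset = 0;
  for (int i = 0; i < num_values; i++) {
    prefix_sum[i] = offset;
    offset += counter[i];
  }
}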


@ -33,16 +33,16 @@ CCL_NAMESPACE_BEGIN
#endif
#define GPU_PARALLEL_SORTED_INDEX_INACTIVE_KEY (~0)
template<uint blocksize, typename GetKeyOp>
__device__ void gpu_parallel_sorted_index_array(const uint num_states,
template<typename GetKeyOp>
__device__ void gpu_parallel_sorted_index_array(const uint state_index,
const uint num_states,
const int num_states_limit,
int *indices,
int *num_indices,
int *key_counter,
int *key_prefix_sum,
ccl_global int *indices,
ccl_global int *num_indices,
ccl_global int *key_counter,
ccl_global int *key_prefix_sum,
GetKeyOp get_key_op)
{
const uint state_index = ccl_gpu_block_idx_x * blocksize + ccl_gpu_thread_idx_x;
const int key = (state_index < num_states) ? get_key_op(state_index) :
GPU_PARALLEL_SORTED_INDEX_INACTIVE_KEY;
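
The hunk above only moves the state_index computation to the caller; the helper itself performs a counting-sort placement. A serial sketch of that idea (generic illustration; the kernel body is not shown in this diff):

/* next_slot must be initialized to each key's base offset from the
 * prefix sum; the increment is an atomic add on the GPU. */
void sorted_index_serial(const int *key, int num_states, int *next_slot,
                         int *indices)
{
  for (int i = 0; i < num_states; i++) {
    if (key[i] < 0) /* stand-in for GPU_PARALLEL_SORTED_INDEX_INACTIVE_KEY */
      continue;
    indices[next_slot[key[i]]++] = i;
  }
  /* States sharing a key land contiguously, so shading work can be
   * batched coherently. */
}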


@ -74,6 +74,7 @@ typedef unsigned long long uint64_t;
#define ccl_gpu_block_idx_x (blockIdx.x)
#define ccl_gpu_grid_dim_x (gridDim.x)
#define ccl_gpu_warp_size (warpSize)
#define ccl_gpu_thread_mask(thread_warp) uint(0xFFFFFFFF >> (ccl_gpu_warp_size - thread_warp))
#define ccl_gpu_global_id_x() (ccl_gpu_block_idx_x * ccl_gpu_block_dim_x + ccl_gpu_thread_idx_x)
#define ccl_gpu_global_size_x() (ccl_gpu_grid_dim_x * ccl_gpu_block_dim_x)


@ -35,12 +35,29 @@
/* Compute number of threads per block and minimum blocks per multiprocessor
* given the maximum number of registers per thread. */
#define ccl_gpu_kernel(block_num_threads, thread_num_registers) \
extern "C" __global__ void __launch_bounds__(block_num_threads, \
GPU_MULTIPRESSOR_MAX_REGISTERS / \
(block_num_threads * thread_num_registers))
#define ccl_gpu_kernel_threads(block_num_threads) \
extern "C" __global__ void __launch_bounds__(block_num_threads)
#define ccl_gpu_kernel_signature(name, ...) kernel_gpu_##name(__VA_ARGS__)
#define ccl_gpu_kernel_call(x) x
/* Define a function object where "func" is the lambda body, and additional parameters are used to
* specify captured state */
#define ccl_gpu_kernel_lambda(func, ...) \
struct KernelLambda { \
__VA_ARGS__; \
__device__ int operator()(const int state) \
{ \
return (func); \
} \
} ccl_gpu_kernel_lambda_pass
/* sanity checks */
#if GPU_KERNEL_BLOCK_NUM_THREADS > GPU_BLOCK_MAX_THREADS


@ -58,6 +58,98 @@ using namespace metal;
#define kernel_assert(cond)
#define ccl_gpu_global_id_x() metal_global_id
#define ccl_gpu_warp_size simdgroup_size
#define ccl_gpu_thread_idx_x simd_group_index
#define ccl_gpu_thread_mask(thread_warp) uint64_t((1ull << thread_warp) - 1)
#define ccl_gpu_ballot(predicate) ((uint64_t)((simd_vote::vote_t)simd_ballot(predicate)))
#define ccl_gpu_popc(x) popcount(x)
// clang-format off
/* kernel.h adapters */
#define ccl_gpu_kernel(block_num_threads, thread_num_registers)
#define ccl_gpu_kernel_threads(block_num_threads)
/* Convert a comma-separated list into a semicolon-separated list
* (so that we can generate a struct based on kernel entry-point parameters). */
#define FN0()
#define FN1(p1) p1;
#define FN2(p1, p2) p1; p2;
#define FN3(p1, p2, p3) p1; p2; p3;
#define FN4(p1, p2, p3, p4) p1; p2; p3; p4;
#define FN5(p1, p2, p3, p4, p5) p1; p2; p3; p4; p5;
#define FN6(p1, p2, p3, p4, p5, p6) p1; p2; p3; p4; p5; p6;
#define FN7(p1, p2, p3, p4, p5, p6, p7) p1; p2; p3; p4; p5; p6; p7;
#define FN8(p1, p2, p3, p4, p5, p6, p7, p8) p1; p2; p3; p4; p5; p6; p7; p8;
#define FN9(p1, p2, p3, p4, p5, p6, p7, p8, p9) p1; p2; p3; p4; p5; p6; p7; p8; p9;
#define FN10(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10) p1; p2; p3; p4; p5; p6; p7; p8; p9; p10;
#define FN11(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11) p1; p2; p3; p4; p5; p6; p7; p8; p9; p10; p11;
#define FN12(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12) p1; p2; p3; p4; p5; p6; p7; p8; p9; p10; p11; p12;
#define FN13(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13) p1; p2; p3; p4; p5; p6; p7; p8; p9; p10; p11; p12; p13;
#define FN14(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14) p1; p2; p3; p4; p5; p6; p7; p8; p9; p10; p11; p12; p13; p14;
#define FN15(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15) p1; p2; p3; p4; p5; p6; p7; p8; p9; p10; p11; p12; p13; p14; p15;
#define FN16(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15, p16) p1; p2; p3; p4; p5; p6; p7; p8; p9; p10; p11; p12; p13; p14; p15; p16;
#define GET_LAST_ARG(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15, p16, ...) p16
#define PARAMS_MAKER(...) GET_LAST_ARG(__VA_ARGS__, FN16, FN15, FN14, FN13, FN12, FN11, FN10, FN9, FN8, FN7, FN6, FN5, FN4, FN3, FN2, FN1, FN0)
/* Generate a struct containing the entry-point parameters and a "run"
* method which can access them implicitly via this-> */
#define ccl_gpu_kernel_signature(name, ...) \
struct kernel_gpu_##name \
{ \
PARAMS_MAKER(__VA_ARGS__)(__VA_ARGS__) \
void run(thread MetalKernelContext& context, \
threadgroup int *simdgroup_offset, \
const uint metal_global_id, \
const ushort metal_local_id, \
const ushort metal_local_size, \
uint simdgroup_size, \
uint simd_lane_index, \
uint simd_group_index, \
uint num_simd_groups) ccl_global const; \
}; \
kernel void kernel_metal_##name(device const kernel_gpu_##name *params_struct, \
constant KernelParamsMetal &ccl_restrict _launch_params_metal, \
constant MetalAncillaries *_metal_ancillaries, \
threadgroup int *simdgroup_offset[[ threadgroup(0) ]], \
const uint metal_global_id [[thread_position_in_grid]], \
const ushort metal_local_id [[thread_position_in_threadgroup]], \
const ushort metal_local_size [[threads_per_threadgroup]], \
uint simdgroup_size [[threads_per_simdgroup]], \
uint simd_lane_index [[thread_index_in_simdgroup]], \
uint simd_group_index [[simdgroup_index_in_threadgroup]], \
uint num_simd_groups [[simdgroups_per_threadgroup]]) { \
MetalKernelContext context(_launch_params_metal, _metal_ancillaries); \
INIT_DEBUG_BUFFER \
params_struct->run(context, simdgroup_offset, metal_global_id, metal_local_id, metal_local_size, simdgroup_size, simd_lane_index, simd_group_index, num_simd_groups); \
} \
void kernel_gpu_##name::run(thread MetalKernelContext& context, \
threadgroup int *simdgroup_offset, \
const uint metal_global_id, \
const ushort metal_local_id, \
const ushort metal_local_size, \
uint simdgroup_size, \
uint simd_lane_index, \
uint simd_group_index, \
uint num_simd_groups) ccl_global const
#define ccl_gpu_kernel_call(x) context.x
/* define a function object where "func" is the lambda body, and additional parameters are used to specify captured state */
#define ccl_gpu_kernel_lambda(func, ...) \
struct KernelLambda \
{ \
KernelLambda(ccl_private MetalKernelContext &_context) : context(_context) {} \
ccl_private MetalKernelContext &context; \
__VA_ARGS__; \
int operator()(const int state) const { return (func); } \
} ccl_gpu_kernel_lambda_pass(context)
// clang-format on
/* make_type definitions with Metal style element initializers */
#ifdef make_float2
# undef make_float2
@ -124,3 +216,38 @@ using namespace metal;
#define logf(x) trigmode::log(float(x))
#define NULL 0
/* texture bindings and sampler setup */
struct Texture2DParamsMetal {
texture2d<float, access::sample> tex;
};
struct Texture3DParamsMetal {
texture3d<float, access::sample> tex;
};
struct MetalAncillaries {
device Texture2DParamsMetal *textures_2d;
device Texture3DParamsMetal *textures_3d;
};
enum SamplerType {
SamplerFilterNearest_AddressRepeat,
SamplerFilterNearest_AddressClampEdge,
SamplerFilterNearest_AddressClampZero,
SamplerFilterLinear_AddressRepeat,
SamplerFilterLinear_AddressClampEdge,
SamplerFilterLinear_AddressClampZero,
SamplerCount
};
constant constexpr array<sampler, SamplerCount> metal_samplers = {
sampler(address::repeat, filter::nearest),
sampler(address::clamp_to_edge, filter::nearest),
sampler(address::clamp_to_zero, filter::nearest),
sampler(address::repeat, filter::linear),
sampler(address::clamp_to_edge, filter::linear),
sampler(address::clamp_to_zero, filter::linear),
};
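
To make the FNn machinery above concrete, a hypothetical two-parameter entry point (names invented) expands along these lines:

/* ccl_gpu_kernel_signature(example, int work_size, ccl_global float *buf)
 * selects FN2, turning the parameter list into member declarations: */
struct kernel_gpu_example {
  int work_size;         /* FN2 -> "int work_size;"         */
  ccl_global float *buf; /* FN2 -> "ccl_global float *buf;" */
  void run(/* thread/simdgroup state elided */);
};
/* The companion kernel_metal_example() entry point receives this struct
 * as a buffer argument and forwards to params_struct->run(...). */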


@ -0,0 +1,79 @@
/*
* Copyright 2021 Blender Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// clang-format off
/* Open the Metal kernel context class
* Necessary to access resource bindings */
class MetalKernelContext {
public:
constant KernelParamsMetal &launch_params_metal;
constant MetalAncillaries *metal_ancillaries;
MetalKernelContext(constant KernelParamsMetal &_launch_params_metal, constant MetalAncillaries * _metal_ancillaries)
: launch_params_metal(_launch_params_metal), metal_ancillaries(_metal_ancillaries)
{}
/* texture fetch adapter functions */
typedef uint64_t ccl_gpu_tex_object;
template<typename T>
inline __attribute__((__always_inline__))
T ccl_gpu_tex_object_read_2D(ccl_gpu_tex_object tex, float x, float y) const {
kernel_assert(0);
return 0;
}
template<typename T>
inline __attribute__((__always_inline__))
T ccl_gpu_tex_object_read_3D(ccl_gpu_tex_object tex, float x, float y, float z) const {
kernel_assert(0);
return 0;
}
// texture2d
template<>
inline __attribute__((__always_inline__))
float4 ccl_gpu_tex_object_read_2D(ccl_gpu_tex_object tex, float x, float y) const {
const uint tid(tex);
const uint sid(tex >> 32);
return metal_ancillaries->textures_2d[tid].tex.sample(metal_samplers[sid], float2(x, y));
}
template<>
inline __attribute__((__always_inline__))
float ccl_gpu_tex_object_read_2D(ccl_gpu_tex_object tex, float x, float y) const {
const uint tid(tex);
const uint sid(tex >> 32);
return metal_ancillaries->textures_2d[tid].tex.sample(metal_samplers[sid], float2(x, y)).x;
}
// texture3d
template<>
inline __attribute__((__always_inline__))
float4 ccl_gpu_tex_object_read_3D(ccl_gpu_tex_object tex, float x, float y, float z) const {
const uint tid(tex);
const uint sid(tex >> 32);
return metal_ancillaries->textures_3d[tid].tex.sample(metal_samplers[sid], float3(x, y, z));
}
template<>
inline __attribute__((__always_inline__))
float ccl_gpu_tex_object_read_3D(ccl_gpu_tex_object tex, float x, float y, float z) const {
const uint tid(tex);
const uint sid(tex >> 32);
return metal_ancillaries->textures_3d[tid].tex.sample(metal_samplers[sid], float3(x, y, z)).x;
}
# include "kernel/device/gpu/image.h"
// clang-format on
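
The specializations above decode a 64-bit ccl_gpu_tex_object into a texture slot (low 32 bits) and a sampler index (high 32 bits). A host-side sketch of that packing convention (my illustration):

#include <cassert>
#include <cstdint>

int main()
{
  const uint32_t tid = 7, sid = 3; /* texture slot, SamplerType index */
  const uint64_t handle = (uint64_t(sid) << 32) | tid;
  assert(uint32_t(handle) == tid);       /* const uint tid(tex);       */
  assert(uint32_t(handle >> 32) == sid); /* const uint sid(tex >> 32); */
  return 0;
}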


@ -0,0 +1,23 @@
/*
* Copyright 2021 Blender Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
}
; /* end of MetalKernelContext class definition */
/* Silently redirect into the MetalKernelContext instance */
/* NOTE: These macros will need maintaining as entry-points change. */
#undef kernel_integrator_state
#define kernel_integrator_state context.launch_params_metal.__integrator_state


@ -0,0 +1,51 @@
/*
* Copyright 2021 Blender Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* Constant Globals */
#include "kernel/types.h"
#include "kernel/util/profiling.h"
#include "kernel/integrator/state.h"
CCL_NAMESPACE_BEGIN
typedef struct KernelParamsMetal {
#define KERNEL_TEX(type, name) ccl_constant type *name;
#include "kernel/textures.h"
#undef KERNEL_TEX
const IntegratorStateGPU __integrator_state;
const KernelData data;
} KernelParamsMetal;
typedef struct KernelGlobalsGPU {
int unused[1];
} KernelGlobalsGPU;
typedef ccl_global const KernelGlobalsGPU *ccl_restrict KernelGlobals;
#define kernel_data launch_params_metal.data
#define kernel_integrator_state launch_params_metal.__integrator_state
/* data lookup defines */
#define kernel_tex_fetch(tex, index) launch_params_metal.tex[index]
#define kernel_tex_array(tex) launch_params_metal.tex
CCL_NAMESPACE_END


@ -0,0 +1,25 @@
/*
* Copyright 2021 Blender Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* Metal kernel entry points */
// clang-format off
#include "kernel/device/metal/compat.h"
#include "kernel/device/metal/globals.h"
#include "kernel/device/gpu/kernel.h"
// clang-format on


@ -76,6 +76,7 @@ typedef unsigned long long uint64_t;
#define ccl_gpu_block_idx_x (blockIdx.x)
#define ccl_gpu_grid_dim_x (gridDim.x)
#define ccl_gpu_warp_size (warpSize)
#define ccl_gpu_thread_mask(thread_warp) uint(0xFFFFFFFF >> (ccl_gpu_warp_size - thread_warp))
#define ccl_gpu_global_id_x() (ccl_gpu_block_idx_x * ccl_gpu_block_dim_x + ccl_gpu_thread_idx_x)
#define ccl_gpu_global_size_x() (ccl_gpu_grid_dim_x * ccl_gpu_block_dim_x)


@ -57,7 +57,7 @@ extern "C" __global__ void __raygen__kernel_optix_integrator_intersect_closest()
const int global_index = optixGetLaunchIndex().x;
const int path_index = (__params.path_index_array) ? __params.path_index_array[global_index] :
global_index;
integrator_intersect_closest(nullptr, path_index);
integrator_intersect_closest(nullptr, path_index, __params.render_buffer);
}
extern "C" __global__ void __raygen__kernel_optix_integrator_intersect_shadow()


@ -33,62 +33,72 @@ CCL_NAMESPACE_BEGIN
* them separately. */
ccl_device_inline void bsdf_eval_init(ccl_private BsdfEval *eval,
const bool is_diffuse,
const ClosureType closure_type,
float3 value)
{
eval->diffuse = zero_float3();
eval->glossy = zero_float3();
if (is_diffuse) {
if (CLOSURE_IS_BSDF_DIFFUSE(closure_type)) {
eval->diffuse = value;
}
else {
else if (CLOSURE_IS_BSDF_GLOSSY(closure_type)) {
eval->glossy = value;
}
eval->sum = value;
}
ccl_device_inline void bsdf_eval_accum(ccl_private BsdfEval *eval,
const bool is_diffuse,
float3 value,
float mis_weight)
const ClosureType closure_type,
float3 value)
{
value *= mis_weight;
if (is_diffuse) {
if (CLOSURE_IS_BSDF_DIFFUSE(closure_type)) {
eval->diffuse += value;
}
else {
else if (CLOSURE_IS_BSDF_GLOSSY(closure_type)) {
eval->glossy += value;
}
eval->sum += value;
}
ccl_device_inline bool bsdf_eval_is_zero(ccl_private BsdfEval *eval)
{
return is_zero(eval->diffuse) && is_zero(eval->glossy);
return is_zero(eval->sum);
}
ccl_device_inline void bsdf_eval_mul(ccl_private BsdfEval *eval, float value)
{
eval->diffuse *= value;
eval->glossy *= value;
eval->sum *= value;
}
ccl_device_inline void bsdf_eval_mul3(ccl_private BsdfEval *eval, float3 value)
{
eval->diffuse *= value;
eval->glossy *= value;
eval->sum *= value;
}
ccl_device_inline float3 bsdf_eval_sum(ccl_private const BsdfEval *eval)
{
return eval->diffuse + eval->glossy;
return eval->sum;
}
ccl_device_inline float3 bsdf_eval_diffuse_glossy_ratio(ccl_private const BsdfEval *eval)
ccl_device_inline float3 bsdf_eval_pass_diffuse_weight(ccl_private const BsdfEval *eval)
{
/* Ratio of diffuse and glossy to recover proportions for writing to render pass.
/* Ratio of diffuse weight to recover proportions for writing to render pass.
* We assume reflection, transmission and volume scatter to be exclusive. */
return safe_divide_float3_float3(eval->diffuse, eval->diffuse + eval->glossy);
return safe_divide_float3_float3(eval->diffuse, eval->sum);
}
ccl_device_inline float3 bsdf_eval_pass_glossy_weight(ccl_private const BsdfEval *eval)
{
/* Ratio of glossy weight to recover proportions for writing to render pass.
* We assume reflection, transmission and volume scatter to be exclusive. */
return safe_divide_float3_float3(eval->glossy, eval->sum);
}
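
A worked example for the two weights above, with hypothetical values: if the evaluation accumulated diffuse = 0.2, glossy = 0.5 and sum = 1.0, then pass_diffuse_weight = 0.2 and pass_glossy_weight = 0.5, and the accumulation code later reconstructs the transmission share as 1.0 - 0.2 - 0.5 = 0.3, which is why no separate transmission weight needs to be stored.
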
/* --------------------------------------------------------------------
@ -141,7 +151,8 @@ ccl_device_forceinline ccl_global float *kernel_accum_pixel_render_buffer(
ccl_device_inline int kernel_accum_sample(KernelGlobals kg,
ConstIntegratorState state,
ccl_global float *ccl_restrict render_buffer,
int sample)
int sample,
int sample_offset)
{
if (kernel_data.film.pass_sample_count == PASS_UNUSED) {
return sample;
@ -149,7 +160,8 @@ ccl_device_inline int kernel_accum_sample(KernelGlobals kg,
ccl_global float *buffer = kernel_accum_pixel_render_buffer(kg, state, render_buffer);
return atomic_fetch_and_add_uint32((uint *)(buffer) + kernel_data.film.pass_sample_count, 1);
return atomic_fetch_and_add_uint32((uint *)(buffer) + kernel_data.film.pass_sample_count, 1) +
sample_offset;
}
ccl_device void kernel_accum_adaptive_buffer(KernelGlobals kg,
@ -351,37 +363,47 @@ ccl_device_inline void kernel_accum_emission_or_background_pass(KernelGlobals kg
/* Directly visible, write to emission or background pass. */
pass_offset = pass;
}
else if (path_flag & (PATH_RAY_REFLECT_PASS | PATH_RAY_TRANSMISSION_PASS)) {
/* Indirectly visible through reflection. */
const int glossy_pass_offset = (path_flag & PATH_RAY_REFLECT_PASS) ?
((INTEGRATOR_STATE(state, path, bounce) == 1) ?
kernel_data.film.pass_glossy_direct :
kernel_data.film.pass_glossy_indirect) :
((INTEGRATOR_STATE(state, path, bounce) == 1) ?
kernel_data.film.pass_transmission_direct :
kernel_data.film.pass_transmission_indirect);
else if (kernel_data.kernel_features & KERNEL_FEATURE_LIGHT_PASSES) {
if (path_flag & PATH_RAY_SURFACE_PASS) {
/* Indirectly visible through reflection. */
const float3 diffuse_weight = INTEGRATOR_STATE(state, path, pass_diffuse_weight);
const float3 glossy_weight = INTEGRATOR_STATE(state, path, pass_glossy_weight);
if (glossy_pass_offset != PASS_UNUSED) {
/* Glossy is a subset of the throughput, reconstruct it here using the
* diffuse-glossy ratio. */
const float3 ratio = INTEGRATOR_STATE(state, path, diffuse_glossy_ratio);
const float3 glossy_contribution = (one_float3() - ratio) * contribution;
kernel_write_pass_float3(buffer + glossy_pass_offset, glossy_contribution);
}
/* Glossy */
const int glossy_pass_offset = ((INTEGRATOR_STATE(state, path, bounce) == 1) ?
kernel_data.film.pass_glossy_direct :
kernel_data.film.pass_glossy_indirect);
if (glossy_pass_offset != PASS_UNUSED) {
kernel_write_pass_float3(buffer + glossy_pass_offset, glossy_weight * contribution);
}
/* Reconstruct diffuse subset of throughput. */
pass_offset = (INTEGRATOR_STATE(state, path, bounce) == 1) ?
kernel_data.film.pass_diffuse_direct :
kernel_data.film.pass_diffuse_indirect;
if (pass_offset != PASS_UNUSED) {
contribution *= INTEGRATOR_STATE(state, path, diffuse_glossy_ratio);
/* Transmission */
const int transmission_pass_offset = ((INTEGRATOR_STATE(state, path, bounce) == 1) ?
kernel_data.film.pass_transmission_direct :
kernel_data.film.pass_transmission_indirect);
if (transmission_pass_offset != PASS_UNUSED) {
/* Transmission is what remains after diffuse and glossy are subtracted; it is not stored
* explicitly, to save GPU memory. */
const float3 transmission_weight = one_float3() - diffuse_weight - glossy_weight;
kernel_write_pass_float3(buffer + transmission_pass_offset,
transmission_weight * contribution);
}
/* Reconstruct diffuse subset of throughput. */
pass_offset = (INTEGRATOR_STATE(state, path, bounce) == 1) ?
kernel_data.film.pass_diffuse_direct :
kernel_data.film.pass_diffuse_indirect;
if (pass_offset != PASS_UNUSED) {
contribution *= diffuse_weight;
}
}
else if (path_flag & PATH_RAY_VOLUME_PASS) {
/* Indirectly visible through volume. */
pass_offset = (INTEGRATOR_STATE(state, path, bounce) == 1) ?
kernel_data.film.pass_volume_direct :
kernel_data.film.pass_volume_indirect;
}
}
else if (path_flag & PATH_RAY_VOLUME_PASS) {
/* Indirectly visible through volume. */
pass_offset = (INTEGRATOR_STATE(state, path, bounce) == 1) ?
kernel_data.film.pass_volume_direct :
kernel_data.film.pass_volume_indirect;
}
/* Single write call for GPU coherence. */
@ -426,45 +448,56 @@ ccl_device_inline void kernel_accum_light(KernelGlobals kg,
#ifdef __PASSES__
if (kernel_data.film.light_pass_flag & PASS_ANY) {
const uint32_t path_flag = INTEGRATOR_STATE(state, shadow_path, flag);
int pass_offset = PASS_UNUSED;
if (path_flag & (PATH_RAY_REFLECT_PASS | PATH_RAY_TRANSMISSION_PASS)) {
/* Indirectly visible through reflection. */
const int glossy_pass_offset = (path_flag & PATH_RAY_REFLECT_PASS) ?
((INTEGRATOR_STATE(state, shadow_path, bounce) == 0) ?
kernel_data.film.pass_glossy_direct :
kernel_data.film.pass_glossy_indirect) :
((INTEGRATOR_STATE(state, shadow_path, bounce) == 0) ?
kernel_data.film.pass_transmission_direct :
kernel_data.film.pass_transmission_indirect);
if (kernel_data.kernel_features & KERNEL_FEATURE_LIGHT_PASSES) {
int pass_offset = PASS_UNUSED;
if (glossy_pass_offset != PASS_UNUSED) {
/* Glossy is a subset of the throughput, reconstruct it here using the
* diffuse-glossy ratio. */
const float3 ratio = INTEGRATOR_STATE(state, shadow_path, diffuse_glossy_ratio);
const float3 glossy_contribution = (one_float3() - ratio) * contribution;
kernel_write_pass_float3(buffer + glossy_pass_offset, glossy_contribution);
if (path_flag & PATH_RAY_SURFACE_PASS) {
/* Indirectly visible through reflection. */
const float3 diffuse_weight = INTEGRATOR_STATE(state, shadow_path, pass_diffuse_weight);
const float3 glossy_weight = INTEGRATOR_STATE(state, shadow_path, pass_glossy_weight);
/* Glossy */
const int glossy_pass_offset = ((INTEGRATOR_STATE(state, shadow_path, bounce) == 0) ?
kernel_data.film.pass_glossy_direct :
kernel_data.film.pass_glossy_indirect);
if (glossy_pass_offset != PASS_UNUSED) {
kernel_write_pass_float3(buffer + glossy_pass_offset, glossy_weight * contribution);
}
/* Transmission */
const int transmission_pass_offset = ((INTEGRATOR_STATE(state, shadow_path, bounce) == 0) ?
kernel_data.film.pass_transmission_direct :
kernel_data.film.pass_transmission_indirect);
if (transmission_pass_offset != PASS_UNUSED) {
/* Transmission is what remains after diffuse and glossy are subtracted; it is not stored
* explicitly, to save GPU memory. */
const float3 transmission_weight = one_float3() - diffuse_weight - glossy_weight;
kernel_write_pass_float3(buffer + transmission_pass_offset,
transmission_weight * contribution);
}
/* Reconstruct diffuse subset of throughput. */
pass_offset = (INTEGRATOR_STATE(state, shadow_path, bounce) == 0) ?
kernel_data.film.pass_diffuse_direct :
kernel_data.film.pass_diffuse_indirect;
if (pass_offset != PASS_UNUSED) {
contribution *= diffuse_weight;
}
}
else if (path_flag & PATH_RAY_VOLUME_PASS) {
/* Indirectly visible through volume. */
pass_offset = (INTEGRATOR_STATE(state, shadow_path, bounce) == 0) ?
kernel_data.film.pass_volume_direct :
kernel_data.film.pass_volume_indirect;
}
/* Reconstruct diffuse subset of throughput. */
pass_offset = (INTEGRATOR_STATE(state, shadow_path, bounce) == 0) ?
kernel_data.film.pass_diffuse_direct :
kernel_data.film.pass_diffuse_indirect;
/* Single write call for GPU coherence. */
if (pass_offset != PASS_UNUSED) {
contribution *= INTEGRATOR_STATE(state, shadow_path, diffuse_glossy_ratio);
kernel_write_pass_float3(buffer + pass_offset, contribution);
}
}
else if (path_flag & PATH_RAY_VOLUME_PASS) {
/* Indirectly visible through volume. */
pass_offset = (INTEGRATOR_STATE(state, shadow_path, bounce) == 0) ?
kernel_data.film.pass_volume_direct :
kernel_data.film.pass_volume_indirect;
}
/* Single write call for GPU coherence. */
if (pass_offset != PASS_UNUSED) {
kernel_write_pass_float3(buffer + pass_offset, contribution);
}
/* Write shadow pass. */
if (kernel_data.film.pass_shadow != PASS_UNUSED && (path_flag & PATH_RAY_SHADOW_FOR_LIGHT) &&
@ -540,11 +573,10 @@ ccl_device_inline void kernel_accum_background(KernelGlobals kg,
/* Write emission to render buffer. */
ccl_device_inline void kernel_accum_emission(KernelGlobals kg,
ConstIntegratorState state,
const float3 throughput,
const float3 L,
ccl_global float *ccl_restrict render_buffer)
{
float3 contribution = throughput * L;
float3 contribution = L;
kernel_accum_clamp(kg, &contribution, INTEGRATOR_STATE(state, path, bounce) - 1);
ccl_global float *buffer = kernel_accum_pixel_render_buffer(kg, state, render_buffer);

View File

@ -160,40 +160,6 @@ ccl_device_forceinline void kernel_write_denoising_features_volume(KernelGlobals
}
#endif /* __DENOISING_FEATURES__ */
#ifdef __SHADOW_CATCHER__
/* Write shadow catcher passes on a bounce from the shadow catcher object. */
ccl_device_forceinline void kernel_write_shadow_catcher_bounce_data(
KernelGlobals kg,
IntegratorState state,
ccl_private const ShaderData *sd,
ccl_global float *ccl_restrict render_buffer)
{
if (!kernel_data.integrator.has_shadow_catcher) {
return;
}
kernel_assert(kernel_data.film.pass_shadow_catcher_sample_count != PASS_UNUSED);
kernel_assert(kernel_data.film.pass_shadow_catcher_matte != PASS_UNUSED);
if (!kernel_shadow_catcher_is_path_split_bounce(kg, state, sd->object_flag)) {
return;
}
ccl_global float *buffer = kernel_pass_pixel_render_buffer(kg, state, render_buffer);
/* Count sample for the shadow catcher object. */
kernel_write_pass_float(buffer + kernel_data.film.pass_shadow_catcher_sample_count, 1.0f);
/* Since the split is done, the sample does not contribute to the matte, so accumulate it as
* transparency to the matte. */
const float3 throughput = INTEGRATOR_STATE(state, path, throughput);
kernel_write_pass_float(buffer + kernel_data.film.pass_shadow_catcher_matte + 3,
average(throughput));
}
#endif /* __SHADOW_CATCHER__ */
ccl_device_inline size_t kernel_write_id_pass(ccl_global float *ccl_restrict buffer,
size_t depth,
float id,

View File

@ -65,7 +65,8 @@ ccl_device bool integrator_init_from_bake(KernelGlobals kg,
}
/* Always count the sample, even if the camera sample will reject the ray. */
const int sample = kernel_accum_sample(kg, state, render_buffer, scheduled_sample);
const int sample = kernel_accum_sample(
kg, state, render_buffer, scheduled_sample, tile->sample_offset);
/* Setup render buffers. */
const int index = INTEGRATOR_STATE(state, path, render_pixel_index);

View File

@ -89,7 +89,8 @@ ccl_device bool integrator_init_from_camera(KernelGlobals kg,
* This logic allows us both to count the actual number of samples per pixel, and to add samples
* to this pixel after it has converged and samples were added elsewhere (in which case the
* `scheduled_sample` will differ from the actual number of samples in this pixel). */
const int sample = kernel_accum_sample(kg, state, render_buffer, scheduled_sample);
const int sample = kernel_accum_sample(
kg, state, render_buffer, scheduled_sample, tile->sample_offset);
/* Initialize random number seed for path. */
const uint rng_hash = path_rng_hash_init(kg, sample, x, y);

View File

@ -88,7 +88,10 @@ ccl_device_forceinline bool integrator_intersect_terminate(KernelGlobals kg,
#ifdef __SHADOW_CATCHER__
/* Split path if a shadow catcher was hit. */
ccl_device_forceinline void integrator_split_shadow_catcher(
KernelGlobals kg, IntegratorState state, ccl_private const Intersection *ccl_restrict isect)
KernelGlobals kg,
IntegratorState state,
ccl_private const Intersection *ccl_restrict isect,
ccl_global float *ccl_restrict render_buffer)
{
/* Test if we hit a shadow catcher object, and potentially split the path to continue tracing two
* paths from here. */
@ -97,6 +100,8 @@ ccl_device_forceinline void integrator_split_shadow_catcher(
return;
}
kernel_write_shadow_catcher_bounce_data(kg, state, render_buffer);
/* Mark state as having done a shadow catcher split so that it stops contributing to
* the shadow catcher matte pass, but keeps contributing to the combined pass. */
INTEGRATOR_STATE_WRITE(state, path, flag) |= PATH_RAY_SHADOW_CATCHER_HIT;
@ -191,6 +196,7 @@ ccl_device_forceinline void integrator_intersect_next_kernel(
KernelGlobals kg,
IntegratorState state,
ccl_private const Intersection *ccl_restrict isect,
ccl_global float *ccl_restrict render_buffer,
const bool hit)
{
/* Continue with volume kernel if we are inside a volume, regardless if we hit anything. */
@ -233,7 +239,7 @@ ccl_device_forceinline void integrator_intersect_next_kernel(
#ifdef __SHADOW_CATCHER__
/* Handle shadow catcher. */
integrator_split_shadow_catcher(kg, state, isect);
integrator_split_shadow_catcher(kg, state, isect, render_buffer);
#endif
}
else {
@ -253,7 +259,10 @@ ccl_device_forceinline void integrator_intersect_next_kernel(
* volume shading and termination testing have already been done. */
template<uint32_t current_kernel>
ccl_device_forceinline void integrator_intersect_next_kernel_after_volume(
KernelGlobals kg, IntegratorState state, ccl_private const Intersection *ccl_restrict isect)
KernelGlobals kg,
IntegratorState state,
ccl_private const Intersection *ccl_restrict isect,
ccl_global float *ccl_restrict render_buffer)
{
if (isect->prim != PRIM_NONE) {
/* Hit a surface, continue with light or surface kernel. */
@ -278,7 +287,7 @@ ccl_device_forceinline void integrator_intersect_next_kernel_after_volume(
#ifdef __SHADOW_CATCHER__
/* Handle shadow catcher. */
integrator_split_shadow_catcher(kg, state, isect);
integrator_split_shadow_catcher(kg, state, isect, render_buffer);
#endif
return;
}
@ -290,7 +299,9 @@ ccl_device_forceinline void integrator_intersect_next_kernel_after_volume(
}
}
ccl_device void integrator_intersect_closest(KernelGlobals kg, IntegratorState state)
ccl_device void integrator_intersect_closest(KernelGlobals kg,
IntegratorState state,
ccl_global float *ccl_restrict render_buffer)
{
PROFILING_INIT(kg, PROFILING_INTERSECT_CLOSEST);
@ -341,7 +352,7 @@ ccl_device void integrator_intersect_closest(KernelGlobals kg, IntegratorState s
/* Setup up next kernel to be executed. */
integrator_intersect_next_kernel<DEVICE_KERNEL_INTEGRATOR_INTERSECT_CLOSEST>(
kg, state, &isect, hit);
kg, state, &isect, render_buffer, hit);
}
CCL_NAMESPACE_END

View File

@ -76,7 +76,7 @@ ccl_device void integrator_megakernel(KernelGlobals kg,
if (queued_kernel) {
switch (queued_kernel) {
case DEVICE_KERNEL_INTEGRATOR_INTERSECT_CLOSEST:
integrator_intersect_closest(kg, state);
integrator_intersect_closest(kg, state, render_buffer);
break;
case DEVICE_KERNEL_INTEGRATOR_SHADE_BACKGROUND:
integrator_shade_background(kg, state, render_buffer);

View File

@ -185,7 +185,7 @@ ccl_device_inline void path_state_next(KernelGlobals kg, IntegratorState state,
/* Render pass categories. */
if (bounce == 1) {
flag |= (label & LABEL_TRANSMIT) ? PATH_RAY_TRANSMISSION_PASS : PATH_RAY_REFLECT_PASS;
flag |= PATH_RAY_SURFACE_PASS;
}
}

View File

@ -175,7 +175,7 @@ ccl_device_inline void integrate_distant_lights(KernelGlobals kg,
/* Write to render buffer. */
const float3 throughput = INTEGRATOR_STATE(state, path, throughput);
kernel_accum_emission(kg, state, throughput, light_eval, render_buffer);
kernel_accum_emission(kg, state, throughput * light_eval, render_buffer);
}
}
}

View File

@ -90,7 +90,7 @@ ccl_device_inline void integrate_light(KernelGlobals kg,
/* Write to render buffer. */
const float3 throughput = INTEGRATOR_STATE(state, path, throughput);
kernel_accum_emission(kg, state, throughput, light_eval, render_buffer);
kernel_accum_emission(kg, state, throughput * light_eval, render_buffer);
}
ccl_device void integrator_shade_light(KernelGlobals kg,

View File

@ -101,7 +101,7 @@ ccl_device_forceinline void integrate_surface_emission(KernelGlobals kg,
}
const float3 throughput = INTEGRATOR_STATE(state, path, throughput);
kernel_accum_emission(kg, state, throughput, L, render_buffer);
kernel_accum_emission(kg, state, throughput * L, render_buffer);
}
#endif /* __EMISSION__ */
@ -191,14 +191,18 @@ ccl_device_forceinline void integrate_surface_direct_light(KernelGlobals kg,
const uint16_t transparent_bounce = INTEGRATOR_STATE(state, path, transparent_bounce);
uint32_t shadow_flag = INTEGRATOR_STATE(state, path, flag);
shadow_flag |= (is_light) ? PATH_RAY_SHADOW_FOR_LIGHT : 0;
shadow_flag |= (is_transmission) ? PATH_RAY_TRANSMISSION_PASS : PATH_RAY_REFLECT_PASS;
shadow_flag |= PATH_RAY_SURFACE_PASS;
const float3 throughput = INTEGRATOR_STATE(state, path, throughput) * bsdf_eval_sum(&bsdf_eval);
if (kernel_data.kernel_features & KERNEL_FEATURE_LIGHT_PASSES) {
const float3 diffuse_glossy_ratio = (bounce == 0) ?
bsdf_eval_diffuse_glossy_ratio(&bsdf_eval) :
INTEGRATOR_STATE(state, path, diffuse_glossy_ratio);
INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, diffuse_glossy_ratio) = diffuse_glossy_ratio;
const float3 pass_diffuse_weight = (bounce == 0) ?
bsdf_eval_pass_diffuse_weight(&bsdf_eval) :
INTEGRATOR_STATE(state, path, pass_diffuse_weight);
const float3 pass_glossy_weight = (bounce == 0) ?
bsdf_eval_pass_glossy_weight(&bsdf_eval) :
INTEGRATOR_STATE(state, path, pass_glossy_weight);
INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, pass_diffuse_weight) = pass_diffuse_weight;
INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, pass_glossy_weight) = pass_glossy_weight;
}
INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, render_pixel_index) = INTEGRATOR_STATE(
@ -283,7 +287,9 @@ ccl_device_forceinline int integrate_surface_bsdf_bssrdf_bounce(
if (kernel_data.kernel_features & KERNEL_FEATURE_LIGHT_PASSES) {
if (INTEGRATOR_STATE(state, path, bounce) == 0) {
INTEGRATOR_STATE_WRITE(state, path, diffuse_glossy_ratio) = bsdf_eval_diffuse_glossy_ratio(
INTEGRATOR_STATE_WRITE(state, path, pass_diffuse_weight) = bsdf_eval_pass_diffuse_weight(
&bsdf_eval);
INTEGRATOR_STATE_WRITE(state, path, pass_glossy_weight) = bsdf_eval_pass_glossy_weight(
&bsdf_eval);
}
}
@ -445,7 +451,7 @@ ccl_device bool integrate_surface(KernelGlobals kg,
}
#endif
shader_prepare_surface_closures(kg, state, &sd);
shader_prepare_surface_closures(kg, state, &sd, path_flag);
#ifdef __HOLDOUT__
/* Evaluate holdout. */
@ -492,10 +498,6 @@ ccl_device bool integrate_surface(KernelGlobals kg,
kernel_write_denoising_features_surface(kg, state, &sd, render_buffer);
#endif
#ifdef __SHADOW_CATCHER__
kernel_write_shadow_catcher_bounce_data(kg, state, &sd, render_buffer);
#endif
/* Direct light. */
PROFILING_EVENT(PROFILING_SHADE_SURFACE_DIRECT_LIGHT);
integrate_surface_direct_light(kg, state, &sd, &rng_state);

View File

@ -608,7 +608,7 @@ ccl_device_forceinline void volume_integrate_heterogeneous(
if (!result.indirect_scatter) {
const float3 emission = volume_emission_integrate(
&coeff, closure_flag, transmittance, dt);
accum_emission += emission;
accum_emission += result.indirect_throughput * emission;
}
}
@ -661,7 +661,7 @@ ccl_device_forceinline void volume_integrate_heterogeneous(
/* Write accumulated emission. */
if (!is_zero(accum_emission)) {
kernel_accum_emission(kg, state, result.indirect_throughput, accum_emission, render_buffer);
kernel_accum_emission(kg, state, accum_emission, render_buffer);
}
# ifdef __DENOISING_FEATURES__
@ -794,10 +794,11 @@ ccl_device_forceinline void integrate_volume_direct_light(
const float3 throughput_phase = throughput * bsdf_eval_sum(&phase_eval);
if (kernel_data.kernel_features & KERNEL_FEATURE_LIGHT_PASSES) {
const float3 diffuse_glossy_ratio = (bounce == 0) ?
one_float3() :
INTEGRATOR_STATE(state, path, diffuse_glossy_ratio);
INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, diffuse_glossy_ratio) = diffuse_glossy_ratio;
const float3 pass_diffuse_weight = (bounce == 0) ?
one_float3() :
INTEGRATOR_STATE(state, path, pass_diffuse_weight);
INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, pass_diffuse_weight) = pass_diffuse_weight;
INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, pass_glossy_weight) = zero_float3();
}
INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, render_pixel_index) = INTEGRATOR_STATE(
@ -876,7 +877,8 @@ ccl_device_forceinline bool integrate_volume_phase_scatter(
INTEGRATOR_STATE_WRITE(state, path, throughput) = throughput_phase;
if (kernel_data.kernel_features & KERNEL_FEATURE_LIGHT_PASSES) {
INTEGRATOR_STATE_WRITE(state, path, diffuse_glossy_ratio) = one_float3();
INTEGRATOR_STATE_WRITE(state, path, pass_diffuse_weight) = one_float3();
INTEGRATOR_STATE_WRITE(state, path, pass_glossy_weight) = zero_float3();
}
/* Update path state */
@ -1024,7 +1026,7 @@ ccl_device void integrator_shade_volume(KernelGlobals kg,
else {
/* Continue to background, light or surface. */
integrator_intersect_next_kernel_after_volume<DEVICE_KERNEL_INTEGRATOR_SHADE_VOLUME>(
kg, state, &isect);
kg, state, &isect, render_buffer);
return;
}
#endif /* __VOLUME__ */

View File

@ -105,8 +105,45 @@ ccl_device_inline void shader_copy_volume_phases(ccl_private ShaderVolumePhases
ccl_device_inline void shader_prepare_surface_closures(KernelGlobals kg,
ConstIntegratorState state,
ccl_private ShaderData *sd)
ccl_private ShaderData *sd,
const uint32_t path_flag)
{
/* Filter out closures. */
if (kernel_data.integrator.filter_closures) {
if (kernel_data.integrator.filter_closures & FILTER_CLOSURE_EMISSION) {
sd->closure_emission_background = zero_float3();
}
if (kernel_data.integrator.filter_closures & FILTER_CLOSURE_DIRECT_LIGHT) {
sd->flag &= ~SD_BSDF_HAS_EVAL;
}
if (path_flag & PATH_RAY_CAMERA) {
for (int i = 0; i < sd->num_closure; i++) {
ccl_private ShaderClosure *sc = &sd->closure[i];
if (CLOSURE_IS_BSDF_DIFFUSE(sc->type)) {
if (kernel_data.integrator.filter_closures & FILTER_CLOSURE_DIFFUSE) {
sc->type = CLOSURE_NONE_ID;
sc->sample_weight = 0.0f;
}
}
else if (CLOSURE_IS_BSDF_GLOSSY(sc->type)) {
if (kernel_data.integrator.filter_closures & FILTER_CLOSURE_GLOSSY) {
sc->type = CLOSURE_NONE_ID;
sc->sample_weight = 0.0f;
}
}
else if (CLOSURE_IS_BSDF_TRANSMISSION(sc->type)) {
if (kernel_data.integrator.filter_closures & FILTER_CLOSURE_TRANSMISSION) {
sc->type = CLOSURE_NONE_ID;
sc->sample_weight = 0.0f;
}
}
}
}
}
/* Defensive sampling.
*
* We can likely also do defensive sampling at deeper bounces, particularly
@ -209,8 +246,7 @@ ccl_device_inline float _shader_bsdf_multi_eval(KernelGlobals kg,
float3 eval = bsdf_eval(kg, sd, sc, omega_in, is_transmission, &bsdf_pdf);
if (bsdf_pdf != 0.0f) {
const bool is_diffuse = CLOSURE_IS_BSDF_DIFFUSE(sc->type);
bsdf_eval_accum(result_eval, is_diffuse, eval * sc->weight, 1.0f);
bsdf_eval_accum(result_eval, sc->type, eval * sc->weight);
sum_pdf += bsdf_pdf * sc->sample_weight;
}
}
@ -235,7 +271,7 @@ ccl_device_inline
ccl_private BsdfEval *bsdf_eval,
const uint light_shader_flags)
{
bsdf_eval_init(bsdf_eval, false, zero_float3());
bsdf_eval_init(bsdf_eval, CLOSURE_NONE_ID, zero_float3());
return _shader_bsdf_multi_eval(
kg, sd, omega_in, is_transmission, NULL, bsdf_eval, 0.0f, 0.0f, light_shader_flags);
@ -328,8 +364,7 @@ ccl_device int shader_bsdf_sample_closure(KernelGlobals kg,
label = bsdf_sample(kg, sd, sc, randu, randv, &eval, omega_in, domega_in, pdf);
if (*pdf != 0.0f) {
const bool is_diffuse = CLOSURE_IS_BSDF_DIFFUSE(sc->type);
bsdf_eval_init(bsdf_eval, is_diffuse, eval * sc->weight);
bsdf_eval_init(bsdf_eval, sc->type, eval * sc->weight);
if (sd->num_closure > 1) {
const bool is_transmission = shader_bsdf_is_transmission(sd, *omega_in);
@ -655,7 +690,7 @@ ccl_device_inline float _shader_volume_phase_multi_eval(
float3 eval = volume_phase_eval(sd, svc, omega_in, &phase_pdf);
if (phase_pdf != 0.0f) {
bsdf_eval_accum(result_eval, false, eval, 1.0f);
bsdf_eval_accum(result_eval, CLOSURE_VOLUME_HENYEY_GREENSTEIN_ID, eval);
sum_pdf += phase_pdf * svc->sample_weight;
}
@ -671,7 +706,7 @@ ccl_device float shader_volume_phase_eval(KernelGlobals kg,
const float3 omega_in,
ccl_private BsdfEval *phase_eval)
{
bsdf_eval_init(phase_eval, false, zero_float3());
bsdf_eval_init(phase_eval, CLOSURE_VOLUME_HENYEY_GREENSTEIN_ID, zero_float3());
return _shader_volume_phase_multi_eval(sd, phases, omega_in, -1, phase_eval, 0.0f, 0.0f);
}
@ -729,7 +764,7 @@ ccl_device int shader_volume_phase_sample(KernelGlobals kg,
label = volume_phase_sample(sd, svc, randu, randv, &eval, omega_in, domega_in, pdf);
if (*pdf != 0.0f) {
bsdf_eval_init(phase_eval, false, eval);
bsdf_eval_init(phase_eval, CLOSURE_VOLUME_HENYEY_GREENSTEIN_ID, eval);
}
return label;
@ -752,7 +787,7 @@ ccl_device int shader_phase_sample_closure(KernelGlobals kg,
label = volume_phase_sample(sd, sc, randu, randv, &eval, omega_in, domega_in, pdf);
if (*pdf != 0.0f)
bsdf_eval_init(phase_eval, false, eval);
bsdf_eval_init(phase_eval, CLOSURE_VOLUME_HENYEY_GREENSTEIN_ID, eval);
return label;
}

View File

@ -16,6 +16,7 @@
#pragma once
#include "kernel/film/write_passes.h"
#include "kernel/integrator/path_state.h"
#include "kernel/integrator/state_util.h"
@ -47,7 +48,7 @@ ccl_device_inline bool kernel_shadow_catcher_is_path_split_bounce(KernelGlobals
return false;
}
if (path_flag & PATH_RAY_SHADOW_CATCHER_PASS) {
if (path_flag & PATH_RAY_SHADOW_CATCHER_HIT) {
return false;
}
@ -88,6 +89,28 @@ ccl_device_forceinline bool kernel_shadow_catcher_is_object_pass(const uint32_t
return path_flag & PATH_RAY_SHADOW_CATCHER_PASS;
}
/* Write shadow catcher passes on a bounce from the shadow catcher object. */
ccl_device_forceinline void kernel_write_shadow_catcher_bounce_data(
KernelGlobals kg, IntegratorState state, ccl_global float *ccl_restrict render_buffer)
{
kernel_assert(kernel_data.film.pass_shadow_catcher_sample_count != PASS_UNUSED);
kernel_assert(kernel_data.film.pass_shadow_catcher_matte != PASS_UNUSED);
const uint32_t render_pixel_index = INTEGRATOR_STATE(state, path, render_pixel_index);
const uint64_t render_buffer_offset = (uint64_t)render_pixel_index *
kernel_data.film.pass_stride;
ccl_global float *buffer = render_buffer + render_buffer_offset;
/* Count sample for the shadow catcher object. */
kernel_write_pass_float(buffer + kernel_data.film.pass_shadow_catcher_sample_count, 1.0f);
/* Since the split is done, the sample does not contribute to the matte, so accumulate it as
* transparency to the matte. */
const float3 throughput = INTEGRATOR_STATE(state, path, throughput);
kernel_write_pass_float(buffer + kernel_data.film.pass_shadow_catcher_matte + 3,
average(throughput));
}
#endif /* __SHADOW_CATCHER__ */
CCL_NAMESPACE_END
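
The matte write above condenses into a small sketch: once the path is split, the sample no longer contributes color to the matte, so the average of its throughput is accumulated into the matte pass alpha (component +3, assuming the matte pass stores RGBA). The buffer layout and values below are hypothetical:

#include <cstdio>

int main()
{
  float matte[4] = {0.0f, 0.0f, 0.0f, 0.0f}; /* RGBA matte pass of one pixel */
  const float throughput[3] = {0.9f, 0.6f, 0.3f};
  const float avg = (throughput[0] + throughput[1] + throughput[2]) / 3.0f;
  matte[3] += avg; /* accumulate as transparency into the alpha component */
  std::printf("matte alpha = %.2f\n", matte[3]); /* 0.60 */
  return 0;
}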

View File

@ -46,8 +46,9 @@ KERNEL_STRUCT_MEMBER(shadow_path,
float3,
unshadowed_throughput,
KERNEL_FEATURE_SHADOW_PASS | KERNEL_FEATURE_AO_ADDITIVE)
/* Ratio of throughput to distinguish diffuse and glossy render passes. */
KERNEL_STRUCT_MEMBER(shadow_path, float3, diffuse_glossy_ratio, KERNEL_FEATURE_LIGHT_PASSES)
/* Ratio of throughput to distinguish diffuse / glossy / transmission render passes. */
KERNEL_STRUCT_MEMBER(shadow_path, float3, pass_diffuse_weight, KERNEL_FEATURE_LIGHT_PASSES)
KERNEL_STRUCT_MEMBER(shadow_path, float3, pass_glossy_weight, KERNEL_FEATURE_LIGHT_PASSES)
/* Number of intersections found by ray-tracing. */
KERNEL_STRUCT_MEMBER(shadow_path, uint16_t, num_hits, KERNEL_FEATURE_PATH_TRACING)
KERNEL_STRUCT_END(shadow_path)

View File

@ -60,8 +60,9 @@ KERNEL_STRUCT_MEMBER(path, float, min_ray_pdf, KERNEL_FEATURE_PATH_TRACING)
KERNEL_STRUCT_MEMBER(path, float, continuation_probability, KERNEL_FEATURE_PATH_TRACING)
/* Throughput. */
KERNEL_STRUCT_MEMBER(path, float3, throughput, KERNEL_FEATURE_PATH_TRACING)
/* Ratio of throughput to distinguish diffuse and glossy render passes. */
KERNEL_STRUCT_MEMBER(path, float3, diffuse_glossy_ratio, KERNEL_FEATURE_LIGHT_PASSES)
/* Ratio of throughput to distinguish diffuse / glossy / transmission render passes. */
KERNEL_STRUCT_MEMBER(path, float3, pass_diffuse_weight, KERNEL_FEATURE_LIGHT_PASSES)
KERNEL_STRUCT_MEMBER(path, float3, pass_glossy_weight, KERNEL_FEATURE_LIGHT_PASSES)
/* Denoising. */
KERNEL_STRUCT_MEMBER(path, float3, denoising_feature_throughput, KERNEL_FEATURE_DENOISING)
/* Shader sorting. */

View File

@ -79,7 +79,8 @@ ccl_device int subsurface_bounce(KernelGlobals kg,
if (kernel_data.kernel_features & KERNEL_FEATURE_LIGHT_PASSES) {
if (INTEGRATOR_STATE(state, path, bounce) == 0) {
INTEGRATOR_STATE_WRITE(state, path, diffuse_glossy_ratio) = one_float3();
INTEGRATOR_STATE_WRITE(state, path, pass_diffuse_weight) = one_float3();
INTEGRATOR_STATE_WRITE(state, path, pass_glossy_weight) = zero_float3();
}
}

View File

@ -132,10 +132,12 @@ static void shaderdata_to_shaderglobals(const KernelGlobalsCPU *kg,
/* Used by render-services. */
sd->osl_globals = kg;
if (path_flag & PATH_RAY_SHADOW) {
sd->osl_path_state = nullptr;
sd->osl_shadow_path_state = (const IntegratorShadowStateCPU *)state;
}
else {
sd->osl_path_state = (const IntegratorStateCPU *)state;
sd->osl_shadow_path_state = nullptr;
}
}

View File

@ -286,27 +286,26 @@ enum PathRayFlag {
PATH_RAY_DENOISING_FEATURES = (1U << 23U),
/* Render pass categories. */
PATH_RAY_REFLECT_PASS = (1U << 24U),
PATH_RAY_TRANSMISSION_PASS = (1U << 25U),
PATH_RAY_VOLUME_PASS = (1U << 26U),
PATH_RAY_ANY_PASS = (PATH_RAY_REFLECT_PASS | PATH_RAY_TRANSMISSION_PASS | PATH_RAY_VOLUME_PASS),
PATH_RAY_SURFACE_PASS = (1U << 24U),
PATH_RAY_VOLUME_PASS = (1U << 25U),
PATH_RAY_ANY_PASS = (PATH_RAY_SURFACE_PASS | PATH_RAY_VOLUME_PASS),
/* Shadow ray is for a light or surface, or AO. */
PATH_RAY_SHADOW_FOR_LIGHT = (1U << 27U),
PATH_RAY_SHADOW_FOR_AO = (1U << 28U),
PATH_RAY_SHADOW_FOR_LIGHT = (1U << 26U),
PATH_RAY_SHADOW_FOR_AO = (1U << 27U),
/* A shadow catcher object was hit and the path was split into two. */
PATH_RAY_SHADOW_CATCHER_HIT = (1U << 29U),
PATH_RAY_SHADOW_CATCHER_HIT = (1U << 28U),
/* A shadow catcher object was hit and this path traces only shadow catchers, writing them into
* their dedicated pass for later division.
*
* NOTE: Is not covered with `PATH_RAY_ANY_PASS` because shadow catcher does special handling
* which is separate from the light passes. */
PATH_RAY_SHADOW_CATCHER_PASS = (1U << 30U),
PATH_RAY_SHADOW_CATCHER_PASS = (1U << 29U),
/* Path is evaluating background for an approximate shadow catcher with non-transparent film. */
PATH_RAY_SHADOW_CATCHER_BACKGROUND = (1U << 31U),
PATH_RAY_SHADOW_CATCHER_BACKGROUND = (1U << 30U),
};
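
Since PATH_RAY_REFLECT_PASS and PATH_RAY_TRANSMISSION_PASS collapse into a single PATH_RAY_SURFACE_PASS bit, every later flag shifts down by one and bit 31 becomes free. A throwaway sketch, with values copied from the enum above, that a compiler can verify:

#include <cstdint>

enum : uint32_t {
  SURFACE_PASS = 1u << 24u,
  VOLUME_PASS = 1u << 25u,
  SHADOW_FOR_LIGHT = 1u << 26u,
  SHADOW_FOR_AO = 1u << 27u,
  SHADOW_CATCHER_HIT = 1u << 28u,
  SHADOW_CATCHER_PASS = 1u << 29u,
  SHADOW_CATCHER_BACKGROUND = 1u << 30u,
};

/* No two flags may share a bit, and the highest must still fit in 32 bits. */
static_assert((SURFACE_PASS & VOLUME_PASS) == 0, "flags overlap");
static_assert(SHADOW_CATCHER_BACKGROUND <= (1u << 31u), "flag out of range");

int main() { return 0; }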
/* Configure ray visibility bits for rays and objects respectively,
@ -428,8 +427,19 @@ typedef enum CryptomatteType {
typedef struct BsdfEval {
float3 diffuse;
float3 glossy;
float3 sum;
} BsdfEval;
/* Closure Filter */
typedef enum FilterClosures {
FILTER_CLOSURE_EMISSION = (1 << 0),
FILTER_CLOSURE_DIFFUSE = (1 << 1),
FILTER_CLOSURE_GLOSSY = (1 << 2),
FILTER_CLOSURE_TRANSMISSION = (1 << 3),
FILTER_CLOSURE_DIRECT_LIGHT = (1 << 4),
} FilterClosures;
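
How these filter bits are meant to combine, mirroring the Integrator::device_update changes later in this diff: each disabled category ORs its bit into a mask, and the kernel tests membership with a bitwise AND. A trivial sketch with a subset of the flags:

#include <cstdio>

enum {
  FILTER_EMISSION = 1 << 0,
  FILTER_DIFFUSE = 1 << 1,
  FILTER_GLOSSY = 1 << 2,
};

int main()
{
  const bool use_diffuse = false, use_glossy = true;
  int filter_closures = 0;
  if (!use_diffuse) {
    filter_closures |= FILTER_DIFFUSE;
  }
  if (!use_glossy) {
    filter_closures |= FILTER_GLOSSY;
  }
  std::printf("filter diffuse: %d\n", (filter_closures & FILTER_DIFFUSE) != 0); /* 1 */
  std::printf("filter glossy: %d\n", (filter_closures & FILTER_GLOSSY) != 0);   /* 0 */
  return 0;
}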
/* Shader Flag */
typedef enum ShaderFlag {
@ -1186,7 +1196,11 @@ typedef struct KernelIntegrator {
int has_shadow_catcher;
float scrambling_distance;
/* Closure filter. */
int filter_closures;
/* padding */
int pad1, pad2, pad3;
} KernelIntegrator;
static_assert_align(KernelIntegrator, 16);
@ -1410,6 +1424,7 @@ typedef struct KernelWorkTile {
uint start_sample;
uint num_samples;
uint sample_offset;
int offset;
uint stride;

View File

@ -187,8 +187,6 @@ void Film::device_update(Device *device, DeviceScene *dscene, Scene *scene)
kfilm->pass_transmission_indirect = PASS_UNUSED;
kfilm->pass_volume_direct = PASS_UNUSED;
kfilm->pass_volume_indirect = PASS_UNUSED;
kfilm->pass_volume_direct = PASS_UNUSED;
kfilm->pass_volume_indirect = PASS_UNUSED;
kfilm->pass_shadow = PASS_UNUSED;
/* Mark passes as unused so that the kernel knows the pass is inaccessible. */
@ -673,13 +671,12 @@ uint Film::get_kernel_features(const Scene *scene) const
kernel_features |= KERNEL_FEATURE_DENOISING;
}
if (pass_type != PASS_NONE && pass_type != PASS_COMBINED &&
pass_type <= PASS_CATEGORY_LIGHT_END) {
if (pass_type >= PASS_DIFFUSE && pass_type <= PASS_VOLUME_INDIRECT) {
kernel_features |= KERNEL_FEATURE_LIGHT_PASSES;
}
if (pass_type == PASS_SHADOW) {
kernel_features |= KERNEL_FEATURE_SHADOW_PASS;
}
if (pass_type == PASS_SHADOW) {
kernel_features |= KERNEL_FEATURE_SHADOW_PASS;
}
if (pass_type == PASS_AO) {

View File

@ -63,6 +63,14 @@ NODE_DEFINE(Integrator)
SOCKET_BOOLEAN(caustics_reflective, "Reflective Caustics", true);
SOCKET_BOOLEAN(caustics_refractive, "Refractive Caustics", true);
SOCKET_FLOAT(filter_glossy, "Filter Glossy", 0.0f);
SOCKET_BOOLEAN(use_direct_light, "Use Direct Light", true);
SOCKET_BOOLEAN(use_indirect_light, "Use Indirect Light", true);
SOCKET_BOOLEAN(use_diffuse, "Use Diffuse", true);
SOCKET_BOOLEAN(use_glossy, "Use Glossy", true);
SOCKET_BOOLEAN(use_transmission, "Use Transmission", true);
SOCKET_BOOLEAN(use_emission, "Use Emission", true);
SOCKET_INT(seed, "Seed", 0);
SOCKET_FLOAT(sample_clamp_direct, "Sample Clamp Direct", 0.0f);
SOCKET_FLOAT(sample_clamp_indirect, "Sample Clamp Indirect", 0.0f);
@ -184,6 +192,27 @@ void Integrator::device_update(Device *device, DeviceScene *dscene, Scene *scene
kintegrator->caustics_refractive = caustics_refractive;
kintegrator->filter_glossy = (filter_glossy == 0.0f) ? FLT_MAX : 1.0f / filter_glossy;
kintegrator->filter_closures = 0;
if (!use_direct_light) {
kintegrator->filter_closures |= FILTER_CLOSURE_DIRECT_LIGHT;
}
if (!use_indirect_light) {
kintegrator->min_bounce = 1;
kintegrator->max_bounce = 1;
}
if (!use_diffuse) {
kintegrator->filter_closures |= FILTER_CLOSURE_DIFFUSE;
}
if (!use_glossy) {
kintegrator->filter_closures |= FILTER_CLOSURE_GLOSSY;
}
if (!use_transmission) {
kintegrator->filter_closures |= FILTER_CLOSURE_TRANSMISSION;
}
if (!use_emission) {
kintegrator->filter_closures |= FILTER_CLOSURE_EMISSION;
}
kintegrator->seed = seed;
kintegrator->sample_clamp_direct = (sample_clamp_direct == 0.0f) ? FLT_MAX :

View File

@ -56,6 +56,13 @@ class Integrator : public Node {
NODE_SOCKET_API(bool, caustics_refractive)
NODE_SOCKET_API(float, filter_glossy)
NODE_SOCKET_API(bool, use_direct_light);
NODE_SOCKET_API(bool, use_indirect_light);
NODE_SOCKET_API(bool, use_diffuse);
NODE_SOCKET_API(bool, use_glossy);
NODE_SOCKET_API(bool, use_transmission);
NODE_SOCKET_API(bool, use_emission);
NODE_SOCKET_API(int, seed)
NODE_SOCKET_API(float, sample_clamp_direct)

View File

@ -274,19 +274,26 @@ void OSLShaderManager::shading_system_init()
"diffuse_ancestor", /* PATH_RAY_DIFFUSE_ANCESTOR */
"__unused__", /* PATH_RAY_SINGLE_PASS_DONE */
"__unused__", /* PATH_RAY_TRANSPARENT_BACKGROUND */
"__unused__", /* PATH_RAY_TERMINATE_IMMEDIATE */
"__unused__", /* PATH_RAY_TERMINATE_AFTER_TRANSPARENT */
"__unused__", /* PATH_RAY_EMISSION */
"__unused__", /* PATH_RAY_SUBSURFACE */
"__unused__", /* PATH_RAY_DENOISING_FEATURES */
"__unused__", /* PATH_RAY_REFLECT_PASS */
"__unused__", /* PATH_RAY_TRANSMISSION_PASS */
"__unused__", /* PATH_RAY_VOLUME_PASS */
"__unused__", /* PATH_RAY_SHADOW_FOR_LIGHT */
"__unused__", /* PATH_RAY_SHADOW_CATCHER_HIT */
"__unused__", /* PATH_RAY_SHADOW_CATCHER_PASS */
/* Remaining irrelevant bits up to 32. */
"__unused__",
"__unused__",
"__unused__",
"__unused__",
"__unused__",
"__unused__",
"__unused__",
"__unused__",
"__unused__",
"__unused__",
"__unused__",
"__unused__",
"__unused__",
"__unused__",
"__unused__",
"__unused__",
"__unused__",
"__unused__",
"__unused__",
};
const int nraytypes = sizeof(raytypes) / sizeof(raytypes[0]);

View File

@ -262,6 +262,7 @@ RenderWork Session::run_update_for_next_iteration()
}
render_scheduler_.set_num_samples(params.samples);
render_scheduler_.set_start_sample(params.sample_offset);
render_scheduler_.set_time_limit(params.time_limit);
while (have_tiles) {
@ -397,7 +398,7 @@ void Session::do_delayed_reset()
/* Tile and work scheduling. */
tile_manager_.reset_scheduling(buffer_params_, get_effective_tile_size());
render_scheduler_.reset(buffer_params_, params.samples);
render_scheduler_.reset(buffer_params_, params.samples, params.sample_offset);
/* Passes. */
/* When multiple tiles are used SAMPLE_COUNT pass is used to keep track of possible partial

View File

@ -54,6 +54,7 @@ class SessionParams {
bool experimental;
int samples;
int sample_offset;
int pixel_size;
int threads;
@ -75,6 +76,7 @@ class SessionParams {
experimental = false;
samples = 1024;
sample_offset = 0;
pixel_size = 1;
threads = 0;
time_limit = 0.0;

View File

@ -29,6 +29,7 @@
#include "util/path.h"
#include "util/string.h"
#include "util/system.h"
#include "util/time.h"
#include "util/types.h"
CCL_NAMESPACE_BEGIN
@ -503,9 +504,9 @@ bool TileManager::write_tile(const RenderBuffers &tile_buffers)
}
}
DCHECK_EQ(tile_buffers.params.pass_stride, buffer_params_.pass_stride);
const double time_start = time_dt();
vector<float> pixel_storage;
DCHECK_EQ(tile_buffers.params.pass_stride, buffer_params_.pass_stride);
const BufferParams &tile_params = tile_buffers.params;
@ -515,13 +516,32 @@ bool TileManager::write_tile(const RenderBuffers &tile_buffers)
const int64_t pass_stride = tile_params.pass_stride;
const int64_t tile_row_stride = tile_params.width * pass_stride;
const int64_t xstride = pass_stride * sizeof(float);
const int64_t ystride = xstride * tile_params.width;
const int64_t zstride = ystride * tile_params.height;
vector<float> pixel_storage;
const float *pixels = tile_buffers.buffer.data() + tile_params.window_x * pass_stride +
tile_params.window_y * tile_row_stride;
/* If there is an overscan used for the tile, copy pixels into a single continuous block of
* memory without any "gaps".
* This is a workaround for a bug in OIIO (https://github.com/OpenImageIO/oiio/pull/3176).
* Our task reference: T93008. */
if (tile_params.window_x || tile_params.window_y ||
tile_params.window_width != tile_params.width ||
tile_params.window_height != tile_params.height) {
pixel_storage.resize(pass_stride * tile_params.window_width * tile_params.window_height);
float *pixels_continuous = pixel_storage.data();
const int64_t pixels_row_stride = pass_stride * tile_params.width;
const int64_t pixels_continuous_row_stride = pass_stride * tile_params.window_width;
for (int i = 0; i < tile_params.window_height; ++i) {
memcpy(pixels_continuous, pixels, sizeof(float) * pixels_continuous_row_stride);
pixels += pixels_row_stride;
pixels_continuous += pixels_continuous_row_stride;
}
pixels = pixel_storage.data();
}
VLOG(3) << "Write tile at " << tile_x << ", " << tile_y;
/* The image tile sizes in the OpenEXR file are different from the size of our big tiles. The
@ -531,6 +551,11 @@ bool TileManager::write_tile(const RenderBuffers &tile_buffers)
*
* The only thing we have to ensure is that the tile_x and tile_y are a multiple of the
* image tile size, which happens in compute_render_tile_size. */
const int64_t xstride = pass_stride * sizeof(float);
const int64_t ystride = xstride * tile_params.window_width;
const int64_t zstride = ystride * tile_params.window_height;
if (!write_state_.tile_out->write_tiles(tile_x,
tile_x + tile_params.window_width,
tile_y,
@ -548,6 +573,8 @@ bool TileManager::write_tile(const RenderBuffers &tile_buffers)
++write_state_.num_tiles_written;
VLOG(3) << "Tile written in " << time_dt() - time_start << " seconds.";
return true;
}
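
The overscan workaround above boils down to a strided-to-contiguous copy. A standalone sketch with hypothetical sizes, a window offset in x only, and one float per pixel:

#include <cstring>
#include <vector>

int main()
{
  const int width = 6, height = 3;              /* tile including overscan */
  const int window_x = 1, window_w = 4, window_h = 2;
  const int pass_stride = 1;                    /* floats per pixel */
  std::vector<float> tile(width * height * pass_stride, 1.0f);
  std::vector<float> packed(window_w * window_h * pass_stride);
  const float *pixels = tile.data() + window_x * pass_stride; /* skip overscan */
  float *pixels_continuous = packed.data();
  for (int y = 0; y < window_h; ++y) {
    std::memcpy(pixels_continuous, pixels, sizeof(float) * window_w * pass_stride);
    pixels += width * pass_stride;              /* full row stride, with gaps */
    pixels_continuous += window_w * pass_stride; /* gap-free row stride */
  }
  return 0;
}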
@ -589,6 +616,9 @@ void TileManager::finish_write_tiles()
full_buffer_written_cb(write_state_.filename);
}
VLOG(3) << "Tile file size is "
<< string_human_readable_number(path_file_size(write_state_.filename)) << " bytes.";
/* Advance the counter upon explicit finish of the file.
* Makes it possible to re-use tile manager for another scene, and avoids unnecessary increments
* of the tile-file-within-session index. */
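
For the file-size log line above, something along these lines produces the human-readable byte count; this is a hypothetical stand-in for string_human_readable_number(), not the actual Cycles utility:

#include <cstdio>
#include <string>

/* Hypothetical stand-in: format a byte count with a binary-scaled suffix. */
static std::string human_readable(unsigned long long bytes)
{
  const char *units[] = {"", "K", "M", "G", "T"};
  int u = 0;
  double v = (double)bytes;
  while (v >= 1024.0 && u < 4) {
    v /= 1024.0;
    ++u;
  }
  char buf[32];
  std::snprintf(buf, sizeof(buf), "%.1f%s", v, units[u]);
  return buf;
}

int main()
{
  std::printf("%s\n", human_readable(1536ull * 1024 * 1024).c_str()); /* 1.5G */
  return 0;
}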

View File

@ -171,4 +171,9 @@ bool Profiler::get_object(int object, uint64_t &samples, uint64_t &hits)
return true;
}
bool Profiler::active() const
{
return (worker != nullptr);
}
CCL_NAMESPACE_END

View File

@ -96,6 +96,8 @@ class Profiler {
bool get_shader(int shader, uint64_t &samples, uint64_t &hits);
bool get_object(int object, uint64_t &samples, uint64_t &hits);
bool active() const;
protected:
void run();

View File

@ -728,13 +728,6 @@ extern GHOST_TSuccess GHOST_ReleaseOpenGLContext(GHOST_ContextHandle contexthand
*/
extern unsigned int GHOST_GetContextDefaultOpenGLFramebuffer(GHOST_ContextHandle contexthandle);
/**
* Returns whether a context is rendered upside down compared to OpenGL. This only needs to be
* called if there's a non-OpenGL context, which is really the exception.
* So generally, this does not need to be called.
*/
extern int GHOST_isUpsideDownContext(GHOST_ContextHandle contexthandle);
/**
* Get the OpenGL frame-buffer handle that serves as a default frame-buffer.
*/

View File

@ -654,8 +654,8 @@ enum {
GHOST_kXrContextDebug = (1 << 0),
GHOST_kXrContextDebugTime = (1 << 1),
# ifdef WIN32
/* Needed to avoid issues with the SteamVR OpenGL graphics binding (use DirectX fallback
instead). */
/* Needed to avoid issues with the SteamVR OpenGL graphics binding
* (use DirectX fallback instead). */
GHOST_kXrContextGpuNVIDIA = (1 << 2),
# endif
};

View File

@ -1245,7 +1245,7 @@ GHOST_TSuccess GHOST_SystemCocoa::handleDraggingEvent(GHOST_TEventType eventType
/* Convert the image to an RGBA 32-bit format */
/* As Core Graphics does not support contexts with non-premultiplied alpha,
we need to get alpha key values in a separate batch */
* we need to get alpha key values in a separate batch */
/* First get RGB values w/o Alpha to avoid pre-multiplication,
* 32bit but last byte is unused */
@ -1479,8 +1479,8 @@ GHOST_TSuccess GHOST_SystemCocoa::handleMouseEvent(void *eventPtr)
CocoaWindow *cocoawindow;
/* [event window] returns other windows if mouse-over, that's OSX input standard
however, if mouse exits window(s), the windows become inactive, until you click.
We then fall back to the active window from ghost */
* however, if mouse exits window(s), the windows become inactive, until you click.
* We then fall back to the active window from ghost. */
window = (GHOST_WindowCocoa *)m_windowManager->getWindowAssociatedWithOSWindow(
(void *)[event window]);
if (!window) {

View File

@ -216,8 +216,9 @@ GHOST_XrAction::GHOST_XrAction(XrInstance instance,
XrActionCreateInfo action_info{XR_TYPE_ACTION_CREATE_INFO};
strcpy(action_info.actionName, info.name);
strcpy(action_info.localizedActionName, info.name); /* Just use same name for localized. This can
be changed in the future if necessary. */
/* Just use same name for localized. This can be changed in the future if necessary. */
strcpy(action_info.localizedActionName, info.name);
switch (info.type) {
case GHOST_kXrActionTypeBooleanInput:

View File

@ -97,8 +97,8 @@ static void read_vertices(const tinygltf::Accessor &accessor,
validate_accessor(accessor, buffer_view, buffer, stride, packed_size);
/* Resize the vertices vector, if necessary, to include room for the attribute data.
If there are multiple attributes for a primitive, the first one will resize, and the
subsequent will not need to. */
* If there are multiple attributes for a primitive, the first one will resize, and the
* subsequent will not need to. */
primitive.vertices.resize(accessor.count);
/* Copy the attribute value over from the glTF buffer into the appropriate vertex field. */
@ -147,9 +147,9 @@ static void read_indices(const tinygltf::Accessor &accessor,
const tinygltf::Buffer &buffer,
GHOST_XrPrimitive &primitive)
{
if (buffer_view.target != TINYGLTF_TARGET_ELEMENT_ARRAY_BUFFER &&
buffer_view.target != 0) { /* Allow 0 (not specified) even though spec doesn't seem to allow
this (BoomBox GLB fails). */
/* Allow 0 (not specified) even though spec doesn't seem to allow this (BoomBox GLB fails). */
if (buffer_view.target != TINYGLTF_TARGET_ELEMENT_ARRAY_BUFFER && buffer_view.target != 0) {
throw GHOST_XrException(
"glTF: Accessor for indices uses bufferview with invalid 'target' type.");
}
@ -164,8 +164,8 @@ static void read_indices(const tinygltf::Accessor &accessor,
validate_accessor(accessor, buffer_view, buffer, component_size_bytes, component_size_bytes);
if ((accessor.count % 3) != 0) { /* Since only triangles are supported, enforce that the number
of indices is divisible by 3. */
/* Since only triangles are supported, enforce that the number of indices is divisible by 3. */
if ((accessor.count % 3) != 0) {
throw GHOST_XrException("glTF: Unexpected number of indices for triangle primitive");
}

View File

@ -264,7 +264,7 @@ PYGETTEXT_KEYWORDS = (() +
for it in ("BMO_error_raise",)) +
tuple(("{}\\((?:[^\"',]+,)\\s*" + _msg_re + r"\s*(?:\)|,)").format(it)
for it in ("modifier_setError",)) +
for it in ("BKE_modifier_set_error",)) +
tuple((r"{}\(\s*" + _msg_re + r"\s*,\s*(?:" +
r"\s*,\s*)?(?:".join(_ctxt_re_gen(i) for i in range(PYGETTEXT_MAX_MULTI_CTXT)) + r")?\s*\)").format(it)

View File

@ -26,6 +26,7 @@ not associated with Blender's internal data.
__all__ = (
"blend_paths",
"escape_identifier",
"flip_name",
"unescape_identifier",
"keyconfig_init",
"keyconfig_set",
@ -61,6 +62,7 @@ from _bpy import (
_utils_units as units,
blend_paths,
escape_identifier,
flip_name,
unescape_identifier,
register_class,
resource_path,

View File

@ -1056,17 +1056,17 @@
<ThemeInfo
info_selected="#6080ff"
info_selected_text="#000000"
info_error="#FF0038ff"
info_error="#ff0038ff"
info_error_text="#000000"
info_warning="#FFE900ff"
info_warning="#ffe900ff"
info_warning_text="#000000"
info_info="#0068B3ff"
info_info="#0068b3ff"
info_info_text="#000000"
info_debug="#B30095ff"
info_debug="#b30095ff"
info_debug_text="#000000"
info_property="#44B300ff"
info_property="#44b300ff"
info_property_text="#000000"
info_operator="#44B300ff"
info_operator="#44b300ff"
info_operator_text="#000000"
>
<space>
@ -1352,6 +1352,15 @@
</panelcolors>
</ThemeSpaceGeneric>
</space>
<space_list>
<ThemeSpaceListGeneric
list="#adadad"
list_title="#c3c3c3"
list_text="#c3c3c3"
list_text_hi="#00ffff"
>
</ThemeSpaceListGeneric>
</space_list>
</ThemeSpreadsheet>
</spreadsheet>
<bone_color_sets>
@ -1530,6 +1539,44 @@
>
</ThemeCollectionColor>
</collection_color>
<strip_color>
<ThemeStripColor
color="#e2605b"
>
</ThemeStripColor>
<ThemeStripColor
color="#f1a355"
>
</ThemeStripColor>
<ThemeStripColor
color="#f1dc55"
>
</ThemeStripColor>
<ThemeStripColor
color="#7bcc7b"
>
</ThemeStripColor>
<ThemeStripColor
color="#5db6ea"
>
</ThemeStripColor>
<ThemeStripColor
color="#8d59da"
>
</ThemeStripColor>
<ThemeStripColor
color="#c673b8"
>
</ThemeStripColor>
<ThemeStripColor
color="#7a5441"
>
</ThemeStripColor>
<ThemeStripColor
color="#5f5f5f"
>
</ThemeStripColor>
</strip_color>
</Theme>
<ThemeStyle>
<panel_title>

View File

@ -999,6 +999,7 @@ def km_outliner(params):
# type specific actions.
("outliner.operation", {"type": 'RIGHTMOUSE', "value": 'PRESS'}, None),
op_menu("OUTLINER_MT_context_menu", {"type": 'RIGHTMOUSE', "value": 'PRESS'}),
op_menu_pie("OUTLINER_MT_view_pie", {"type": 'ACCENT_GRAVE', "value": 'PRESS'}),
("outliner.item_drag_drop", {"type": 'EVT_TWEAK_L', "value": 'ANY'}, None),
("outliner.item_drag_drop", {"type": 'EVT_TWEAK_L', "value": 'ANY', "shift": True}, None),
("outliner.show_hierarchy", {"type": 'HOME', "value": 'PRESS'}, None),
@ -1686,6 +1687,7 @@ def km_graph_editor(params):
("graph.view_all", {"type": 'NDOF_BUTTON_FIT', "value": 'PRESS'}, None),
("graph.view_selected", {"type": 'NUMPAD_PERIOD', "value": 'PRESS'}, None),
("graph.view_frame", {"type": 'NUMPAD_0', "value": 'PRESS'}, None),
op_menu_pie("GRAPH_MT_view_pie", {"type": 'ACCENT_GRAVE', "value": 'PRESS'}),
("graph.fmodifier_add", {"type": 'M', "value": 'PRESS', "shift": True, "ctrl": True},
{"properties": [("only_active", False)]}),
("anim.channels_editable_toggle", {"type": 'TAB', "value": 'PRESS'}, None),
@ -1737,7 +1739,9 @@ def km_image_generic(params):
("image.save", {"type": 'S', "value": 'PRESS', "alt": True}, None),
("image.cycle_render_slot", {"type": 'J', "value": 'PRESS', "repeat": True}, None),
("image.cycle_render_slot", {"type": 'J', "value": 'PRESS', "alt": True, "repeat": True},
{"properties": [("reverse", True)]}),])
{"properties": [("reverse", True)]}),
op_menu_pie("IMAGE_MT_view_pie", {"type": 'ACCENT_GRAVE', "value": 'PRESS'}),
])
if not params.legacy:
items.extend([("image.save_as", {"type": 'S', "value": 'PRESS', "shift": True, "alt": True}, None),])
@ -1920,6 +1924,7 @@ def km_node_editor(params):
("node.view_all", {"type": 'HOME', "value": 'PRESS'}, None),
("node.view_all", {"type": 'NDOF_BUTTON_FIT', "value": 'PRESS'}, None),
("node.view_selected", {"type": 'NUMPAD_PERIOD', "value": 'PRESS'}, None),
op_menu_pie("NODE_MT_view_pie", {"type": 'ACCENT_GRAVE', "value": 'PRESS'}),
("node.delete", {"type": 'X', "value": 'PRESS'}, None),
("node.delete", {"type": 'DEL', "value": 'PRESS'}, None),
("node.delete_reconnect", {"type": 'X', "value": 'PRESS', "ctrl": True}, None),
@ -2254,6 +2259,7 @@ def km_dopesheet(params):
("action.view_all", {"type": 'NDOF_BUTTON_FIT', "value": 'PRESS'}, None),
("action.view_selected", {"type": 'NUMPAD_PERIOD', "value": 'PRESS'}, None),
("action.view_frame", {"type": 'NUMPAD_0', "value": 'PRESS'}, None),
op_menu_pie("DOPESHEET_MT_view_pie", {"type": 'ACCENT_GRAVE', "value": 'PRESS'}),
("anim.channels_editable_toggle", {"type": 'TAB', "value": 'PRESS'}, None),
("anim.channels_select_filter", {"type": 'F', "value": 'PRESS', "ctrl": True}, None),
("transform.transform", {"type": 'G', "value": 'PRESS'},
@ -2354,6 +2360,7 @@ def km_nla_editor(params):
("nla.view_all", {"type": 'NDOF_BUTTON_FIT', "value": 'PRESS'}, None),
("nla.view_selected", {"type": 'NUMPAD_PERIOD', "value": 'PRESS'}, None),
("nla.view_frame", {"type": 'NUMPAD_0', "value": 'PRESS'}, None),
op_menu_pie("NLA_MT_view_pie", {"type": 'ACCENT_GRAVE', "value": 'PRESS'}),
("nla.actionclip_add", {"type": 'A', "value": 'PRESS', "shift": True}, None),
("nla.transition_add", {"type": 'T', "value": 'PRESS', "shift": True}, None),
("nla.soundclip_add", {"type": 'K', "value": 'PRESS', "shift": True}, None),
@ -2652,6 +2659,7 @@ def km_sequencer(params):
("sequencer.select_grouped", {"type": 'G', "value": 'PRESS', "shift": True}, None),
op_menu("SEQUENCER_MT_add", {"type": 'A', "value": 'PRESS', "shift": True}),
op_menu("SEQUENCER_MT_change", {"type": 'C', "value": 'PRESS'}),
op_menu_pie("SEQUENCER_MT_view_pie", {"type": 'ACCENT_GRAVE', "value": 'PRESS'}),
("sequencer.slip", {"type": 'S', "value": 'PRESS'}, None),
("wm.context_set_int", {"type": 'O', "value": 'PRESS'},
{"properties": [("data_path", 'scene.sequence_editor.overlay_frame'), ("value", 0)]}),
@ -2704,6 +2712,7 @@ def km_sequencerpreview(params):
{"properties": [("ratio", 0.25)]}),
("sequencer.view_zoom_ratio", {"type": 'NUMPAD_8', "value": 'PRESS'},
{"properties": [("ratio", 0.125)]}),
op_menu_pie("SEQUENCER_MT_preview_view_pie", {"type": 'ACCENT_GRAVE', "value": 'PRESS'}),
# Edit.
("transform.translate", {"type": params.select_tweak, "value": 'ANY'}, None),
@ -2841,7 +2850,9 @@ def km_clip(_params):
op_menu_pie("CLIP_MT_tracking_pie", {"type": 'E', "value": 'PRESS'}),
op_menu_pie("CLIP_MT_solving_pie", {"type": 'S', "value": 'PRESS', "shift": True}),
op_menu_pie("CLIP_MT_marker_pie", {"type": 'E', "value": 'PRESS', "shift": True}),
op_menu_pie("CLIP_MT_reconstruction_pie", {"type": 'W', "value": 'PRESS', "shift": True}),])
op_menu_pie("CLIP_MT_reconstruction_pie", {"type": 'W', "value": 'PRESS', "shift": True}),
op_menu_pie("CLIP_MT_view_pie", {"type": 'ACCENT_GRAVE', "value": 'PRESS'}),
])
return keymap

View File

@ -1,4 +1,6 @@
import bpy
bpy.context.camera.sensor_width = 13.2
bpy.context.camera.sensor_height = 8.80
bpy.context.camera.sensor_fit = 'HORIZONTAL'
camera = bpy.context.edit_movieclip.tracking.camera
camera.sensor_width = 13.2
camera.units = 'MILLIMETERS'
camera.pixel_aspect = 1

View File

@ -1,4 +1,6 @@
import bpy
bpy.context.camera.sensor_width = 7.18
bpy.context.camera.sensor_height = 5.32
bpy.context.camera.sensor_fit = 'HORIZONTAL'
camera = bpy.context.edit_movieclip.tracking.camera
camera.sensor_width = 7.18
camera.units = 'MILLIMETERS'
camera.pixel_aspect = 1

View File

@ -1,4 +1,6 @@
import bpy
bpy.context.camera.sensor_width = 6.17
bpy.context.camera.sensor_height = 4.55
bpy.context.camera.sensor_fit = 'HORIZONTAL'
camera = bpy.context.edit_movieclip.tracking.camera
camera.sensor_width = 6.17
camera.units = 'MILLIMETERS'
camera.pixel_aspect = 1

View File

@ -1,4 +1,6 @@
import bpy
bpy.context.camera.sensor_width = 5.76
bpy.context.camera.sensor_height = 4.29
bpy.context.camera.sensor_fit = 'HORIZONTAL'
camera = bpy.context.edit_movieclip.tracking.camera
camera.sensor_width = 5.76
camera.units = 'MILLIMETERS'
camera.pixel_aspect = 1

View File

@ -1,4 +1,6 @@
import bpy
bpy.context.camera.sensor_width = 5.37
bpy.context.camera.sensor_height = 4.04
bpy.context.camera.sensor_fit = 'HORIZONTAL'
camera = bpy.context.edit_movieclip.tracking.camera
camera.sensor_width = 5.37
camera.units = 'MILLIMETERS'
camera.pixel_aspect = 1

View File

@ -1,4 +1,6 @@
import bpy
bpy.context.camera.sensor_width = 4.54
bpy.context.camera.sensor_height = 3.42
bpy.context.camera.sensor_fit = 'HORIZONTAL'
camera = bpy.context.edit_movieclip.tracking.camera
camera.sensor_width = 4.54
camera.units = 'MILLIMETERS'
camera.pixel_aspect = 1

View File

@ -1,4 +1,6 @@
import bpy
bpy.context.camera.sensor_width = 8.8
bpy.context.camera.sensor_height = 6.6
bpy.context.camera.sensor_fit = 'HORIZONTAL'
camera = bpy.context.edit_movieclip.tracking.camera
camera.sensor_width = 8.8
camera.units = 'MILLIMETERS'
camera.pixel_aspect = 1

View File

@ -1,4 +1,6 @@
import bpy
bpy.context.camera.sensor_width = 23.6
bpy.context.camera.sensor_height = 15.6
bpy.context.camera.sensor_fit = 'HORIZONTAL'
camera = bpy.context.edit_movieclip.tracking.camera
camera.sensor_width = 23.6
camera.units = 'MILLIMETERS'
camera.pixel_aspect = 1

View File

@ -1,4 +1,6 @@
import bpy
bpy.context.camera.sensor_width = 22.30
bpy.context.camera.sensor_height = 14.90
bpy.context.camera.sensor_fit = 'HORIZONTAL'
camera = bpy.context.edit_movieclip.tracking.camera
camera.sensor_width = 22.30
camera.units = 'MILLIMETERS'
camera.pixel_aspect = 1

View File

@ -1,4 +1,6 @@
import bpy
bpy.context.camera.sensor_width = 27.90
bpy.context.camera.sensor_height = 18.60
bpy.context.camera.sensor_fit = 'HORIZONTAL'
camera = bpy.context.edit_movieclip.tracking.camera
camera.sensor_width = 27.90
camera.units = 'MILLIMETERS'
camera.pixel_aspect = 1

View File

@ -1,4 +1,6 @@
import bpy
bpy.context.camera.sensor_width = 10.26
bpy.context.camera.sensor_height = 7.49
bpy.context.camera.sensor_fit = 'HORIZONTAL'
camera = bpy.context.edit_movieclip.tracking.camera
camera.sensor_width = 10.26
camera.units = 'MILLIMETERS'
camera.pixel_aspect = 1

View File

@ -1,4 +1,6 @@
import bpy
bpy.context.camera.sensor_width = 22
bpy.context.camera.sensor_height = 16
bpy.context.camera.sensor_fit = 'HORIZONTAL'
camera = bpy.context.edit_movieclip.tracking.camera
camera.sensor_width = 22
camera.units = 'MILLIMETERS'
camera.pixel_aspect = 1

View File

@ -1,4 +1,6 @@
import bpy
bpy.context.camera.sensor_width = 52.45
bpy.context.camera.sensor_height = 23.01
bpy.context.camera.sensor_fit = 'HORIZONTAL'
camera = bpy.context.edit_movieclip.tracking.camera
camera.sensor_width = 52.45
camera.units = 'MILLIMETERS'
camera.pixel_aspect = 1

View File

@ -1,4 +1,6 @@
import bpy
bpy.context.camera.sensor_width = 71.41
bpy.context.camera.sensor_height = 52.63
bpy.context.camera.sensor_fit = 'HORIZONTAL'
camera = bpy.context.edit_movieclip.tracking.camera
camera.sensor_width = 71.41
camera.units = 'MILLIMETERS'
camera.pixel_aspect = 1

View File

@ -1,4 +1,6 @@
import bpy
bpy.context.camera.sensor_width = 12.35
bpy.context.camera.sensor_height = 7.42
bpy.context.camera.sensor_fit = 'HORIZONTAL'
camera = bpy.context.edit_movieclip.tracking.camera
camera.sensor_width = 12.35
camera.units = 'MILLIMETERS'
camera.pixel_aspect = 1

View File

@ -1,4 +1,6 @@
import bpy
bpy.context.camera.sensor_width = 24.89
bpy.context.camera.sensor_height = 18.66
bpy.context.camera.sensor_fit = 'HORIZONTAL'
camera = bpy.context.edit_movieclip.tracking.camera
camera.sensor_width = 24.89
camera.units = 'MILLIMETERS'
camera.pixel_aspect = 1

View File

@ -1,4 +1,6 @@
import bpy
bpy.context.camera.sensor_width = 54.12
bpy.context.camera.sensor_height = 25.58
bpy.context.camera.sensor_fit = 'HORIZONTAL'
camera = bpy.context.edit_movieclip.tracking.camera
camera.sensor_width = 54.12
camera.units = 'MILLIMETERS'
camera.pixel_aspect = 1

Some files were not shown because too many files have changed in this diff.