Cleanup: consistently use uint32_t for path flag

Brecht Van Lommel 2021-10-17 20:09:45 +02:00
parent 1df3b51988
commit fc4b1fede3
25 changed files with 138 additions and 130 deletions
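
Aside (not part of the commit; the EXAMPLE_* names are invented): the pattern applied across all files below is to declare the path flag as uint32_t and to define the flag bits with unsigned literals, so storage, function parameters and bit tests consistently use the same unsigned 32-bit type. A minimal compilable sketch:

/* Illustrative only: a uint32_t path flag with unsigned flag bits. */
#include <cstdint>
#include <cstdio>

enum : uint32_t {
  EXAMPLE_RAY_CAMERA = (1U << 0U),
  EXAMPLE_RAY_SHADOW_CATCHER_BACKGROUND = (1U << 30U), /* high bits stay unsigned */
};

static void example_setup(const uint32_t path_flag)
{
  if (path_flag & EXAMPLE_RAY_CAMERA) {
    printf("primary ray\n");
  }
}

int main()
{
  example_setup(EXAMPLE_RAY_CAMERA | EXAMPLE_RAY_SHADOW_CATCHER_BACKGROUND);
  return 0;
}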

View File

@ -36,7 +36,7 @@ CCL_NAMESPACE_BEGIN
ccl_device void bsdf_transparent_setup(ccl_private ShaderData *sd,
const float3 weight,
int path_flag)
uint32_t path_flag)
{
/* Check cutoff weight. */
float sample_weight = fabsf(average(weight));

View File

@ -58,7 +58,7 @@ ccl_device_forceinline bool integrator_intersect_terminate(KernelGlobals kg,
* and evaluating the shader when not needed. Only for emission and transparent
* surfaces in front of emission do we need to evaluate the shader, since we
* perform MIS as part of indirect rays. */
const int path_flag = INTEGRATOR_STATE(state, path, flag);
const uint32_t path_flag = INTEGRATOR_STATE(state, path, flag);
const float probability = path_state_continuation_probability(kg, state, path_flag);
if (probability != 1.0f) {
@ -184,7 +184,7 @@ ccl_device void integrator_intersect_closest(KernelGlobals kg, IntegratorState s
/* NOTE: if we make lights visible to camera rays, we'll need to initialize
* these in the path_state_init. */
const int last_type = INTEGRATOR_STATE(state, isect, type);
const int path_flag = INTEGRATOR_STATE(state, path, flag);
const uint32_t path_flag = INTEGRATOR_STATE(state, path, flag);
hit = lights_intersect(
kg, &ray, &isect, last_isect_prim, last_isect_object, last_type, path_flag) ||

View File

@ -56,7 +56,7 @@ ccl_device_forceinline bool integrate_surface_holdout(KernelGlobals kg,
if (kernel_data.background.transparent) {
const float3 throughput = INTEGRATOR_STATE(state, path, throughput);
const float transparent = average(holdout_weight * throughput);
kernel_accum_transparent(kg, state, transparent, render_buffer);
kernel_accum_transparent(kg, state, path_flag, transparent, render_buffer);
}
if (isequal_float3(holdout_weight, one_float3())) {
return false;
@ -118,7 +118,7 @@ ccl_device_forceinline void integrate_surface_direct_light(KernelGlobals kg,
/* Sample position on a light. */
LightSample ls ccl_optional_struct_init;
{
const int path_flag = INTEGRATOR_STATE(state, path, flag);
const uint32_t path_flag = INTEGRATOR_STATE(state, path, flag);
const uint bounce = INTEGRATOR_STATE(state, path, bounce);
float light_u, light_v;
path_state_rng_2D(kg, rng_state, PRNG_LIGHT_U, &light_u, &light_v);
@ -375,7 +375,7 @@ ccl_device bool integrate_surface(KernelGlobals kg,
#ifdef __VOLUME__
if (!(sd.flag & SD_HAS_ONLY_VOLUME)) {
#endif
const int path_flag = INTEGRATOR_STATE(state, path, flag);
const uint32_t path_flag = INTEGRATOR_STATE(state, path, flag);
#ifdef __SUBSURFACE__
/* Can skip shader evaluation for BSSRDF exit point without bump mapping. */

View File

@ -94,7 +94,7 @@ ccl_device_inline bool volume_shader_sample(KernelGlobals kg,
ccl_private ShaderData *ccl_restrict sd,
ccl_private VolumeShaderCoefficients *coeff)
{
const int path_flag = INTEGRATOR_STATE(state, path, flag);
const uint32_t path_flag = INTEGRATOR_STATE(state, path, flag);
shader_eval_volume<false>(kg, state, sd, path_flag, [=](const int i) {
return integrator_state_read_volume_stack(state, i);
});
@ -686,7 +686,7 @@ ccl_device_forceinline bool integrate_volume_sample_light(
}
/* Sample position on a light. */
const int path_flag = INTEGRATOR_STATE(state, path, flag);
const uint32_t path_flag = INTEGRATOR_STATE(state, path, flag);
const uint bounce = INTEGRATOR_STATE(state, path, bounce);
float light_u, light_v;
path_state_rng_2D(kg, rng_state, PRNG_LIGHT_U, &light_u, &light_v);
@ -725,7 +725,7 @@ ccl_device_forceinline void integrate_volume_direct_light(
* TODO: decorrelate random numbers and use light_sample_new_position to
* avoid resampling the CDF. */
{
const int path_flag = INTEGRATOR_STATE(state, path, flag);
const uint32_t path_flag = INTEGRATOR_STATE(state, path, flag);
const uint bounce = INTEGRATOR_STATE(state, path, bounce);
float light_u, light_v;
path_state_rng_2D(kg, rng_state, PRNG_LIGHT_U, &light_u, &light_v);

View File

@ -153,7 +153,7 @@ ccl_device_inline int kernel_accum_sample(KernelGlobals kg,
}
ccl_device void kernel_accum_adaptive_buffer(KernelGlobals kg,
ConstIntegratorState state,
const int sample,
const float3 contribution,
ccl_global float *ccl_restrict buffer)
{
@ -166,7 +166,6 @@ ccl_device void kernel_accum_adaptive_buffer(KernelGlobals kg,
return;
}
const int sample = INTEGRATOR_STATE(state, path, sample);
if (sample_is_even(kernel_data.integrator.sampling_pattern, sample)) {
kernel_write_pass_float4(
buffer + kernel_data.film.pass_adaptive_aux_buffer,
@ -186,7 +185,7 @@ ccl_device void kernel_accum_adaptive_buffer(KernelGlobals kg,
* passes (like combined, adaptive sampling). */
ccl_device bool kernel_accum_shadow_catcher(KernelGlobals kg,
ConstIntegratorState state,
const uint32_t path_flag,
const float3 contribution,
ccl_global float *ccl_restrict buffer)
{
@ -198,7 +197,7 @@ ccl_device bool kernel_accum_shadow_catcher(KernelGlobals kg,
kernel_assert(kernel_data.film.pass_shadow_catcher_matte != PASS_UNUSED);
/* Matte pass. */
if (kernel_shadow_catcher_is_matte_path(kg, state)) {
if (kernel_shadow_catcher_is_matte_path(path_flag)) {
kernel_write_pass_float3(buffer + kernel_data.film.pass_shadow_catcher_matte, contribution);
/* NOTE: Accumulate the combined pass and to the samples count pass, so that the adaptive
* sampling is based on how noisy the combined pass is as if there were no catchers in the
@ -206,7 +205,7 @@ ccl_device bool kernel_accum_shadow_catcher(KernelGlobals kg,
}
/* Shadow catcher pass. */
if (kernel_shadow_catcher_is_object_pass(kg, state)) {
if (kernel_shadow_catcher_is_object_pass(path_flag)) {
kernel_write_pass_float3(buffer + kernel_data.film.pass_shadow_catcher, contribution);
return true;
}
@ -215,7 +214,7 @@ ccl_device bool kernel_accum_shadow_catcher(KernelGlobals kg,
}
ccl_device bool kernel_accum_shadow_catcher_transparent(KernelGlobals kg,
ConstIntegratorState state,
const uint32_t path_flag,
const float3 contribution,
const float transparent,
ccl_global float *ccl_restrict buffer)
@ -227,12 +226,12 @@ ccl_device bool kernel_accum_shadow_catcher_transparent(KernelGlobals kg,
kernel_assert(kernel_data.film.pass_shadow_catcher != PASS_UNUSED);
kernel_assert(kernel_data.film.pass_shadow_catcher_matte != PASS_UNUSED);
if (INTEGRATOR_STATE(state, path, flag) & PATH_RAY_SHADOW_CATCHER_BACKGROUND) {
if (path_flag & PATH_RAY_SHADOW_CATCHER_BACKGROUND) {
return true;
}
/* Matte pass. */
if (kernel_shadow_catcher_is_matte_path(kg, state)) {
if (kernel_shadow_catcher_is_matte_path(path_flag)) {
kernel_write_pass_float4(
buffer + kernel_data.film.pass_shadow_catcher_matte,
make_float4(contribution.x, contribution.y, contribution.z, transparent));
@ -242,7 +241,7 @@ ccl_device bool kernel_accum_shadow_catcher_transparent(KernelGlobals kg,
}
/* Shadow catcher pass. */
if (kernel_shadow_catcher_is_object_pass(kg, state)) {
if (kernel_shadow_catcher_is_object_pass(path_flag)) {
/* NOTE: The transparency of the shadow catcher pass is ignored. It is not needed for the
* calculation and the alpha channel of the pass contains numbers of samples contributed to a
* pixel of the pass. */
@ -254,7 +253,7 @@ ccl_device bool kernel_accum_shadow_catcher_transparent(KernelGlobals kg,
}
ccl_device void kernel_accum_shadow_catcher_transparent_only(KernelGlobals kg,
ConstIntegratorState state,
const uint32_t path_flag,
const float transparent,
ccl_global float *ccl_restrict buffer)
{
@ -265,7 +264,7 @@ ccl_device void kernel_accum_shadow_catcher_transparent_only(KernelGlobals kg,
kernel_assert(kernel_data.film.pass_shadow_catcher_matte != PASS_UNUSED);
/* Matte pass. */
if (kernel_shadow_catcher_is_matte_path(kg, state)) {
if (kernel_shadow_catcher_is_matte_path(path_flag)) {
kernel_write_pass_float(buffer + kernel_data.film.pass_shadow_catcher_matte + 3, transparent);
}
}
@ -278,12 +277,13 @@ ccl_device void kernel_accum_shadow_catcher_transparent_only(KernelGlobals kg,
/* Write combined pass. */
ccl_device_inline void kernel_accum_combined_pass(KernelGlobals kg,
ConstIntegratorState state,
const uint32_t path_flag,
const int sample,
const float3 contribution,
ccl_global float *ccl_restrict buffer)
{
#ifdef __SHADOW_CATCHER__
if (kernel_accum_shadow_catcher(kg, state, contribution, buffer)) {
if (kernel_accum_shadow_catcher(kg, path_flag, contribution, buffer)) {
return;
}
#endif
@ -292,19 +292,20 @@ ccl_device_inline void kernel_accum_combined_pass(KernelGlobals kg,
kernel_write_pass_float3(buffer + kernel_data.film.pass_combined, contribution);
}
kernel_accum_adaptive_buffer(kg, state, contribution, buffer);
kernel_accum_adaptive_buffer(kg, sample, contribution, buffer);
}
/* Write combined pass with transparency. */
ccl_device_inline void kernel_accum_combined_transparent_pass(KernelGlobals kg,
ConstIntegratorState state,
const uint32_t path_flag,
const int sample,
const float3 contribution,
const float transparent,
ccl_global float *ccl_restrict
buffer)
{
#ifdef __SHADOW_CATCHER__
if (kernel_accum_shadow_catcher_transparent(kg, state, contribution, transparent, buffer)) {
if (kernel_accum_shadow_catcher_transparent(kg, path_flag, contribution, transparent, buffer)) {
return;
}
#endif
@ -315,7 +316,7 @@ ccl_device_inline void kernel_accum_combined_transparent_pass(KernelGlobals kg,
make_float4(contribution.x, contribution.y, contribution.z, transparent));
}
kernel_accum_adaptive_buffer(kg, state, contribution, buffer);
kernel_accum_adaptive_buffer(kg, sample, contribution, buffer);
}
/* Write background or emission to appropriate pass. */
@ -331,7 +332,7 @@ ccl_device_inline void kernel_accum_emission_or_background_pass(KernelGlobals kg
}
#ifdef __PASSES__
const int path_flag = INTEGRATOR_STATE(state, path, flag);
const uint32_t path_flag = INTEGRATOR_STATE(state, path, flag);
int pass_offset = PASS_UNUSED;
/* Denoising albedo. */
@ -401,11 +402,14 @@ ccl_device_inline void kernel_accum_light(KernelGlobals kg,
ccl_global float *buffer = kernel_accum_pixel_render_buffer(kg, state, render_buffer);
kernel_accum_combined_pass(kg, state, contribution, buffer);
const uint32_t path_flag = INTEGRATOR_STATE(state, shadow_path, flag);
const int sample = INTEGRATOR_STATE(state, path, sample);
kernel_accum_combined_pass(kg, path_flag, sample, contribution, buffer);
#ifdef __PASSES__
if (kernel_data.film.light_pass_flag & PASS_ANY) {
const int path_flag = INTEGRATOR_STATE(state, shadow_path, flag);
const uint32_t path_flag = INTEGRATOR_STATE(state, shadow_path, flag);
int pass_offset = PASS_UNUSED;
if (path_flag & (PATH_RAY_REFLECT_PASS | PATH_RAY_TRANSMISSION_PASS)) {
@ -467,6 +471,7 @@ ccl_device_inline void kernel_accum_light(KernelGlobals kg,
* in many places. */
ccl_device_inline void kernel_accum_transparent(KernelGlobals kg,
ConstIntegratorState state,
const uint32_t path_flag,
const float transparent,
ccl_global float *ccl_restrict render_buffer)
{
@ -476,7 +481,7 @@ ccl_device_inline void kernel_accum_transparent(KernelGlobals kg,
kernel_write_pass_float(buffer + kernel_data.film.pass_combined + 3, transparent);
}
kernel_accum_shadow_catcher_transparent_only(kg, state, transparent, buffer);
kernel_accum_shadow_catcher_transparent_only(kg, path_flag, transparent, buffer);
}
/* Write background contribution to render buffer.
@ -493,12 +498,15 @@ ccl_device_inline void kernel_accum_background(KernelGlobals kg,
kernel_accum_clamp(kg, &contribution, INTEGRATOR_STATE(state, path, bounce) - 1);
ccl_global float *buffer = kernel_accum_pixel_render_buffer(kg, state, render_buffer);
const uint32_t path_flag = INTEGRATOR_STATE(state, path, flag);
if (is_transparent_background_ray) {
kernel_accum_transparent(kg, state, transparent, render_buffer);
kernel_accum_transparent(kg, state, path_flag, transparent, render_buffer);
}
else {
kernel_accum_combined_transparent_pass(kg, state, contribution, transparent, buffer);
const int sample = INTEGRATOR_STATE(state, path, sample);
kernel_accum_combined_transparent_pass(
kg, path_flag, sample, contribution, transparent, buffer);
}
kernel_accum_emission_or_background_pass(
kg, state, contribution, buffer, kernel_data.film.pass_background);
@ -515,8 +523,10 @@ ccl_device_inline void kernel_accum_emission(KernelGlobals kg,
kernel_accum_clamp(kg, &contribution, INTEGRATOR_STATE(state, path, bounce) - 1);
ccl_global float *buffer = kernel_accum_pixel_render_buffer(kg, state, render_buffer);
const uint32_t path_flag = INTEGRATOR_STATE(state, path, flag);
const int sample = INTEGRATOR_STATE(state, path, sample);
kernel_accum_combined_pass(kg, state, contribution, buffer);
kernel_accum_combined_pass(kg, path_flag, sample, contribution, buffer);
kernel_accum_emission_or_background_pass(
kg, state, contribution, buffer, kernel_data.film.pass_emission);
}
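
Aside (invented Example* names, not part of the commit): the signature changes in this file follow one pattern. Helpers such as kernel_accum_shadow_catcher, kernel_accum_combined_pass and kernel_accum_adaptive_buffer now take path_flag (and, where needed, sample) from the caller, which reads them from the integrator state once, instead of each helper taking ConstIntegratorState. A minimal sketch of that shape:

#include <cstdint>

struct ExampleState {
  uint32_t path_flag;
  int sample;
};

/* The helper takes the values it needs rather than the whole state. */
static void example_accum_combined(const uint32_t path_flag,
                                   const int sample,
                                   const float contribution,
                                   float *buffer)
{
  if (path_flag & (1U << 29U)) { /* e.g. a shadow-catcher style bit */
    return;
  }
  buffer[sample & 1] += contribution;
}

/* The caller reads path_flag and sample once and passes them down. */
static void example_accum_light(const ExampleState *state, const float contribution, float *buffer)
{
  const uint32_t path_flag = state->path_flag;
  const int sample = state->sample;
  example_accum_combined(path_flag, sample, contribution, buffer);
}

int main()
{
  float buffer[2] = {0.0f, 0.0f};
  const ExampleState state = {1U << 0U, 3};
  example_accum_light(&state, 0.5f, buffer);
  return 0;
}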

View File

@ -75,7 +75,7 @@ ccl_device void kernel_background_evaluate(KernelGlobals kg,
/* Evaluate shader.
* This is being evaluated for all BSDFs, so path flag does not contain a specific type. */
const int path_flag = PATH_RAY_EMISSION;
const uint32_t path_flag = PATH_RAY_EMISSION;
shader_eval_surface<KERNEL_FEATURE_NODE_MASK_SURFACE_LIGHT>(
kg, INTEGRATOR_STATE_NULL, &sd, NULL, path_flag);
float3 color = shader_background_eval(&sd);

View File

@ -50,7 +50,7 @@ ccl_device_inline bool light_sample(KernelGlobals kg,
const float randu,
const float randv,
const float3 P,
const int path_flag,
const uint32_t path_flag,
ccl_private LightSample *ls)
{
const ccl_global KernelLight *klight = &kernel_tex_fetch(__lights, lamp);
@ -215,7 +215,7 @@ ccl_device bool lights_intersect(KernelGlobals kg,
const int last_prim,
const int last_object,
const int last_type,
const int path_flag)
const uint32_t path_flag)
{
for (int lamp = 0; lamp < kernel_data.integrator.num_all_lights; lamp++) {
const ccl_global KernelLight *klight = &kernel_tex_fetch(__lights, lamp);
@ -797,7 +797,7 @@ ccl_device_noinline bool light_distribution_sample(KernelGlobals kg,
const float time,
const float3 P,
const int bounce,
const int path_flag,
const uint32_t path_flag,
ccl_private LightSample *ls)
{
/* Sample light index from distribution. */
@ -837,7 +837,7 @@ ccl_device_inline bool light_distribution_sample_from_volume_segment(KernelGloba
const float time,
const float3 P,
const int bounce,
const int path_flag,
const uint32_t path_flag,
ccl_private LightSample *ls)
{
return light_distribution_sample<true>(kg, randu, randv, time, P, bounce, path_flag, ls);
@ -849,7 +849,7 @@ ccl_device_inline bool light_distribution_sample_from_position(KernelGlobals kg,
const float time,
const float3 P,
const int bounce,
const int path_flag,
const uint32_t path_flag,
ccl_private LightSample *ls)
{
return light_distribution_sample<false>(kg, randu, randv, time, P, bounce, path_flag, ls);

View File

@ -201,7 +201,7 @@ ccl_device_inline void kernel_write_data_passes(KernelGlobals kg,
ccl_global float *ccl_restrict render_buffer)
{
#ifdef __PASSES__
const int path_flag = INTEGRATOR_STATE(state, path, flag);
const uint32_t path_flag = INTEGRATOR_STATE(state, path, flag);
if (!(path_flag & PATH_RAY_CAMERA)) {
return;

View File

@ -580,7 +580,7 @@ ccl_device void shader_eval_surface(KernelGlobals kg,
ConstIntegratorState state,
ccl_private ShaderData *ccl_restrict sd,
ccl_global float *ccl_restrict buffer,
int path_flag)
uint32_t path_flag)
{
/* If path is being terminated, we are tracing a shadow ray or evaluating
* emission, then we don't need to store closures. The emission and shadow
@ -767,7 +767,7 @@ template<const bool shadow, typename StackReadOp>
ccl_device_inline void shader_eval_volume(KernelGlobals kg,
ConstIntegratorState state,
ccl_private ShaderData *ccl_restrict sd,
const int path_flag,
const uint32_t path_flag,
StackReadOp stack_read)
{
/* If path is being terminated, we are tracing a shadow ray or evaluating

View File

@ -39,7 +39,7 @@ ccl_device_inline bool kernel_shadow_catcher_is_path_split_bounce(KernelGlobals
return false;
}
const int path_flag = INTEGRATOR_STATE(state, path, flag);
const uint32_t path_flag = INTEGRATOR_STATE(state, path, flag);
if ((path_flag & PATH_RAY_TRANSPARENT_BACKGROUND) == 0) {
/* Split only on primary rays, secondary bounces are to treat shadow catcher as a regular
@ -66,7 +66,7 @@ ccl_device_inline bool kernel_shadow_catcher_path_can_split(KernelGlobals kg,
return false;
}
const int path_flag = INTEGRATOR_STATE(state, path, flag);
const uint32_t path_flag = INTEGRATOR_STATE(state, path, flag);
if (path_flag & PATH_RAY_SHADOW_CATCHER_HIT) {
/* Shadow catcher was already hit and the state was split. No further split is allowed. */
@ -105,16 +105,14 @@ ccl_device_inline bool kernel_shadow_catcher_split(KernelGlobals kg,
#ifdef __SHADOW_CATCHER__
ccl_device_forceinline bool kernel_shadow_catcher_is_matte_path(KernelGlobals kg,
ConstIntegratorState state)
ccl_device_forceinline bool kernel_shadow_catcher_is_matte_path(const uint32_t path_flag)
{
return (INTEGRATOR_STATE(state, path, flag) & PATH_RAY_SHADOW_CATCHER_HIT) == 0;
return (path_flag & PATH_RAY_SHADOW_CATCHER_HIT) == 0;
}
ccl_device_forceinline bool kernel_shadow_catcher_is_object_pass(KernelGlobals kg,
ConstIntegratorState state)
ccl_device_forceinline bool kernel_shadow_catcher_is_object_pass(const uint32_t path_flag)
{
return INTEGRATOR_STATE(state, path, flag) & PATH_RAY_SHADOW_CATCHER_PASS;
return path_flag & PATH_RAY_SHADOW_CATCHER_PASS;
}
#endif /* __SHADOW_CATCHER__ */

View File

@ -197,30 +197,30 @@ enum PathRayFlag {
* NOTE: Recalculated after a surface bounce.
*/
PATH_RAY_CAMERA = (1 << 0),
PATH_RAY_REFLECT = (1 << 1),
PATH_RAY_TRANSMIT = (1 << 2),
PATH_RAY_DIFFUSE = (1 << 3),
PATH_RAY_GLOSSY = (1 << 4),
PATH_RAY_SINGULAR = (1 << 5),
PATH_RAY_TRANSPARENT = (1 << 6),
PATH_RAY_VOLUME_SCATTER = (1 << 7),
PATH_RAY_CAMERA = (1U << 0U),
PATH_RAY_REFLECT = (1U << 1U),
PATH_RAY_TRANSMIT = (1U << 2U),
PATH_RAY_DIFFUSE = (1U << 3U),
PATH_RAY_GLOSSY = (1U << 4U),
PATH_RAY_SINGULAR = (1U << 5U),
PATH_RAY_TRANSPARENT = (1U << 6U),
PATH_RAY_VOLUME_SCATTER = (1U << 7U),
/* Shadow ray visibility. */
PATH_RAY_SHADOW_OPAQUE = (1 << 8),
PATH_RAY_SHADOW_TRANSPARENT = (1 << 9),
PATH_RAY_SHADOW_OPAQUE = (1U << 8U),
PATH_RAY_SHADOW_TRANSPARENT = (1U << 9U),
PATH_RAY_SHADOW = (PATH_RAY_SHADOW_OPAQUE | PATH_RAY_SHADOW_TRANSPARENT),
/* Special flag to tag unaligned BVH nodes.
* Only set and used in BVH nodes to distinguish how to interpret bounding box information stored
* in the node (either it should be intersected as AABB or as OBB). */
PATH_RAY_NODE_UNALIGNED = (1 << 10),
* in the node (either it should be intersected as AABB or as OBB). */
PATH_RAY_NODE_UNALIGNED = (1U << 10U),
/* Subset of flags used for ray visibility for intersection.
*
* NOTE: SHADOW_CATCHER macros below assume there are no more than
* 16 visibility bits. */
PATH_RAY_ALL_VISIBILITY = ((1 << 11) - 1),
PATH_RAY_ALL_VISIBILITY = ((1U << 11U) - 1U),
/* --------------------------------------------------------------------
* Path flags.
@ -228,69 +228,69 @@ enum PathRayFlag {
/* Don't apply multiple importance sampling weights to emission from
* lamp or surface hits, because they were not direct light sampled. */
PATH_RAY_MIS_SKIP = (1 << 11),
PATH_RAY_MIS_SKIP = (1U << 11U),
/* Diffuse bounce earlier in the path, skip SSS to improve performance
* and avoid branching twice with disk sampling SSS. */
PATH_RAY_DIFFUSE_ANCESTOR = (1 << 12),
PATH_RAY_DIFFUSE_ANCESTOR = (1U << 12U),
/* Single pass has been written. */
PATH_RAY_SINGLE_PASS_DONE = (1 << 13),
PATH_RAY_SINGLE_PASS_DONE = (1U << 13U),
/* Zero background alpha, for camera or transparent glass rays. */
PATH_RAY_TRANSPARENT_BACKGROUND = (1 << 14),
PATH_RAY_TRANSPARENT_BACKGROUND = (1U << 14U),
/* Terminate ray immediately at next bounce. */
PATH_RAY_TERMINATE_ON_NEXT_SURFACE = (1 << 15),
PATH_RAY_TERMINATE_IN_NEXT_VOLUME = (1 << 16),
PATH_RAY_TERMINATE_ON_NEXT_SURFACE = (1U << 15U),
PATH_RAY_TERMINATE_IN_NEXT_VOLUME = (1U << 16U),
/* Ray is to be terminated, but continue with transparent bounces and
* emission as long as we encounter them. This is required to make the
* MIS between direct and indirect light rays match, as shadow rays go
* through transparent surfaces to reach emission too. */
PATH_RAY_TERMINATE_AFTER_TRANSPARENT = (1 << 17),
PATH_RAY_TERMINATE_AFTER_TRANSPARENT = (1U << 17U),
/* Terminate ray immediately after volume shading. */
PATH_RAY_TERMINATE_AFTER_VOLUME = (1 << 18),
PATH_RAY_TERMINATE_AFTER_VOLUME = (1U << 18U),
/* Ray is to be terminated. */
PATH_RAY_TERMINATE = (PATH_RAY_TERMINATE_ON_NEXT_SURFACE | PATH_RAY_TERMINATE_IN_NEXT_VOLUME |
PATH_RAY_TERMINATE_AFTER_TRANSPARENT | PATH_RAY_TERMINATE_AFTER_VOLUME),
/* Path and shader is being evaluated for direct lighting emission. */
PATH_RAY_EMISSION = (1 << 19),
PATH_RAY_EMISSION = (1U << 19U),
/* Perform subsurface scattering. */
PATH_RAY_SUBSURFACE_RANDOM_WALK = (1 << 20),
PATH_RAY_SUBSURFACE_DISK = (1 << 21),
PATH_RAY_SUBSURFACE_USE_FRESNEL = (1 << 22),
PATH_RAY_SUBSURFACE_RANDOM_WALK = (1U << 20U),
PATH_RAY_SUBSURFACE_DISK = (1U << 21U),
PATH_RAY_SUBSURFACE_USE_FRESNEL = (1U << 22U),
PATH_RAY_SUBSURFACE = (PATH_RAY_SUBSURFACE_RANDOM_WALK | PATH_RAY_SUBSURFACE_DISK |
PATH_RAY_SUBSURFACE_USE_FRESNEL),
/* Contribute to denoising features. */
PATH_RAY_DENOISING_FEATURES = (1 << 23),
PATH_RAY_DENOISING_FEATURES = (1U << 23U),
/* Render pass categories. */
PATH_RAY_REFLECT_PASS = (1 << 24),
PATH_RAY_TRANSMISSION_PASS = (1 << 25),
PATH_RAY_VOLUME_PASS = (1 << 26),
PATH_RAY_REFLECT_PASS = (1U << 24U),
PATH_RAY_TRANSMISSION_PASS = (1U << 25U),
PATH_RAY_VOLUME_PASS = (1U << 26U),
PATH_RAY_ANY_PASS = (PATH_RAY_REFLECT_PASS | PATH_RAY_TRANSMISSION_PASS | PATH_RAY_VOLUME_PASS),
/* Shadow ray is for a light or surface. */
PATH_RAY_SHADOW_FOR_LIGHT = (1 << 27),
PATH_RAY_SHADOW_FOR_LIGHT = (1U << 27U),
/* A shadow catcher object was hit and the path was split into two. */
PATH_RAY_SHADOW_CATCHER_HIT = (1 << 28),
PATH_RAY_SHADOW_CATCHER_HIT = (1U << 28U),
/* A shadow catcher object was hit and this path traces only shadow catchers, writing them into
* their dedicated pass for later division.
*
* NOTE: Is not covered with `PATH_RAY_ANY_PASS` because shadow catcher does special handling
* which is separate from the light passes. */
PATH_RAY_SHADOW_CATCHER_PASS = (1 << 29),
PATH_RAY_SHADOW_CATCHER_PASS = (1U << 29U),
/* Path is evaluating background for an approximate shadow catcher with non-transparent film. */
PATH_RAY_SHADOW_CATCHER_BACKGROUND = (1 << 30),
PATH_RAY_SHADOW_CATCHER_BACKGROUND = (1U << 30U),
};
/* Configure ray visibility bits for rays and objects respectively,
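
Aside (plain uint32_t values instead of the real enum): PATH_RAY_ALL_VISIBILITY above is ((1U << 11U) - 1U), i.e. a mask with the low 11 visibility bits set. A small standalone check of how such a mask isolates the visibility part of a path flag:

#include <cassert>
#include <cstdint>

int main()
{
  const uint32_t all_visibility = (1U << 11U) - 1U; /* bits 0..10 set -> 0x7FF */
  assert(all_visibility == 0x7FFu);

  /* e.g. PATH_RAY_CAMERA (bit 0, a visibility bit) plus PATH_RAY_MIS_SKIP (bit 11, not one). */
  const uint32_t path_flag = (1U << 0U) | (1U << 11U);
  const uint32_t visibility = path_flag & all_visibility; /* keeps only the visibility bits */
  assert(visibility == (1U << 0U));
  return 0;
}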

View File

@ -54,7 +54,7 @@ using namespace OSL;
///
class GenericBackgroundClosure : public CClosurePrimitive {
public:
void setup(ShaderData *sd, int /* path_flag */, float3 weight)
void setup(ShaderData *sd, uint32_t /* path_flag */, float3 weight)
{
background_setup(sd, weight);
}
@ -69,7 +69,7 @@ class GenericBackgroundClosure : public CClosurePrimitive {
///
class HoldoutClosure : CClosurePrimitive {
public:
void setup(ShaderData *sd, int /* path_flag */, float3 weight)
void setup(ShaderData *sd, uint32_t /* path_flag */, float3 weight)
{
closure_alloc(sd, sizeof(ShaderClosure), CLOSURE_HOLDOUT_ID, weight);
sd->flag |= SD_HOLDOUT;

View File

@ -53,7 +53,7 @@ class DiffuseRampClosure : public CBSDFClosure {
DiffuseRampBsdf params;
Color3 colors[8];
void setup(ShaderData *sd, int /* path_flag */, float3 weight)
void setup(ShaderData *sd, uint32_t /* path_flag */, float3 weight)
{
DiffuseRampBsdf *bsdf = (DiffuseRampBsdf *)bsdf_alloc_osl(
sd, sizeof(DiffuseRampBsdf), weight, &params);

View File

@ -52,7 +52,7 @@ class PhongRampClosure : public CBSDFClosure {
PhongRampBsdf params;
Color3 colors[8];
void setup(ShaderData *sd, int /* path_flag */, float3 weight)
void setup(ShaderData *sd, uint32_t /* path_flag */, float3 weight)
{
PhongRampBsdf *bsdf = (PhongRampBsdf *)bsdf_alloc_osl(
sd, sizeof(PhongRampBsdf), weight, &params);

View File

@ -56,7 +56,7 @@ using namespace OSL;
///
class GenericEmissiveClosure : public CClosurePrimitive {
public:
void setup(ShaderData *sd, int /* path_flag */, float3 weight)
void setup(ShaderData *sd, uint32_t /* path_flag */, float3 weight)
{
emission_setup(sd, weight);
}

View File

@ -67,7 +67,7 @@ class CBSSRDFClosure : public CClosurePrimitive {
ior = 1.4f;
}
void setup(ShaderData *sd, int path_flag, float3 weight)
void setup(ShaderData *sd, uint32_t path_flag, float3 weight)
{
if (method == u_burley) {
alloc(sd, path_flag, weight, CLOSURE_BSSRDF_BURLEY_ID);
@ -80,7 +80,7 @@ class CBSSRDFClosure : public CClosurePrimitive {
}
}
void alloc(ShaderData *sd, int path_flag, float3 weight, ClosureType type)
void alloc(ShaderData *sd, uint32_t path_flag, float3 weight, ClosureType type)
{
Bssrdf *bssrdf = bssrdf_alloc(sd, weight);

View File

@ -203,7 +203,7 @@ CLOSURE_FLOAT3_PARAM(DiffuseClosure, params.N),
public:
PrincipledSheenBsdf params;
void setup(ShaderData *sd, int path_flag, float3 weight)
void setup(ShaderData *sd, uint32_t path_flag, float3 weight)
{
if (!skip(sd, path_flag, LABEL_DIFFUSE)) {
PrincipledSheenBsdf *bsdf = (PrincipledSheenBsdf *)bsdf_alloc_osl(
@ -228,7 +228,7 @@ class PrincipledHairClosure : public CBSDFClosure {
public:
PrincipledHairBSDF params;
PrincipledHairBSDF *alloc(ShaderData *sd, int path_flag, float3 weight)
PrincipledHairBSDF *alloc(ShaderData *sd, uint32_t path_flag, float3 weight)
{
PrincipledHairBSDF *bsdf = (PrincipledHairBSDF *)bsdf_alloc_osl(
sd, sizeof(PrincipledHairBSDF), weight, &params);
@ -246,7 +246,7 @@ class PrincipledHairClosure : public CBSDFClosure {
return bsdf;
}
void setup(ShaderData *sd, int path_flag, float3 weight)
void setup(ShaderData *sd, uint32_t path_flag, float3 weight)
{
if (!skip(sd, path_flag, LABEL_GLOSSY)) {
PrincipledHairBSDF *bsdf = (PrincipledHairBSDF *)alloc(sd, path_flag, weight);
@ -282,7 +282,7 @@ class PrincipledClearcoatClosure : public CBSDFClosure {
MicrofacetBsdf params;
float clearcoat, clearcoat_roughness;
MicrofacetBsdf *alloc(ShaderData *sd, int path_flag, float3 weight)
MicrofacetBsdf *alloc(ShaderData *sd, uint32_t path_flag, float3 weight)
{
MicrofacetBsdf *bsdf = (MicrofacetBsdf *)bsdf_alloc_osl(
sd, sizeof(MicrofacetBsdf), weight, &params);
@ -306,7 +306,7 @@ class PrincipledClearcoatClosure : public CBSDFClosure {
return bsdf;
}
void setup(ShaderData *sd, int path_flag, float3 weight)
void setup(ShaderData *sd, uint32_t path_flag, float3 weight)
{
MicrofacetBsdf *bsdf = alloc(sd, path_flag, weight);
if (!bsdf) {
@ -496,7 +496,7 @@ void OSLShader::register_closures(OSLShadingSystem *ss_)
/* BSDF Closure */
bool CBSDFClosure::skip(const ShaderData *sd, int path_flag, int scattering)
bool CBSDFClosure::skip(const ShaderData *sd, uint32_t path_flag, int scattering)
{
/* caustic options */
if ((scattering & LABEL_GLOSSY) && (path_flag & PATH_RAY_DIFFUSE)) {
@ -519,7 +519,7 @@ class MicrofacetClosure : public CBSDFClosure {
ustring distribution;
int refract;
void setup(ShaderData *sd, int path_flag, float3 weight)
void setup(ShaderData *sd, uint32_t path_flag, float3 weight)
{
static ustring u_ggx("ggx");
static ustring u_default("default");
@ -595,7 +595,7 @@ class MicrofacetFresnelClosure : public CBSDFClosure {
float3 color;
float3 cspec0;
MicrofacetBsdf *alloc(ShaderData *sd, int path_flag, float3 weight)
MicrofacetBsdf *alloc(ShaderData *sd, uint32_t path_flag, float3 weight)
{
/* Technically, the MultiGGX Glass closure may also transmit. However,
* since this is set statically and only used for caustic flags, this
@ -625,7 +625,7 @@ class MicrofacetFresnelClosure : public CBSDFClosure {
class MicrofacetGGXFresnelClosure : public MicrofacetFresnelClosure {
public:
void setup(ShaderData *sd, int path_flag, float3 weight)
void setup(ShaderData *sd, uint32_t path_flag, float3 weight)
{
MicrofacetBsdf *bsdf = alloc(sd, path_flag, weight);
if (!bsdf) {
@ -654,7 +654,7 @@ CCLOSURE_PREPARE(closure_bsdf_microfacet_ggx_fresnel_prepare, MicrofacetGGXFresn
class MicrofacetGGXAnisoFresnelClosure : public MicrofacetFresnelClosure {
public:
void setup(ShaderData *sd, int path_flag, float3 weight)
void setup(ShaderData *sd, uint32_t path_flag, float3 weight)
{
MicrofacetBsdf *bsdf = alloc(sd, path_flag, weight);
if (!bsdf) {
@ -689,7 +689,7 @@ class MicrofacetMultiClosure : public CBSDFClosure {
MicrofacetBsdf params;
float3 color;
MicrofacetBsdf *alloc(ShaderData *sd, int path_flag, float3 weight)
MicrofacetBsdf *alloc(ShaderData *sd, uint32_t path_flag, float3 weight)
{
/* Technically, the MultiGGX closure may also transmit. However,
* since this is set statically and only used for caustic flags, this
@ -719,7 +719,7 @@ class MicrofacetMultiClosure : public CBSDFClosure {
class MicrofacetMultiGGXClosure : public MicrofacetMultiClosure {
public:
void setup(ShaderData *sd, int path_flag, float3 weight)
void setup(ShaderData *sd, uint32_t path_flag, float3 weight)
{
MicrofacetBsdf *bsdf = alloc(sd, path_flag, weight);
if (!bsdf) {
@ -747,7 +747,7 @@ CCLOSURE_PREPARE(closure_bsdf_microfacet_multi_ggx_prepare, MicrofacetMultiGGXCl
class MicrofacetMultiGGXAnisoClosure : public MicrofacetMultiClosure {
public:
void setup(ShaderData *sd, int path_flag, float3 weight)
void setup(ShaderData *sd, uint32_t path_flag, float3 weight)
{
MicrofacetBsdf *bsdf = alloc(sd, path_flag, weight);
if (!bsdf) {
@ -779,7 +779,7 @@ class MicrofacetMultiGGXGlassClosure : public MicrofacetMultiClosure {
{
}
void setup(ShaderData *sd, int path_flag, float3 weight)
void setup(ShaderData *sd, uint32_t path_flag, float3 weight)
{
MicrofacetBsdf *bsdf = alloc(sd, path_flag, weight);
if (!bsdf) {
@ -813,7 +813,7 @@ class MicrofacetMultiFresnelClosure : public CBSDFClosure {
float3 color;
float3 cspec0;
MicrofacetBsdf *alloc(ShaderData *sd, int path_flag, float3 weight)
MicrofacetBsdf *alloc(ShaderData *sd, uint32_t path_flag, float3 weight)
{
/* Technically, the MultiGGX closure may also transmit. However,
* since this is set statically and only used for caustic flags, this
@ -843,7 +843,7 @@ class MicrofacetMultiFresnelClosure : public CBSDFClosure {
class MicrofacetMultiGGXFresnelClosure : public MicrofacetMultiFresnelClosure {
public:
void setup(ShaderData *sd, int path_flag, float3 weight)
void setup(ShaderData *sd, uint32_t path_flag, float3 weight)
{
MicrofacetBsdf *bsdf = alloc(sd, path_flag, weight);
if (!bsdf) {
@ -873,7 +873,7 @@ CCLOSURE_PREPARE(closure_bsdf_microfacet_multi_ggx_fresnel_prepare,
class MicrofacetMultiGGXAnisoFresnelClosure : public MicrofacetMultiFresnelClosure {
public:
void setup(ShaderData *sd, int path_flag, float3 weight)
void setup(ShaderData *sd, uint32_t path_flag, float3 weight)
{
MicrofacetBsdf *bsdf = alloc(sd, path_flag, weight);
if (!bsdf) {
@ -907,7 +907,7 @@ class MicrofacetMultiGGXGlassFresnelClosure : public MicrofacetMultiFresnelClosu
{
}
void setup(ShaderData *sd, int path_flag, float3 weight)
void setup(ShaderData *sd, uint32_t path_flag, float3 weight)
{
MicrofacetBsdf *bsdf = alloc(sd, path_flag, weight);
if (!bsdf) {
@ -942,7 +942,7 @@ class TransparentClosure : public CBSDFClosure {
ShaderClosure params;
float3 unused;
void setup(ShaderData *sd, int path_flag, float3 weight)
void setup(ShaderData *sd, uint32_t path_flag, float3 weight)
{
bsdf_transparent_setup(sd, weight, path_flag);
}
@ -961,7 +961,7 @@ CCLOSURE_PREPARE(closure_bsdf_transparent_prepare, TransparentClosure)
class VolumeAbsorptionClosure : public CBSDFClosure {
public:
void setup(ShaderData *sd, int path_flag, float3 weight)
void setup(ShaderData *sd, uint32_t path_flag, float3 weight)
{
volume_extinction_setup(sd, weight);
}
@ -980,7 +980,7 @@ class VolumeHenyeyGreensteinClosure : public CBSDFClosure {
public:
HenyeyGreensteinVolume params;
void setup(ShaderData *sd, int path_flag, float3 weight)
void setup(ShaderData *sd, uint32_t path_flag, float3 weight)
{
volume_extinction_setup(sd, weight);

View File

@ -113,7 +113,7 @@ void closure_bsdf_principled_hair_prepare(OSL::RendererServices *, int id, void
class CClosurePrimitive {
public:
virtual void setup(ShaderData *sd, int path_flag, float3 weight) = 0;
virtual void setup(ShaderData *sd, uint32_t path_flag, float3 weight) = 0;
OSL::ustring label;
};
@ -122,7 +122,7 @@ class CClosurePrimitive {
class CBSDFClosure : public CClosurePrimitive {
public:
bool skip(const ShaderData *sd, int path_flag, int scattering);
bool skip(const ShaderData *sd, uint32_t path_flag, int scattering);
};
#define BSDF_CLOSURE_CLASS_BEGIN(Upper, lower, structname, TYPE) \
@ -132,7 +132,7 @@ class CBSDFClosure : public CClosurePrimitive {
structname params; \
float3 unused; \
\
void setup(ShaderData *sd, int path_flag, float3 weight) \
void setup(ShaderData *sd, uint32_t path_flag, float3 weight) \
{ \
if (!skip(sd, path_flag, TYPE)) { \
structname *bsdf = (structname *)bsdf_alloc_osl(sd, sizeof(structname), weight, &params); \
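
Aside (invented Example* names): the many per-closure edits in these OSL files follow from a single interface change. CClosurePrimitive::setup is pure virtual, so once its path_flag parameter becomes uint32_t, every override has to switch as well or it would no longer override the base function. A standalone sketch:

#include <cstdint>

struct ExampleClosure {
  virtual ~ExampleClosure() = default;
  virtual void setup(uint32_t path_flag) = 0;
};

struct ExampleTransparent : ExampleClosure {
  /* Must also take uint32_t; `void setup(int) override` would be rejected by the
   * compiler because it no longer matches the base signature. */
  void setup(uint32_t path_flag) override
  {
    (void)path_flag;
  }
};

int main()
{
  ExampleTransparent closure;
  closure.setup(1U << 6U); /* e.g. a transparent-style bit */
  return 0;
}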

View File

@ -90,7 +90,7 @@ void OSLShader::thread_free(KernelGlobalsCPU *kg)
static void shaderdata_to_shaderglobals(const KernelGlobalsCPU *kg,
ShaderData *sd,
const IntegratorStateCPU *state,
int path_flag,
uint32_t path_flag,
OSLThreadData *tdata)
{
OSL::ShaderGlobals *globals = &tdata->globals;
@ -140,7 +140,7 @@ static void shaderdata_to_shaderglobals(const KernelGlobalsCPU *kg,
/* Surface */
static void flatten_surface_closure_tree(ShaderData *sd,
int path_flag,
uint32_t path_flag,
const OSL::ClosureColor *closure,
float3 weight = make_float3(1.0f, 1.0f, 1.0f))
{
@ -177,7 +177,7 @@ static void flatten_surface_closure_tree(ShaderData *sd,
void OSLShader::eval_surface(const KernelGlobalsCPU *kg,
const IntegratorStateCPU *state,
ShaderData *sd,
int path_flag)
uint32_t path_flag)
{
/* setup shader globals from shader data */
OSLThreadData *tdata = kg->osl_tdata;
@ -285,7 +285,7 @@ static void flatten_background_closure_tree(ShaderData *sd,
void OSLShader::eval_background(const KernelGlobalsCPU *kg,
const IntegratorStateCPU *state,
ShaderData *sd,
int path_flag)
uint32_t path_flag)
{
/* setup shader globals from shader data */
OSLThreadData *tdata = kg->osl_tdata;
@ -343,7 +343,7 @@ static void flatten_volume_closure_tree(ShaderData *sd,
void OSLShader::eval_volume(const KernelGlobalsCPU *kg,
const IntegratorStateCPU *state,
ShaderData *sd,
int path_flag)
uint32_t path_flag)
{
/* setup shader globals from shader data */
OSLThreadData *tdata = kg->osl_tdata;

View File

@ -57,15 +57,15 @@ class OSLShader {
static void eval_surface(const KernelGlobalsCPU *kg,
const IntegratorStateCPU *state,
ShaderData *sd,
int path_flag);
uint32_t path_flag);
static void eval_background(const KernelGlobalsCPU *kg,
const IntegratorStateCPU *state,
ShaderData *sd,
int path_flag);
uint32_t path_flag);
static void eval_volume(const KernelGlobalsCPU *kg,
const IntegratorStateCPU *state,
ShaderData *sd,
int path_flag);
uint32_t path_flag);
static void eval_displacement(const KernelGlobalsCPU *kg,
const IntegratorStateCPU *state,
ShaderData *sd);

View File

@ -230,7 +230,7 @@ ccl_device void svm_eval_nodes(KernelGlobals kg,
ConstIntegratorState state,
ShaderData *sd,
ccl_global float *render_buffer,
int path_flag)
uint32_t path_flag)
{
float stack[SVM_STACK_SIZE];
int offset = sd->shader & SHADER_MASK;

View File

@ -18,7 +18,7 @@
CCL_NAMESPACE_BEGIN
ccl_device_inline bool svm_node_aov_check(const int path_flag, ccl_global float *render_buffer)
ccl_device_inline bool svm_node_aov_check(const uint32_t path_flag, ccl_global float *render_buffer)
{
bool is_primary = (path_flag & PATH_RAY_CAMERA) && (!(path_flag & PATH_RAY_SINGLE_PASS_DONE));

View File

@ -79,7 +79,7 @@ ccl_device_noinline int svm_node_closure_bsdf(KernelGlobals kg,
ccl_private ShaderData *sd,
ccl_private float *stack,
uint4 node,
int path_flag,
uint32_t path_flag,
int offset)
{
uint type, param1_offset, param2_offset;
@ -1015,7 +1015,7 @@ ccl_device_noinline int svm_node_principled_volume(KernelGlobals kg,
ccl_private ShaderData *sd,
ccl_private float *stack,
uint4 node,
int path_flag,
uint32_t path_flag,
int offset)
{
#ifdef __VOLUME__

View File

@ -25,7 +25,7 @@ ccl_device_noinline void svm_node_light_path(KernelGlobals kg,
ccl_private float *stack,
uint type,
uint out_offset,
int path_flag)
uint32_t path_flag)
{
float info = 0.0f;

View File

@ -24,7 +24,7 @@ CCL_NAMESPACE_BEGIN
ccl_device_noinline int svm_node_tex_coord(KernelGlobals kg,
ccl_private ShaderData *sd,
int path_flag,
uint32_t path_flag,
ccl_private float *stack,
uint4 node,
int offset)
@ -105,7 +105,7 @@ ccl_device_noinline int svm_node_tex_coord(KernelGlobals kg,
ccl_device_noinline int svm_node_tex_coord_bump_dx(KernelGlobals kg,
ccl_private ShaderData *sd,
int path_flag,
uint32_t path_flag,
ccl_private float *stack,
uint4 node,
int offset)
@ -190,7 +190,7 @@ ccl_device_noinline int svm_node_tex_coord_bump_dx(KernelGlobals kg,
ccl_device_noinline int svm_node_tex_coord_bump_dy(KernelGlobals kg,
ccl_private ShaderData *sd,
int path_flag,
uint32_t path_flag,
ccl_private float *stack,
uint4 node,
int offset)