Cycles: OpenCL split kernel refactor

This does a few things at once:

- Refactors host-side split kernel logic into a new device-agnostic class
  `DeviceSplitKernel`.
- Removes tile splitting; a new work pool implementation takes its place and
  allows as many threads as will fit in memory regardless of tile size, which
  can give performance gains.
- Refactors split state buffers into one buffer and reduces the number of
  arguments passed to kernels, which means there is less code to deal with
  overall.
- Moves kernel logic out of the OpenCL kernel files so it can later be used by
  other device types.
- Replaces OpenCL-specific APIs with new generic versions.
- Tiles can now be seen updating during rendering.
Mai Lavelle 2017-02-22 08:10:02 -05:00
parent 520b53364c
commit 230c00d872
41 changed files with 1750 additions and 2587 deletions

View File

@ -3,6 +3,7 @@ set(INC
.
../graph
../kernel
../kernel/split
../kernel/svm
../kernel/osl
../util
@ -33,6 +34,7 @@ set(SRC
device_cuda.cpp
device_multi.cpp
device_opencl.cpp
device_split_kernel.cpp
device_task.cpp
)
@ -56,6 +58,7 @@ set(SRC_HEADERS
device_memory.h
device_intern.h
device_network.h
device_split_kernel.h
device_task.h
)

View File

@ -0,0 +1,283 @@
/*
* Copyright 2011-2016 Blender Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "device_split_kernel.h"
#include "kernel_types.h"
#include "kernel_split_data.h"
#include "util_time.h"
CCL_NAMESPACE_BEGIN
static const double alpha = 0.1; /* alpha for rolling average */
DeviceSplitKernel::DeviceSplitKernel(Device *device) : device(device)
{
current_max_closure = -1;
first_tile = true;
avg_time_per_sample = 0.0;
}
DeviceSplitKernel::~DeviceSplitKernel()
{
device->mem_free(split_data);
device->mem_free(ray_state);
device->mem_free(use_queues_flag);
device->mem_free(queue_index);
device->mem_free(work_pool_wgs);
delete kernel_scene_intersect;
delete kernel_lamp_emission;
delete kernel_queue_enqueue;
delete kernel_background_buffer_update;
delete kernel_shader_eval;
delete kernel_holdout_emission_blurring_pathtermination_ao;
delete kernel_direct_lighting;
delete kernel_shadow_blocked;
delete kernel_next_iteration_setup;
delete kernel_sum_all_radiance;
}
bool DeviceSplitKernel::load_kernels(const DeviceRequestedFeatures& requested_features)
{
#define LOAD_KERNEL(name) \
kernel_##name = get_split_kernel_function(#name, requested_features); \
if(!kernel_##name) { \
return false; \
}
LOAD_KERNEL(scene_intersect);
LOAD_KERNEL(lamp_emission);
LOAD_KERNEL(queue_enqueue);
LOAD_KERNEL(background_buffer_update);
LOAD_KERNEL(shader_eval);
LOAD_KERNEL(holdout_emission_blurring_pathtermination_ao);
LOAD_KERNEL(direct_lighting);
LOAD_KERNEL(shadow_blocked);
LOAD_KERNEL(next_iteration_setup);
LOAD_KERNEL(sum_all_radiance);
#undef LOAD_KERNEL
current_max_closure = requested_features.max_closure;
return true;
}
size_t DeviceSplitKernel::max_elements_for_max_buffer_size(size_t max_buffer_size, size_t passes_size)
{
size_t size_per_element = split_data_buffer_size(1024, current_max_closure, passes_size) / 1024;
return max_buffer_size / size_per_element;
}
bool DeviceSplitKernel::path_trace(DeviceTask *task,
RenderTile& tile,
device_memory& kgbuffer,
device_memory& kernel_data)
{
if(device->have_error()) {
return false;
}
/* Get local size */
size_t local_size[2];
{
int2 lsize = split_kernel_local_size();
local_size[0] = lsize[0];
local_size[1] = lsize[1];
}
/* Calculate per_thread_output_buffer_size. */
size_t per_thread_output_buffer_size = task->passes_size;
/* Set global size */
size_t global_size[2];
{
int2 gsize = split_kernel_global_size(task);
/* Make sure that the global work size is a multiple of the
 * local work size dimensions.
 */
global_size[0] = round_up(gsize[0], local_size[0]);
global_size[1] = round_up(gsize[1], local_size[1]);
}
/* Number of elements in the global state buffer */
int num_global_elements = global_size[0] * global_size[1];
/* Allocate all required global memory once. */
if(first_tile) {
first_tile = false;
/* Calculate max groups */
/* Denotes the maximum number of work groups possible w.r.t. the current global work size. */
unsigned int max_work_groups = num_global_elements / WORK_POOL_SIZE + 1;
/* Allocate work_pool_wgs memory. */
work_pool_wgs.resize(max_work_groups * sizeof(unsigned int));
device->mem_alloc(work_pool_wgs, MEM_READ_WRITE);
queue_index.resize(NUM_QUEUES * sizeof(int));
device->mem_alloc(queue_index, MEM_READ_WRITE);
use_queues_flag.resize(sizeof(char));
device->mem_alloc(use_queues_flag, MEM_READ_WRITE);
ray_state.resize(num_global_elements);
device->mem_alloc(ray_state, MEM_READ_WRITE);
split_data.resize(split_data_buffer_size(num_global_elements,
current_max_closure,
per_thread_output_buffer_size));
device->mem_alloc(split_data, MEM_READ_WRITE);
}
#define ENQUEUE_SPLIT_KERNEL(name, global_size, local_size) \
if(device->have_error()) { \
return false; \
} \
if(!kernel_##name->enqueue(KernelDimensions(global_size, local_size), kgbuffer, kernel_data)) { \
return false; \
}
tile.sample = tile.start_sample;
/* for exponential increase between tile updates */
int time_multiplier = 1;
while(tile.sample < tile.start_sample + tile.num_samples) {
/* to keep track of how long it takes to run a number of samples */
double start_time = time_dt();
/* initial guess to start rolling average */
const int initial_num_samples = 1;
/* approx number of samples per second */
int samples_per_second = (avg_time_per_sample > 0.0) ?
int(double(time_multiplier) / avg_time_per_sample) + 1 : initial_num_samples;
RenderTile subtile = tile;
subtile.start_sample = tile.sample;
subtile.num_samples = min(samples_per_second, tile.start_sample + tile.num_samples - tile.sample);
if(device->have_error()) {
return false;
}
/* Reset state memory here, as the global size of the data_init
 * kernel might not be large enough to do it in the kernel.
 */
device->mem_zero(work_pool_wgs);
device->mem_zero(split_data);
if(!enqueue_split_kernel_data_init(KernelDimensions(global_size, local_size),
subtile,
num_global_elements,
kgbuffer,
kernel_data,
split_data,
ray_state,
queue_index,
use_queues_flag,
work_pool_wgs
))
{
return false;
}
bool activeRaysAvailable = true;
while(activeRaysAvailable) {
/* Twice the global work size of the other kernels for
 * the shadow_blocked kernel. */
size_t global_size_shadow_blocked[2];
global_size_shadow_blocked[0] = global_size[0] * 2;
global_size_shadow_blocked[1] = global_size[1];
/* Do path-iteration on the host [enqueue path-iteration kernels]. */
for(int PathIter = 0; PathIter < 16; PathIter++) {
ENQUEUE_SPLIT_KERNEL(scene_intersect, global_size, local_size);
ENQUEUE_SPLIT_KERNEL(lamp_emission, global_size, local_size);
ENQUEUE_SPLIT_KERNEL(queue_enqueue, global_size, local_size);
ENQUEUE_SPLIT_KERNEL(background_buffer_update, global_size, local_size);
ENQUEUE_SPLIT_KERNEL(shader_eval, global_size, local_size);
ENQUEUE_SPLIT_KERNEL(holdout_emission_blurring_pathtermination_ao, global_size, local_size);
ENQUEUE_SPLIT_KERNEL(direct_lighting, global_size, local_size);
ENQUEUE_SPLIT_KERNEL(shadow_blocked, global_size_shadow_blocked, local_size);
ENQUEUE_SPLIT_KERNEL(next_iteration_setup, global_size, local_size);
if(task->get_cancel()) {
return true;
}
}
/* Decide if we should exit path-iteration on the host. */
device->mem_copy_from(ray_state, 0, global_size[0] * global_size[1] * sizeof(char), 1, 1);
activeRaysAvailable = false;
for(int rayStateIter = 0; rayStateIter < global_size[0] * global_size[1]; ++rayStateIter) {
if(int8_t(ray_state.get_data()[rayStateIter]) != RAY_INACTIVE) {
/* Not all rays are RAY_INACTIVE. */
activeRaysAvailable = true;
break;
}
}
if(task->get_cancel()) {
return true;
}
}
double time_per_sample = ((time_dt()-start_time) / subtile.num_samples);
if(avg_time_per_sample == 0.0) {
/* start rolling average */
avg_time_per_sample = time_per_sample;
}
else {
avg_time_per_sample = alpha*time_per_sample + (1.0-alpha)*avg_time_per_sample;
}
size_t sum_all_radiance_local_size[2] = {16, 16};
size_t sum_all_radiance_global_size[2];
sum_all_radiance_global_size[0] = round_up(tile.w, sum_all_radiance_local_size[0]);
sum_all_radiance_global_size[1] = round_up(tile.h, sum_all_radiance_local_size[1]);
ENQUEUE_SPLIT_KERNEL(sum_all_radiance,
sum_all_radiance_global_size,
sum_all_radiance_local_size);
#undef ENQUEUE_SPLIT_KERNEL
tile.sample += subtile.num_samples;
task->update_progress(&tile, tile.w*tile.h*subtile.num_samples);
time_multiplier = min(time_multiplier << 1, 10);
if(task->get_cancel()) {
return true;
}
}
return true;
}
CCL_NAMESPACE_END
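Note: the loop in path_trace() above batches samples so that each pass through the path-iteration kernels takes roughly time_multiplier seconds, doubling the interval between tile updates up to a cap of 10 seconds. A minimal stand-alone sketch of that estimator, assuming the same alpha = 0.1 rolling average (the SampleBatcher name is hypothetical and not part of this commit):

#include <algorithm>

/* Hypothetical illustration of the sample batching used by path_trace(). */
struct SampleBatcher {
	double avg_time_per_sample;
	int time_multiplier;

	SampleBatcher() : avg_time_per_sample(0.0), time_multiplier(1) {}

	/* Number of samples expected to take about time_multiplier seconds. */
	int next_batch_size() const
	{
		const int initial_num_samples = 1;
		return (avg_time_per_sample > 0.0) ?
		       int(double(time_multiplier) / avg_time_per_sample) + 1 :
		       initial_num_samples;
	}

	/* Update the rolling average after timing a batch, and exponentially
	 * increase the interval between tile updates, capped at 10 seconds. */
	void update(double batch_seconds, int batch_samples)
	{
		const double alpha = 0.1;
		double time_per_sample = batch_seconds / batch_samples;
		avg_time_per_sample = (avg_time_per_sample == 0.0) ?
		       time_per_sample :
		       alpha * time_per_sample + (1.0 - alpha) * avg_time_per_sample;
		time_multiplier = std::min(time_multiplier << 1, 10);
	}
};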

View File

@ -0,0 +1,126 @@
/*
* Copyright 2011-2016 Blender Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __DEVICE_SPLIT_KERNEL_H__
#define __DEVICE_SPLIT_KERNEL_H__
#include "device.h"
#include "buffers.h"
CCL_NAMESPACE_BEGIN
/* When allocating global memory in chunks we may not be able to
 * allocate exactly "CL_DEVICE_MAX_MEM_ALLOC_SIZE" bytes per chunk,
 * since some bytes may be needed for aligning chunks of memory.
 * This is the amount of memory that we dedicate for that purpose.
 */
#define DATA_ALLOCATION_MEM_FACTOR 5000000 //5MB
/* Types used for split kernel */
class KernelDimensions {
public:
size_t global_size[2];
size_t local_size[2];
KernelDimensions(size_t global_size_[2], size_t local_size_[2])
{
memcpy(global_size, global_size_, sizeof(global_size));
memcpy(local_size, local_size_, sizeof(local_size));
}
};
class SplitKernelFunction {
public:
virtual ~SplitKernelFunction() {}
/* enqueue the kernel, returns false if there is an error */
virtual bool enqueue(const KernelDimensions& dim, device_memory& kg, device_memory& data) = 0;
};
class DeviceSplitKernel {
private:
Device *device;
SplitKernelFunction *kernel_scene_intersect;
SplitKernelFunction *kernel_lamp_emission;
SplitKernelFunction *kernel_queue_enqueue;
SplitKernelFunction *kernel_background_buffer_update;
SplitKernelFunction *kernel_shader_eval;
SplitKernelFunction *kernel_holdout_emission_blurring_pathtermination_ao;
SplitKernelFunction *kernel_direct_lighting;
SplitKernelFunction *kernel_shadow_blocked;
SplitKernelFunction *kernel_next_iteration_setup;
SplitKernelFunction *kernel_sum_all_radiance;
/* Global memory variables [porting]; this memory is used for
 * cooperation between different kernels; data written by one
 * kernel is available to other kernels via this global
 * memory.
 */
device_memory split_data;
device_vector<uchar> ray_state;
device_memory queue_index; /* Array of size num_queues * sizeof(int) that tracks the size of each queue. */
/* Flag to make the scene_intersect and lamp_emission kernels use queues. */
device_memory use_queues_flag;
/* Approximate time it takes to complete one sample */
double avg_time_per_sample;
/* Work pool with respect to each work group. */
device_memory work_pool_wgs;
/* max_closure value for which the kernels are currently loaded. */
int current_max_closure;
/* Set to true in the constructor and to false on the first call to path_trace(). */
bool first_tile;
public:
explicit DeviceSplitKernel(Device* device);
virtual ~DeviceSplitKernel();
bool load_kernels(const DeviceRequestedFeatures& requested_features);
bool path_trace(DeviceTask *task,
RenderTile& rtile,
device_memory& kgbuffer,
device_memory& kernel_data);
size_t max_elements_for_max_buffer_size(size_t max_buffer_size, size_t passes_size);
virtual bool enqueue_split_kernel_data_init(const KernelDimensions& dim,
RenderTile& rtile,
int num_global_elements,
device_memory& kernel_globals,
device_memory& kernel_data_,
device_memory& split_data,
device_memory& ray_state,
device_memory& queue_index,
device_memory& use_queues_flag,
device_memory& work_pool_wgs) = 0;
virtual SplitKernelFunction* get_split_kernel_function(string kernel_name, const DeviceRequestedFeatures&) = 0;
virtual int2 split_kernel_local_size() = 0;
virtual int2 split_kernel_global_size(DeviceTask *task) = 0;
};
CCL_NAMESPACE_END
#endif /* __DEVICE_SPLIT_KERNEL_H__ */
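For orientation, here is a minimal sketch of what a concrete backend would have to provide to drive DeviceSplitKernel. Only the virtual signatures come from the header above; the class names and bodies below are hypothetical placeholders, not code from this commit:

/* Hypothetical backend sketch; only the overridden signatures are real. */
class MySplitKernelFunction : public SplitKernelFunction {
public:
	bool enqueue(const KernelDimensions& dim, device_memory& kg, device_memory& data)
	{
		/* Launch the backend kernel with dim.global_size/dim.local_size,
		 * binding kg and data; return false on launch failure. */
		return true;
	}
};

class MyDeviceSplitKernel : public DeviceSplitKernel {
public:
	explicit MyDeviceSplitKernel(Device *device) : DeviceSplitKernel(device) {}

	SplitKernelFunction *get_split_kernel_function(string kernel_name,
	                                               const DeviceRequestedFeatures&)
	{
		/* Compile or look up the kernel named kernel_name. */
		return new MySplitKernelFunction();
	}

	bool enqueue_split_kernel_data_init(const KernelDimensions& dim,
	                                    RenderTile& rtile,
	                                    int num_global_elements,
	                                    device_memory& kernel_globals,
	                                    device_memory& kernel_data,
	                                    device_memory& split_data,
	                                    device_memory& ray_state,
	                                    device_memory& queue_index,
	                                    device_memory& use_queues_flag,
	                                    device_memory& work_pool_wgs)
	{
		/* Bind the state buffers and launch data_init once per tile,
		 * before DeviceSplitKernel runs the path-iteration loop. */
		return true;
	}

	int2 split_kernel_local_size() { return make_int2(64, 1); }

	int2 split_kernel_global_size(DeviceTask * /*task*/)
	{
		/* As many threads as fit in memory; path_trace() rounds this up
		 * to a multiple of the local size. */
		return make_int2(1024, 64);
	}
};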

View File

@ -26,30 +26,9 @@
CCL_NAMESPACE_BEGIN
#define CL_MEM_PTR(p) ((cl_mem)(uintptr_t)(p))
/* Macro declarations used with split kernel */
/* Macro to enable/disable work-stealing */
#define __WORK_STEALING__
#define SPLIT_KERNEL_LOCAL_SIZE_X 64
#define SPLIT_KERNEL_LOCAL_SIZE_Y 1
/* This value may be tuned according to the scene we are rendering.
*
* Modifying PATH_ITER_INC_FACTOR value proportional to number of expected
* ray-bounces will improve performance.
*/
#define PATH_ITER_INC_FACTOR 8
/* When allocate global memory in chunks. We may not be able to
* allocate exactly "CL_DEVICE_MAX_MEM_ALLOC_SIZE" bytes in chunks;
* Since some bytes may be needed for aligning chunks of memory;
* This is the amount of memory that we dedicate for that purpose.
*/
#define DATA_ALLOCATION_MEM_FACTOR 5000000 //5MB
struct OpenCLPlatformDevice {
OpenCLPlatformDevice(cl_platform_id platform_id,
const string& platform_name,
@ -266,7 +245,7 @@ public:
/* Has to be implemented by the real device classes.
* The base device will then load all these programs. */
virtual void load_kernels(const DeviceRequestedFeatures& requested_features,
virtual bool load_kernels(const DeviceRequestedFeatures& requested_features,
vector<OpenCLProgram*> &programs) = 0;
void mem_alloc(device_memory& mem, MemoryType type);
@ -326,16 +305,39 @@ protected:
class ArgumentWrapper {
public:
ArgumentWrapper() : size(0), pointer(NULL) {}
template <typename T>
ArgumentWrapper() : size(0), pointer(NULL)
{
}
ArgumentWrapper(device_memory& argument) : size(sizeof(void*)),
pointer((void*)(&argument.device_pointer))
{
}
template<typename T>
ArgumentWrapper(device_vector<T>& argument) : size(sizeof(void*)),
pointer((void*)(&argument.device_pointer))
{
}
template<typename T>
ArgumentWrapper(T& argument) : size(sizeof(argument)),
pointer(&argument) { }
pointer(&argument)
{
}
ArgumentWrapper(int argument) : size(sizeof(int)),
int_value(argument),
pointer(&int_value) { }
pointer(&int_value)
{
}
ArgumentWrapper(float argument) : size(sizeof(float)),
float_value(argument),
pointer(&float_value) { }
pointer(&float_value)
{
}
size_t size;
int int_value;
float float_value;

View File

@ -211,7 +211,9 @@ bool OpenCLDeviceBase::load_kernels(const DeviceRequestedFeatures& requested_fea
vector<OpenCLProgram*> programs;
programs.push_back(&base_program);
/* Call actual class to fill the vector with its programs. */
load_kernels(requested_features, programs);
if(!load_kernels(requested_features, programs)) {
return false;
}
/* Parallel compilation is supported by Cycles, but currently all OpenCL frameworks
* serialize the calls internally, so it's not much use right now.

View File

@ -43,11 +43,12 @@ public:
return true;
}
virtual void load_kernels(const DeviceRequestedFeatures& /*requested_features*/,
virtual bool load_kernels(const DeviceRequestedFeatures& /*requested_features*/,
vector<OpenCLProgram*> &programs)
{
path_trace_program.add_kernel(ustring("path_trace"));
programs.push_back(&path_trace_program);
return true;
}
~OpenCLDeviceMegaKernel()

File diff suppressed because it is too large.

View File

@ -195,10 +195,12 @@ set(SRC_SPLIT_HEADERS
split/kernel_holdout_emission_blurring_pathtermination_ao.h
split/kernel_lamp_emission.h
split/kernel_next_iteration_setup.h
split/kernel_queue_enqueue.h
split/kernel_scene_intersect.h
split/kernel_shader_eval.h
split/kernel_shadow_blocked.h
split/kernel_split_common.h
split/kernel_split_data.h
split/kernel_sum_all_radiance.h
)

View File

@ -39,6 +39,7 @@
#define ccl_constant __constant
#define ccl_global __global
#define ccl_local __local
#define ccl_local_param __local
#define ccl_private __private
#define ccl_restrict restrict
#define ccl_align(n) __attribute__((aligned(n)))
@ -49,6 +50,15 @@
# define ccl_addr_space
#endif
#define ccl_local_id(d) get_local_id(d)
#define ccl_global_id(d) get_global_id(d)
#define ccl_local_size(d) get_local_size(d)
#define ccl_global_size(d) get_global_size(d)
#define ccl_group_id(d) get_group_id(d)
#define ccl_num_groups(d) get_num_groups(d)
/* Selective nodes compilation. */
#ifndef __NODES_MAX_GROUP__
# define __NODES_MAX_GROUP__ NODE_GROUP_LEVEL_MAX

View File

@ -105,6 +105,8 @@ typedef ccl_addr_space struct KernelGlobals {
# ifdef __SPLIT_KERNEL__
ShaderData *sd_input;
Intersection *isect_shadow;
SplitData split_data;
SplitParams split_param_data;
# endif
} KernelGlobals;

View File

@ -19,16 +19,16 @@ CCL_NAMESPACE_BEGIN
ccl_device_inline void kernel_write_pass_float(ccl_global float *buffer, int sample, float value)
{
ccl_global float *buf = buffer;
#if defined(__SPLIT_KERNEL__) && defined(__WORK_STEALING__)
#if defined(__SPLIT_KERNEL__)
atomic_add_and_fetch_float(buf, value);
#else
*buf = (sample == 0)? value: *buf + value;
#endif // __SPLIT_KERNEL__ && __WORK_STEALING__
#endif /* __SPLIT_KERNEL__ */
}
ccl_device_inline void kernel_write_pass_float3(ccl_global float *buffer, int sample, float3 value)
{
#if defined(__SPLIT_KERNEL__) && defined(__WORK_STEALING__)
#if defined(__SPLIT_KERNEL__)
ccl_global float *buf_x = buffer + 0;
ccl_global float *buf_y = buffer + 1;
ccl_global float *buf_z = buffer + 2;
@ -39,12 +39,12 @@ ccl_device_inline void kernel_write_pass_float3(ccl_global float *buffer, int sa
#else
ccl_global float3 *buf = (ccl_global float3*)buffer;
*buf = (sample == 0)? value: *buf + value;
#endif // __SPLIT_KERNEL__ && __WORK_STEALING__
#endif /* __SPLIT_KERNEL__ */
}
ccl_device_inline void kernel_write_pass_float4(ccl_global float *buffer, int sample, float4 value)
{
#if defined(__SPLIT_KERNEL__) && defined(__WORK_STEALING__)
#if defined(__SPLIT_KERNEL__)
ccl_global float *buf_x = buffer + 0;
ccl_global float *buf_y = buffer + 1;
ccl_global float *buf_z = buffer + 2;
@ -57,7 +57,7 @@ ccl_device_inline void kernel_write_pass_float4(ccl_global float *buffer, int sa
#else
ccl_global float4 *buf = (ccl_global float4*)buffer;
*buf = (sample == 0)? value: *buf + value;
#endif // __SPLIT_KERNEL__ && __WORK_STEALING__
#endif /* __SPLIT_KERNEL__ */
}
ccl_device_inline void kernel_write_data_passes(KernelGlobals *kg, ccl_global float *buffer, PathRadiance *L,

View File

@ -17,6 +17,8 @@
#ifndef __KERNEL_QUEUE_H__
#define __KERNEL_QUEUE_H__
CCL_NAMESPACE_BEGIN
/*
* Queue utility functions for split kernel
*/
@ -35,7 +37,8 @@ ccl_device void enqueue_ray_index(
ccl_global int *queue_index) /* Array of size num_queues; Used for atomic increment. */
{
/* This thread's queue index. */
int my_queue_index = atomic_inc(&queue_index[queue_number]) + (queue_number * queue_size);
int my_queue_index = atomic_fetch_and_inc_uint32((ccl_global uint*)&queue_index[queue_number])
+ (queue_number * queue_size);
queues[my_queue_index] = ray_index;
}
@ -47,6 +50,7 @@ ccl_device void enqueue_ray_index(
* is no more ray to allocate to other threads.
*/
ccl_device int get_ray_index(
KernelGlobals *kg,
int thread_index, /* Global thread index. */
int queue_number, /* Queue to operate on. */
ccl_global int *queues, /* Buffer of all queues. */
@ -68,24 +72,25 @@ ccl_device void enqueue_ray_index_local(
int queue_number, /* Queue in which to enqueue ray index. */
char enqueue_flag, /* True for threads whose ray index has to be enqueued. */
int queuesize, /* queue size. */
ccl_local unsigned int *local_queue_atomics, /* To to local queue atomics. */
ccl_local_param unsigned int *local_queue_atomics, /* To do local queue atomics. */
ccl_global int *Queue_data, /* Queues. */
ccl_global int *Queue_index) /* To do global queue atomics. */
{
int lidx = get_local_id(1) * get_local_size(0) + get_local_id(0);
int lidx = ccl_local_id(1) * ccl_local_size(0) + ccl_local_id(0);
/* Get local queue id. */
unsigned int lqidx;
if(enqueue_flag) {
lqidx = atomic_inc(local_queue_atomics);
lqidx = atomic_fetch_and_inc_uint32(local_queue_atomics);
}
barrier(CLK_LOCAL_MEM_FENCE);
ccl_barrier(CCL_LOCAL_MEM_FENCE);
/* Get global queue offset. */
if(lidx == 0) {
*local_queue_atomics = atomic_add(&Queue_index[queue_number], *local_queue_atomics);
*local_queue_atomics = atomic_fetch_and_add_uint32((ccl_global uint*)&Queue_index[queue_number],
*local_queue_atomics);
}
barrier(CLK_LOCAL_MEM_FENCE);
ccl_barrier(CCL_LOCAL_MEM_FENCE);
/* Get global queue index and enqueue ray. */
if(enqueue_flag) {
@ -96,19 +101,19 @@ ccl_device void enqueue_ray_index_local(
ccl_device unsigned int get_local_queue_index(
int queue_number, /* Queue in which to enqueue the ray; -1 if no queue */
ccl_local unsigned int *local_queue_atomics)
ccl_local_param unsigned int *local_queue_atomics)
{
int my_lqidx = atomic_inc(&local_queue_atomics[queue_number]);
int my_lqidx = atomic_fetch_and_inc_uint32(&local_queue_atomics[queue_number]);
return my_lqidx;
}
ccl_device unsigned int get_global_per_queue_offset(
int queue_number,
ccl_local unsigned int *local_queue_atomics,
ccl_local_param unsigned int *local_queue_atomics,
ccl_global int* global_queue_atomics)
{
unsigned int queue_offset = atomic_add(&global_queue_atomics[queue_number],
local_queue_atomics[queue_number]);
unsigned int queue_offset = atomic_fetch_and_add_uint32((ccl_global uint*)&global_queue_atomics[queue_number],
local_queue_atomics[queue_number]);
return queue_offset;
}
@ -116,10 +121,12 @@ ccl_device unsigned int get_global_queue_index(
int queue_number,
int queuesize,
unsigned int lqidx,
ccl_local unsigned int * global_per_queue_offset)
ccl_local_param unsigned int * global_per_queue_offset)
{
int my_gqidx = queuesize * queue_number + lqidx + global_per_queue_offset[queue_number];
return my_gqidx;
}
CCL_NAMESPACE_END
#endif // __KERNEL_QUEUE_H__
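enqueue_ray_index_local() above uses a two-level scheme: each work group counts its enqueuing threads with a local atomic, reserves a contiguous range in the global queue with a single global atomic, and each thread then writes at (group offset + local index). A serial host-side illustration of that arithmetic, with example values that are not part of this commit:

#include <cstdio>
#include <vector>

int main()
{
	const int queue_size = 16;
	std::vector<int> queue(queue_size, -1);
	int global_queue_index = 0;              /* plays the role of Queue_index[queue_number] */

	const int num_groups = 2, group_threads = 4;
	for(int group = 0; group < num_groups; group++) {
		unsigned int local_queue_atomics = 0;

		/* Phase 1: threads with enqueue_flag grab a local slot. */
		std::vector<int> lqidx(group_threads, -1);
		for(int t = 0; t < group_threads; t++) {
			bool enqueue_flag = (t % 2 == 0);  /* example: even threads enqueue */
			if(enqueue_flag)
				lqidx[t] = local_queue_atomics++;
		}

		/* Phase 2: one thread reserves the group's range in the global queue. */
		int group_offset = global_queue_index;
		global_queue_index += local_queue_atomics;

		/* Phase 3: each enqueuing thread writes its ray index. */
		for(int t = 0; t < group_threads; t++) {
			if(lqidx[t] >= 0)
				queue[group_offset + lqidx[t]] = group * group_threads + t;
		}
	}

	for(int i = 0; i < global_queue_index; i++)
		printf("queue[%d] = %d\n", i, queue[i]);
	return 0;
}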

View File

@ -248,7 +248,7 @@ ccl_device bool shadow_blocked_transparent_all(KernelGlobals *kg,
}
# endif /* __SHADOW_RECORD_ALL__ */
# ifdef __KERNEL_GPU__
# if defined(__KERNEL_GPU__) || !defined(__SHADOW_RECORD_ALL__)
/* Shadow function to compute how much light is blocked,
*
* Here we raytrace from one transparent surface to the next step by step.
@ -359,7 +359,7 @@ ccl_device bool shadow_blocked_transparent_stepped(
shadow);
}
# endif /* __KERNEL_GPU__ */
# endif /* __KERNEL_GPU__ || !__SHADOW_RECORD_ALL__ */
#endif /* __TRANSPARENT_SHADOWS__ */
ccl_device_inline bool shadow_blocked(KernelGlobals *kg,
@ -374,7 +374,7 @@ ccl_device_inline bool shadow_blocked(KernelGlobals *kg,
#ifdef __SPLIT_KERNEL__
Ray private_ray = *ray_input;
Ray *ray = &private_ray;
Intersection *isect = &kg->isect_shadow[SD_THREAD];
Intersection *isect = &kernel_split_state.isect_shadow[SD_THREAD];
#else /* __SPLIT_KERNEL__ */
Ray *ray = ray_input;
Intersection isect_object;

View File

@ -56,6 +56,8 @@ CCL_NAMESPACE_BEGIN
#define VOLUME_STACK_SIZE 16
#define WORK_POOL_SIZE 64
/* device capabilities */
#ifdef __KERNEL_CPU__
# ifdef __KERNEL_SSE2__
@ -799,7 +801,7 @@ enum ShaderDataObjectFlag {
};
#ifdef __SPLIT_KERNEL__
# define SD_THREAD (get_global_id(1) * get_global_size(0) + get_global_id(0))
# define SD_THREAD (ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0))
# if !defined(__SPLIT_KERNEL_SOA__)
/* ShaderData is stored as an Array-of-Structures */
# define ccl_soa_member(type, name) type soa_##name
@ -807,7 +809,7 @@ enum ShaderDataObjectFlag {
# define ccl_fetch_array(s, t, index) (&s[SD_THREAD].soa_##t[index])
# else
/* ShaderData is stored as an Structure-of-Arrays */
# define SD_GLOBAL_SIZE (get_global_size(0) * get_global_size(1))
# define SD_GLOBAL_SIZE (ccl_global_size(0) * ccl_global_size(1))
# define SD_FIELD_SIZE(t) sizeof(((struct ShaderData*)0)->t)
# define SD_OFFSETOF(t) ((char*)(&((struct ShaderData*)0)->t) - (char*)0)
# define ccl_soa_member(type, name) type soa_##name

View File

@ -17,177 +17,102 @@
#ifndef __KERNEL_WORK_STEALING_H__
#define __KERNEL_WORK_STEALING_H__
CCL_NAMESPACE_BEGIN
/*
* Utility functions for work stealing
*/
#ifdef __WORK_STEALING__
#ifdef __KERNEL_OPENCL__
# pragma OPENCL EXTENSION cl_khr_global_int32_base_atomics : enable
#endif
uint get_group_id_with_ray_index(uint ray_index,
uint tile_dim_x,
uint tile_dim_y,
uint parallel_samples,
int dim)
ccl_device_inline uint kernel_total_work_size(KernelGlobals *kg)
{
if(dim == 0) {
uint x_span = ray_index % (tile_dim_x * parallel_samples);
return x_span / get_local_size(0);
return kernel_split_params.w * kernel_split_params.h * kernel_split_params.num_samples;
}
ccl_device_inline uint kernel_num_work_pools(KernelGlobals *kg)
{
return ccl_global_size(0) * ccl_global_size(1) / WORK_POOL_SIZE;
}
ccl_device_inline uint work_pool_from_ray_index(KernelGlobals *kg, uint ray_index)
{
return ray_index / WORK_POOL_SIZE;
}
ccl_device_inline uint work_pool_work_size(KernelGlobals *kg, uint work_pool)
{
uint total_work_size = kernel_total_work_size(kg);
uint num_pools = kernel_num_work_pools(kg);
if(work_pool >= num_pools || work_pool * WORK_POOL_SIZE >= total_work_size) {
return 0;
}
else /*if(dim == 1)*/ {
kernel_assert(dim == 1);
uint y_span = ray_index / (tile_dim_x * parallel_samples);
return y_span / get_local_size(1);
uint work_size = (total_work_size / (num_pools * WORK_POOL_SIZE)) * WORK_POOL_SIZE;
uint remainder = (total_work_size % (num_pools * WORK_POOL_SIZE));
if(work_pool < remainder / WORK_POOL_SIZE) {
work_size += WORK_POOL_SIZE;
}
else if(work_pool == remainder / WORK_POOL_SIZE) {
work_size += remainder % WORK_POOL_SIZE;
}
return work_size;
}
uint get_total_work(uint tile_dim_x,
uint tile_dim_y,
uint grp_idx,
uint grp_idy,
uint num_samples)
ccl_device_inline uint get_global_work_index(KernelGlobals *kg, uint work_index, uint ray_index)
{
uint threads_within_tile_border_x =
(grp_idx == (get_num_groups(0) - 1)) ? tile_dim_x % get_local_size(0)
: get_local_size(0);
uint threads_within_tile_border_y =
(grp_idy == (get_num_groups(1) - 1)) ? tile_dim_y % get_local_size(1)
: get_local_size(1);
uint num_pools = kernel_num_work_pools(kg);
uint pool = work_pool_from_ray_index(kg, ray_index);
threads_within_tile_border_x =
(threads_within_tile_border_x == 0) ? get_local_size(0)
: threads_within_tile_border_x;
threads_within_tile_border_y =
(threads_within_tile_border_y == 0) ? get_local_size(1)
: threads_within_tile_border_y;
return threads_within_tile_border_x *
threads_within_tile_border_y *
num_samples;
return (work_index / WORK_POOL_SIZE) * (num_pools * WORK_POOL_SIZE)
+ (pool * WORK_POOL_SIZE)
+ (work_index % WORK_POOL_SIZE);
}
/* Returns 0 in case there is no next work available */
/* Returns 1 in case work assigned is valid */
int get_next_work(ccl_global uint *work_pool,
ccl_private uint *my_work,
uint tile_dim_x,
uint tile_dim_y,
uint num_samples,
uint parallel_samples,
uint ray_index)
/* Returns true if there is work */
ccl_device bool get_next_work(KernelGlobals *kg, ccl_private uint *work_index, uint ray_index)
{
uint grp_idx = get_group_id_with_ray_index(ray_index,
tile_dim_x,
tile_dim_y,
parallel_samples,
0);
uint grp_idy = get_group_id_with_ray_index(ray_index,
tile_dim_x,
tile_dim_y,
parallel_samples,
1);
uint total_work = get_total_work(tile_dim_x,
tile_dim_y,
grp_idx,
grp_idy,
num_samples);
uint group_index = grp_idy * get_num_groups(0) + grp_idx;
*my_work = atomic_inc(&work_pool[group_index]);
return (*my_work < total_work) ? 1 : 0;
uint work_pool = work_pool_from_ray_index(kg, ray_index);
uint pool_size = work_pool_work_size(kg, work_pool);
if(pool_size == 0) {
return false;
}
*work_index = atomic_fetch_and_inc_uint32(&kernel_split_params.work_pools[work_pool]);
return (*work_index < pool_size);
}
/* This function assumes that the passed my_work is valid. */
/* Decode sample number w.r.t. assigned my_work. */
uint get_my_sample(uint my_work,
uint tile_dim_x,
uint tile_dim_y,
uint parallel_samples,
uint ray_index)
/* This function assumes that the passed `work` is valid. */
/* Decode sample number w.r.t. assigned `work`. */
ccl_device uint get_work_sample(KernelGlobals *kg, uint work_index, uint ray_index)
{
uint grp_idx = get_group_id_with_ray_index(ray_index,
tile_dim_x,
tile_dim_y,
parallel_samples,
0);
uint grp_idy = get_group_id_with_ray_index(ray_index,
tile_dim_x,
tile_dim_y,
parallel_samples,
1);
uint threads_within_tile_border_x =
(grp_idx == (get_num_groups(0) - 1)) ? tile_dim_x % get_local_size(0)
: get_local_size(0);
uint threads_within_tile_border_y =
(grp_idy == (get_num_groups(1) - 1)) ? tile_dim_y % get_local_size(1)
: get_local_size(1);
threads_within_tile_border_x =
(threads_within_tile_border_x == 0) ? get_local_size(0)
: threads_within_tile_border_x;
threads_within_tile_border_y =
(threads_within_tile_border_y == 0) ? get_local_size(1)
: threads_within_tile_border_y;
return my_work /
(threads_within_tile_border_x * threads_within_tile_border_y);
return get_global_work_index(kg, work_index, ray_index) / (kernel_split_params.w * kernel_split_params.h);
}
/* Decode pixel and tile position w.r.t. assigned my_work. */
void get_pixel_tile_position(ccl_private uint *pixel_x,
/* Decode pixel and tile position w.r.t. assigned `work`. */
ccl_device void get_work_pixel_tile_position(KernelGlobals *kg,
ccl_private uint *pixel_x,
ccl_private uint *pixel_y,
ccl_private uint *tile_x,
ccl_private uint *tile_y,
uint my_work,
uint tile_dim_x,
uint tile_dim_y,
uint tile_offset_x,
uint tile_offset_y,
uint parallel_samples,
uint work_index,
uint ray_index)
{
uint grp_idx = get_group_id_with_ray_index(ray_index,
tile_dim_x,
tile_dim_y,
parallel_samples,
0);
uint grp_idy = get_group_id_with_ray_index(ray_index,
tile_dim_x,
tile_dim_y,
parallel_samples,
1);
uint threads_within_tile_border_x =
(grp_idx == (get_num_groups(0) - 1)) ? tile_dim_x % get_local_size(0)
: get_local_size(0);
uint threads_within_tile_border_y =
(grp_idy == (get_num_groups(1) - 1)) ? tile_dim_y % get_local_size(1)
: get_local_size(1);
uint pixel_index = get_global_work_index(kg, work_index, ray_index) % (kernel_split_params.w*kernel_split_params.h);
threads_within_tile_border_x =
(threads_within_tile_border_x == 0) ? get_local_size(0)
: threads_within_tile_border_x;
threads_within_tile_border_y =
(threads_within_tile_border_y == 0) ? get_local_size(1)
: threads_within_tile_border_y;
*tile_x = pixel_index % kernel_split_params.w;
*tile_y = pixel_index / kernel_split_params.w;
uint total_associated_pixels =
threads_within_tile_border_x * threads_within_tile_border_y;
uint work_group_pixel_index = my_work % total_associated_pixels;
uint work_group_pixel_x =
work_group_pixel_index % threads_within_tile_border_x;
uint work_group_pixel_y =
work_group_pixel_index / threads_within_tile_border_x;
*pixel_x =
tile_offset_x + (grp_idx * get_local_size(0)) + work_group_pixel_x;
*pixel_y =
tile_offset_y + (grp_idy * get_local_size(1)) + work_group_pixel_y;
*tile_x = *pixel_x - tile_offset_x;
*tile_y = *pixel_y - tile_offset_y;
*pixel_x = *tile_x + kernel_split_params.x;
*pixel_y = *tile_y + kernel_split_params.y;
}
#endif /* __WORK_STEALING__ */
CCL_NAMESPACE_END
#endif /* __KERNEL_WORK_STEALING_H__ */
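The new work pool scheme above splits the total work (tile width * height * samples) between pools of WORK_POOL_SIZE threads, each pool sharing one atomic counter, and interleaves the pools' WORK_POOL_SIZE-sized slices across the global work index space. A host-side sketch of the same arithmetic with assumed example sizes (not part of this commit):

#include <cstdio>

static const unsigned int WORK_POOL_SIZE = 64;

static unsigned int work_pool_work_size(unsigned int total_work_size,
                                        unsigned int num_pools,
                                        unsigned int work_pool)
{
	if(work_pool >= num_pools || work_pool * WORK_POOL_SIZE >= total_work_size)
		return 0;
	unsigned int work_size = (total_work_size / (num_pools * WORK_POOL_SIZE)) * WORK_POOL_SIZE;
	unsigned int remainder = total_work_size % (num_pools * WORK_POOL_SIZE);
	if(work_pool < remainder / WORK_POOL_SIZE)
		work_size += WORK_POOL_SIZE;
	else if(work_pool == remainder / WORK_POOL_SIZE)
		work_size += remainder % WORK_POOL_SIZE;
	return work_size;
}

static unsigned int global_work_index(unsigned int num_pools,
                                      unsigned int work_pool,
                                      unsigned int work_index)
{
	/* Pool p owns slice [p*WORK_POOL_SIZE, (p+1)*WORK_POOL_SIZE) of every
	 * round of num_pools*WORK_POOL_SIZE consecutive work items. */
	return (work_index / WORK_POOL_SIZE) * (num_pools * WORK_POOL_SIZE)
	       + (work_pool * WORK_POOL_SIZE)
	       + (work_index % WORK_POOL_SIZE);
}

int main()
{
	unsigned int total = 100 * 100 * 4;                     /* e.g. a 100x100 tile, 4 samples */
	unsigned int num_pools = (1024 * 64) / WORK_POOL_SIZE;  /* e.g. a 1024x64 global size */
	unsigned int covered = 0;
	for(unsigned int pool = 0; pool < num_pools; pool++)
		covered += work_pool_work_size(total, num_pools, pool);
	printf("total=%u covered=%u first index of pool 3=%u\n",
	       total, covered, global_work_index(num_pools, 3, 0));
	return 0;
}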

View File

@ -67,8 +67,8 @@ __kernel void kernel_ocl_path_trace(
kg->name = name;
#include "../../kernel_textures.h"
int x = sx + get_global_id(0);
int y = sy + get_global_id(1);
int x = sx + ccl_global_id(0);
int y = sy + ccl_global_id(1);
if(x < sx + sw && y < sy + sh)
kernel_path_trace(kg, buffer, rng_state, sample, x, y, offset, stride);
@ -96,7 +96,7 @@ __kernel void kernel_ocl_shader(
kg->name = name;
#include "../../kernel_textures.h"
int x = sx + get_global_id(0);
int x = sx + ccl_global_id(0);
if(x < sx + sw) {
kernel_shader_evaluate(kg,
@ -128,7 +128,7 @@ __kernel void kernel_ocl_bake(
kg->name = name;
#include "../../kernel_textures.h"
int x = sx + get_global_id(0);
int x = sx + ccl_global_id(0);
if(x < sx + sw) {
#ifdef __NO_BAKING__
@ -159,8 +159,8 @@ __kernel void kernel_ocl_convert_to_byte(
kg->name = name;
#include "../../kernel_textures.h"
int x = sx + get_global_id(0);
int y = sy + get_global_id(1);
int x = sx + ccl_global_id(0);
int y = sy + ccl_global_id(1);
if(x < sx + sw && y < sy + sh)
kernel_film_convert_to_byte(kg, rgba, buffer, sample_scale, x, y, offset, stride);
@ -186,8 +186,8 @@ __kernel void kernel_ocl_convert_to_half_float(
kg->name = name;
#include "../../kernel_textures.h"
int x = sx + get_global_id(0);
int y = sy + get_global_id(1);
int x = sx + ccl_global_id(0);
int y = sy + ccl_global_id(1);
if(x < sx + sw && y < sy + sh)
kernel_film_convert_to_half_float(kg, rgba, buffer, sample_scale, x, y, offset, stride);
@ -195,7 +195,7 @@ __kernel void kernel_ocl_convert_to_half_float(
__kernel void kernel_ocl_zero_buffer(ccl_global float4 *buffer, ulong size, ulong offset)
{
size_t i = get_global_id(0) + get_global_id(1) * get_global_size(0);
size_t i = ccl_global_id(0) + ccl_global_id(1) * ccl_global_size(0);
if(i < size / sizeof(float4)) {
buffer[i+offset/sizeof(float4)] = make_float4(0.0f, 0.0f, 0.0f, 0.0f);

View File

@ -14,112 +14,13 @@
* limitations under the License.
*/
#include "kernel_compat_opencl.h"
#include "split/kernel_split_common.h"
#include "split/kernel_background_buffer_update.h"
__kernel void kernel_ocl_path_trace_background_buffer_update(
ccl_global char *kg,
ccl_constant KernelData *data,
ccl_global float *per_sample_output_buffers,
ccl_global uint *rng_state,
ccl_global uint *rng_coop, /* Required for buffer Update */
ccl_global float3 *throughput_coop, /* Required for background hit processing */
PathRadiance *PathRadiance_coop, /* Required for background hit processing and buffer Update */
ccl_global Ray *Ray_coop, /* Required for background hit processing */
ccl_global PathState *PathState_coop, /* Required for background hit processing */
ccl_global float *L_transparent_coop, /* Required for background hit processing and buffer Update */
ccl_global char *ray_state, /* Stores information on the current state of a ray */
int sw, int sh, int sx, int sy, int stride,
int rng_state_offset_x,
int rng_state_offset_y,
int rng_state_stride,
ccl_global unsigned int *work_array, /* Denotes work of each ray */
ccl_global int *Queue_data, /* Queues memory */
ccl_global int *Queue_index, /* Tracks the number of elements in each queue */
int queuesize, /* Size (capacity) of each queue */
int end_sample,
int start_sample,
#ifdef __WORK_STEALING__
ccl_global unsigned int *work_pool_wgs,
unsigned int num_samples,
#endif
#ifdef __KERNEL_DEBUG__
DebugData *debugdata_coop,
#endif
int parallel_samples) /* Number of samples to be processed in parallel */
KernelGlobals *kg,
ccl_constant KernelData *data)
{
ccl_local unsigned int local_queue_atomics;
if(get_local_id(0) == 0 && get_local_id(1) == 0) {
local_queue_atomics = 0;
}
barrier(CLK_LOCAL_MEM_FENCE);
int ray_index = get_global_id(1) * get_global_size(0) + get_global_id(0);
if(ray_index == 0) {
/* We will empty this queue in this kernel. */
Queue_index[QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS] = 0;
}
char enqueue_flag = 0;
ray_index = get_ray_index(ray_index,
QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS,
Queue_data,
queuesize,
1);
#ifdef __COMPUTE_DEVICE_GPU__
/* If we are executing on a GPU device, we exit all threads that are not
* required.
*
* If we are executing on a CPU device, then we need to keep all threads
* active since we have barrier() calls later in the kernel. CPU devices,
* expect all threads to execute barrier statement.
*/
if(ray_index == QUEUE_EMPTY_SLOT) {
return;
}
#endif
#ifndef __COMPUTE_DEVICE_GPU__
if(ray_index != QUEUE_EMPTY_SLOT) {
#endif
enqueue_flag =
kernel_background_buffer_update((KernelGlobals *)kg,
per_sample_output_buffers,
rng_state,
rng_coop,
throughput_coop,
PathRadiance_coop,
Ray_coop,
PathState_coop,
L_transparent_coop,
ray_state,
sw, sh, sx, sy, stride,
rng_state_offset_x,
rng_state_offset_y,
rng_state_stride,
work_array,
end_sample,
start_sample,
#ifdef __WORK_STEALING__
work_pool_wgs,
num_samples,
#endif
#ifdef __KERNEL_DEBUG__
debugdata_coop,
#endif
parallel_samples,
ray_index);
#ifndef __COMPUTE_DEVICE_GPU__
}
#endif
/* Enqueue RAY_REGENERATED rays into QUEUE_ACTIVE_AND_REGENERATED_RAYS;
* These rays will be made active during next SceneIntersectkernel.
*/
enqueue_ray_index_local(ray_index,
QUEUE_ACTIVE_AND_REGENERATED_RAYS,
enqueue_flag,
queuesize,
&local_queue_atomics,
Queue_data,
Queue_index);
kernel_background_buffer_update(kg);
}

View File

@ -14,77 +14,49 @@
* limitations under the License.
*/
#include "kernel_compat_opencl.h"
#include "split/kernel_split_common.h"
#include "split/kernel_data_init.h"
__kernel void kernel_ocl_path_trace_data_init(
ccl_global char *globals,
ccl_global char *sd_DL_shadow,
KernelGlobals *kg,
ccl_constant KernelData *data,
ccl_global float *per_sample_output_buffers,
ccl_global void *split_data_buffer,
int num_elements,
ccl_global char *ray_state,
ccl_global uint *rng_state,
ccl_global uint *rng_coop, /* rng array to store rng values for all rays */
ccl_global float3 *throughput_coop, /* throughput array to store throughput values for all rays */
ccl_global float *L_transparent_coop, /* L_transparent array to store L_transparent values for all rays */
PathRadiance *PathRadiance_coop, /* PathRadiance array to store PathRadiance values for all rays */
ccl_global Ray *Ray_coop, /* Ray array to store Ray information for all rays */
ccl_global PathState *PathState_coop, /* PathState array to store PathState information for all rays */
Intersection *Intersection_coop_shadow,
ccl_global char *ray_state, /* Stores information on current state of a ray */
#define KERNEL_TEX(type, ttype, name) \
ccl_global type *name,
#include "../../kernel_textures.h"
int start_sample, int sx, int sy, int sw, int sh, int offset, int stride,
int rng_state_offset_x,
int rng_state_offset_y,
int rng_state_stride,
ccl_global int *Queue_data, /* Memory for queues */
int start_sample,
int end_sample,
int sx, int sy, int sw, int sh, int offset, int stride,
ccl_global int *Queue_index, /* Tracks the number of elements in queues */
int queuesize, /* size (capacity) of the queue */
ccl_global char *use_queues_flag, /* flag to decide if scene-intersect kernel should use queues to fetch ray index */
ccl_global unsigned int *work_array, /* work array to store which work each ray belongs to */
#ifdef __WORK_STEALING__
ccl_global unsigned int *work_pool_wgs, /* Work pool for each work group */
unsigned int num_samples, /* Total number of samples per pixel */
#endif
#ifdef __KERNEL_DEBUG__
DebugData *debugdata_coop,
#endif
int parallel_samples) /* Number of samples to be processed in parallel */
ccl_global float *buffer)
{
kernel_data_init((KernelGlobals *)globals,
(ShaderData *)sd_DL_shadow,
kernel_data_init(kg,
data,
per_sample_output_buffers,
rng_state,
rng_coop,
throughput_coop,
L_transparent_coop,
PathRadiance_coop,
Ray_coop,
PathState_coop,
Intersection_coop_shadow,
split_data_buffer,
num_elements,
ray_state,
rng_state,
#define KERNEL_TEX(type, ttype, name) name,
#include "../../kernel_textures.h"
start_sample, sx, sy, sw, sh, offset, stride,
rng_state_offset_x,
rng_state_offset_y,
rng_state_stride,
Queue_data,
start_sample,
end_sample,
sx, sy, sw, sh, offset, stride,
Queue_index,
queuesize,
use_queues_flag,
work_array,
#ifdef __WORK_STEALING__
work_pool_wgs,
num_samples,
#endif
#ifdef __KERNEL_DEBUG__
debugdata_coop,
#endif
parallel_samples);
buffer);
}

View File

@ -14,74 +14,13 @@
* limitations under the License.
*/
#include "kernel_compat_opencl.h"
#include "split/kernel_split_common.h"
#include "split/kernel_direct_lighting.h"
__kernel void kernel_ocl_path_trace_direct_lighting(
ccl_global char *kg,
ccl_constant KernelData *data,
ccl_global char *sd, /* Required for direct lighting */
ccl_global uint *rng_coop, /* Required for direct lighting */
ccl_global PathState *PathState_coop, /* Required for direct lighting */
ccl_global int *ISLamp_coop, /* Required for direct lighting */
ccl_global Ray *LightRay_coop, /* Required for direct lighting */
ccl_global BsdfEval *BSDFEval_coop, /* Required for direct lighting */
ccl_global char *ray_state, /* Denotes the state of each ray */
ccl_global int *Queue_data, /* Queue memory */
ccl_global int *Queue_index, /* Tracks the number of elements in each queue */
int queuesize) /* Size (capacity) of each queue */
KernelGlobals *kg,
ccl_constant KernelData *data)
{
ccl_local unsigned int local_queue_atomics;
if(get_local_id(0) == 0 && get_local_id(1) == 0) {
local_queue_atomics = 0;
}
barrier(CLK_LOCAL_MEM_FENCE);
char enqueue_flag = 0;
int ray_index = get_global_id(1) * get_global_size(0) + get_global_id(0);
ray_index = get_ray_index(ray_index,
QUEUE_ACTIVE_AND_REGENERATED_RAYS,
Queue_data,
queuesize,
0);
#ifdef __COMPUTE_DEVICE_GPU__
/* If we are executing on a GPU device, we exit all threads that are not
* required.
*
* If we are executing on a CPU device, then we need to keep all threads
* active since we have barrier() calls later in the kernel. CPU devices,
* expect all threads to execute barrier statement.
*/
if(ray_index == QUEUE_EMPTY_SLOT) {
return;
}
#endif
#ifndef __COMPUTE_DEVICE_GPU__
if(ray_index != QUEUE_EMPTY_SLOT) {
#endif
enqueue_flag = kernel_direct_lighting((KernelGlobals *)kg,
(ShaderData *)sd,
rng_coop,
PathState_coop,
ISLamp_coop,
LightRay_coop,
BSDFEval_coop,
ray_state,
ray_index);
#ifndef __COMPUTE_DEVICE_GPU__
}
#endif
#ifdef __EMISSION__
/* Enqueue RAY_SHADOW_RAY_CAST_DL rays. */
enqueue_ray_index_local(ray_index,
QUEUE_SHADOW_RAY_CAST_DL_RAYS,
enqueue_flag,
queuesize,
&local_queue_atomics,
Queue_data,
Queue_index);
#endif
kernel_direct_lighting(kg);
}

View File

@ -14,110 +14,13 @@
* limitations under the License.
*/
#include "kernel_compat_opencl.h"
#include "split/kernel_split_common.h"
#include "split/kernel_holdout_emission_blurring_pathtermination_ao.h"
__kernel void kernel_ocl_path_trace_holdout_emission_blurring_pathtermination_ao(
ccl_global char *kg,
ccl_constant KernelData *data,
ccl_global char *sd, /* Required throughout the kernel except probabilistic path termination and AO */
ccl_global float *per_sample_output_buffers,
ccl_global uint *rng_coop, /* Required for "kernel_write_data_passes" and AO */
ccl_global float3 *throughput_coop, /* Required for handling holdout material and AO */
ccl_global float *L_transparent_coop, /* Required for handling holdout material */
PathRadiance *PathRadiance_coop, /* Required for "kernel_write_data_passes" and indirect primitive emission */
ccl_global PathState *PathState_coop, /* Required throughout the kernel and AO */
Intersection *Intersection_coop, /* Required for indirect primitive emission */
ccl_global float3 *AOAlpha_coop, /* Required for AO */
ccl_global float3 *AOBSDF_coop, /* Required for AO */
ccl_global Ray *AOLightRay_coop, /* Required for AO */
int sw, int sh, int sx, int sy, int stride,
ccl_global char *ray_state, /* Denotes the state of each ray */
ccl_global unsigned int *work_array, /* Denotes the work that each ray belongs to */
ccl_global int *Queue_data, /* Queue memory */
ccl_global int *Queue_index, /* Tracks the number of elements in each queue */
int queuesize, /* Size (capacity) of each queue */
#ifdef __WORK_STEALING__
unsigned int start_sample,
#endif
int parallel_samples) /* Number of samples to be processed in parallel */
KernelGlobals *kg,
ccl_constant KernelData *data)
{
ccl_local unsigned int local_queue_atomics_bg;
ccl_local unsigned int local_queue_atomics_ao;
if(get_local_id(0) == 0 && get_local_id(1) == 0) {
local_queue_atomics_bg = 0;
local_queue_atomics_ao = 0;
}
barrier(CLK_LOCAL_MEM_FENCE);
char enqueue_flag = 0;
char enqueue_flag_AO_SHADOW_RAY_CAST = 0;
int ray_index = get_global_id(1) * get_global_size(0) + get_global_id(0);
ray_index = get_ray_index(ray_index,
QUEUE_ACTIVE_AND_REGENERATED_RAYS,
Queue_data,
queuesize,
0);
#ifdef __COMPUTE_DEVICE_GPU__
/* If we are executing on a GPU device, we exit all threads that are not
* required.
*
* If we are executing on a CPU device, then we need to keep all threads
* active since we have barrier() calls later in the kernel. CPU devices,
* expect all threads to execute barrier statement.
*/
if(ray_index == QUEUE_EMPTY_SLOT) {
return;
}
#endif /* __COMPUTE_DEVICE_GPU__ */
#ifndef __COMPUTE_DEVICE_GPU__
if(ray_index != QUEUE_EMPTY_SLOT) {
#endif
kernel_holdout_emission_blurring_pathtermination_ao(
(KernelGlobals *)kg,
(ShaderData *)sd,
per_sample_output_buffers,
rng_coop,
throughput_coop,
L_transparent_coop,
PathRadiance_coop,
PathState_coop,
Intersection_coop,
AOAlpha_coop,
AOBSDF_coop,
AOLightRay_coop,
sw, sh, sx, sy, stride,
ray_state,
work_array,
#ifdef __WORK_STEALING__
start_sample,
#endif
parallel_samples,
ray_index,
&enqueue_flag,
&enqueue_flag_AO_SHADOW_RAY_CAST);
#ifndef __COMPUTE_DEVICE_GPU__
}
#endif
/* Enqueue RAY_UPDATE_BUFFER rays. */
enqueue_ray_index_local(ray_index,
QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS,
enqueue_flag,
queuesize,
&local_queue_atomics_bg,
Queue_data,
Queue_index);
#ifdef __AO__
/* Enqueue to-shadow-ray-cast rays. */
enqueue_ray_index_local(ray_index,
QUEUE_SHADOW_RAY_CAST_AO_RAYS,
enqueue_flag_AO_SHADOW_RAY_CAST,
queuesize,
&local_queue_atomics_ao,
Queue_data,
Queue_index);
#endif
kernel_holdout_emission_blurring_pathtermination_ao(kg);
}

View File

@ -14,67 +14,13 @@
* limitations under the License.
*/
#include "kernel_compat_opencl.h"
#include "split/kernel_split_common.h"
#include "split/kernel_lamp_emission.h"
__kernel void kernel_ocl_path_trace_lamp_emission(
ccl_global char *kg,
ccl_constant KernelData *data,
ccl_global float3 *throughput_coop, /* Required for lamp emission */
PathRadiance *PathRadiance_coop, /* Required for lamp emission */
ccl_global Ray *Ray_coop, /* Required for lamp emission */
ccl_global PathState *PathState_coop, /* Required for lamp emission */
Intersection *Intersection_coop, /* Required for lamp emission */
ccl_global char *ray_state, /* Denotes the state of each ray */
int sw, int sh,
ccl_global int *Queue_data, /* Memory for queues */
ccl_global int *Queue_index, /* Tracks the number of elements in queues */
int queuesize, /* Size (capacity) of queues */
ccl_global char *use_queues_flag, /* Used to decide if this kernel should use
* queues to fetch ray index
*/
int parallel_samples) /* Number of samples to be processed in parallel */
KernelGlobals *kg,
ccl_constant KernelData *data)
{
int x = get_global_id(0);
int y = get_global_id(1);
/* We will empty this queue in this kernel. */
if(get_global_id(0) == 0 && get_global_id(1) == 0) {
Queue_index[QUEUE_ACTIVE_AND_REGENERATED_RAYS] = 0;
}
/* Fetch use_queues_flag. */
ccl_local char local_use_queues_flag;
if(get_local_id(0) == 0 && get_local_id(1) == 0) {
local_use_queues_flag = use_queues_flag[0];
}
barrier(CLK_LOCAL_MEM_FENCE);
int ray_index;
if(local_use_queues_flag) {
int thread_index = get_global_id(1) * get_global_size(0) + get_global_id(0);
ray_index = get_ray_index(thread_index,
QUEUE_ACTIVE_AND_REGENERATED_RAYS,
Queue_data,
queuesize,
1);
if(ray_index == QUEUE_EMPTY_SLOT) {
return;
}
} else {
if(x < (sw * parallel_samples) && y < sh) {
ray_index = x + y * (sw * parallel_samples);
} else {
return;
}
}
kernel_lamp_emission((KernelGlobals *)kg,
throughput_coop,
PathRadiance_coop,
Ray_coop,
PathState_coop,
Intersection_coop,
ray_state,
sw, sh,
use_queues_flag,
ray_index);
kernel_lamp_emission(kg);
}

View File

@ -14,101 +14,13 @@
* limitations under the License.
*/
#include "kernel_compat_opencl.h"
#include "split/kernel_split_common.h"
#include "split/kernel_next_iteration_setup.h"
__kernel void kernel_ocl_path_trace_next_iteration_setup(
ccl_global char *kg,
ccl_constant KernelData *data,
ccl_global char *sd, /* Required for setting up ray for next iteration */
ccl_global uint *rng_coop, /* Required for setting up ray for next iteration */
ccl_global float3 *throughput_coop, /* Required for setting up ray for next iteration */
PathRadiance *PathRadiance_coop, /* Required for setting up ray for next iteration */
ccl_global Ray *Ray_coop, /* Required for setting up ray for next iteration */
ccl_global PathState *PathState_coop, /* Required for setting up ray for next iteration */
ccl_global Ray *LightRay_dl_coop, /* Required for radiance update - direct lighting */
ccl_global int *ISLamp_coop, /* Required for radiance update - direct lighting */
ccl_global BsdfEval *BSDFEval_coop, /* Required for radiance update - direct lighting */
ccl_global Ray *LightRay_ao_coop, /* Required for radiance update - AO */
ccl_global float3 *AOBSDF_coop, /* Required for radiance update - AO */
ccl_global float3 *AOAlpha_coop, /* Required for radiance update - AO */
ccl_global char *ray_state, /* Denotes the state of each ray */
ccl_global int *Queue_data, /* Queue memory */
ccl_global int *Queue_index, /* Tracks the number of elements in each queue */
int queuesize, /* Size (capacity) of each queue */
ccl_global char *use_queues_flag) /* flag to decide if scene_intersect kernel should
* use queues to fetch ray index */
KernelGlobals *kg,
ccl_constant KernelData *data)
{
ccl_local unsigned int local_queue_atomics;
if(get_local_id(0) == 0 && get_local_id(1) == 0) {
local_queue_atomics = 0;
}
barrier(CLK_LOCAL_MEM_FENCE);
if(get_global_id(0) == 0 && get_global_id(1) == 0) {
/* If we are here, then it means that scene-intersect kernel
* has already been executed atleast once. From the next time,
* scene-intersect kernel may operate on queues to fetch ray index
*/
use_queues_flag[0] = 1;
/* Mark queue indices of QUEUE_SHADOW_RAY_CAST_AO_RAYS and
* QUEUE_SHADOW_RAY_CAST_DL_RAYS queues that were made empty during the
* previous kernel.
*/
Queue_index[QUEUE_SHADOW_RAY_CAST_AO_RAYS] = 0;
Queue_index[QUEUE_SHADOW_RAY_CAST_DL_RAYS] = 0;
}
char enqueue_flag = 0;
int ray_index = get_global_id(1) * get_global_size(0) + get_global_id(0);
ray_index = get_ray_index(ray_index,
QUEUE_ACTIVE_AND_REGENERATED_RAYS,
Queue_data,
queuesize,
0);
#ifdef __COMPUTE_DEVICE_GPU__
/* If we are executing on a GPU device, we exit all threads that are not
* required.
*
* If we are executing on a CPU device, then we need to keep all threads
* active since we have barrier() calls later in the kernel. CPU devices,
* expect all threads to execute barrier statement.
*/
if(ray_index == QUEUE_EMPTY_SLOT) {
return;
}
#endif
#ifndef __COMPUTE_DEVICE_GPU__
if(ray_index != QUEUE_EMPTY_SLOT) {
#endif
enqueue_flag = kernel_next_iteration_setup((KernelGlobals *)kg,
(ShaderData *)sd,
rng_coop,
throughput_coop,
PathRadiance_coop,
Ray_coop,
PathState_coop,
LightRay_dl_coop,
ISLamp_coop,
BSDFEval_coop,
LightRay_ao_coop,
AOBSDF_coop,
AOAlpha_coop,
ray_state,
use_queues_flag,
ray_index);
#ifndef __COMPUTE_DEVICE_GPU__
}
#endif
/* Enqueue RAY_UPDATE_BUFFER rays. */
enqueue_ray_index_local(ray_index,
QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS,
enqueue_flag,
queuesize,
&local_queue_atomics,
Queue_data,
Queue_index);
kernel_next_iteration_setup(kg);
}

View File

@ -14,93 +14,13 @@
* limitations under the License.
*/
#include "../../kernel_compat_opencl.h"
#include "../../kernel_math.h"
#include "../../kernel_types.h"
#include "../../kernel_globals.h"
#include "../../kernel_queues.h"
#include "kernel_compat_opencl.h"
#include "split/kernel_split_common.h"
#include "split/kernel_queue_enqueue.h"
/*
* The kernel "kernel_queue_enqueue" enqueues rays of
* different ray state into their appropriate Queues;
* 1. Rays that have been determined to hit the background from the
* "kernel_scene_intersect" kernel
* are enqueued in QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS;
* 2. Rays that have been determined to be actively participating in path-iteration will be enqueued into QUEUE_ACTIVE_AND_REGENERATED_RAYS.
*
* The input and output of the kernel is as follows,
*
* ray_state -------------------------------------------|--- kernel_queue_enqueue --|--- Queue_data (QUEUE_ACTIVE_AND_REGENERATED_RAYS & QUEUE_HITBF_BUFF_UPDATE_TOREGEN_RAYS)
* Queue_index(QUEUE_ACTIVE_AND_REGENERATED_RAYS) ------| |--- Queue_index (QUEUE_ACTIVE_AND_REGENERATED_RAYS & QUEUE_HITBF_BUFF_UPDATE_TOREGEN_RAYS)
* Queue_index(QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS) ---| |
* queuesize -------------------------------------------| |
*
* Note on Queues :
* State of queues during the first time this kernel is called :
* At entry,
* Both QUEUE_ACTIVE_AND_REGENERATED_RAYS and QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS will be empty.
* At exit,
* QUEUE_ACTIVE_AND_REGENERATED_RAYS will be filled with RAY_ACTIVE rays
* QUEUE_HITBF_BUFF_UPDATE_TOREGEN_RAYS will be filled with RAY_HIT_BACKGROUND rays.
*
* State of queue during other times this kernel is called :
* At entry,
* QUEUE_ACTIVE_AND_REGENERATED_RAYS will be empty.
* QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS will contain RAY_TO_REGENERATE and RAY_UPDATE_BUFFER rays.
* At exit,
* QUEUE_ACTIVE_AND_REGENERATED_RAYS will be filled with RAY_ACTIVE rays.
* QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS will be filled with RAY_TO_REGENERATE, RAY_UPDATE_BUFFER, RAY_HIT_BACKGROUND rays.
*/
__kernel void kernel_ocl_path_trace_queue_enqueue(
ccl_global int *Queue_data, /* Queue memory */
ccl_global int *Queue_index, /* Tracks the number of elements in each queue */
ccl_global char *ray_state, /* Denotes the state of each ray */
int queuesize) /* Size (capacity) of each queue */
KernelGlobals *kg,
ccl_constant KernelData *data)
{
/* We have only 2 cases (Hit/Not-Hit) */
ccl_local unsigned int local_queue_atomics[2];
int lidx = get_local_id(1) * get_local_size(0) + get_local_id(0);
int ray_index = get_global_id(1) * get_global_size(0) + get_global_id(0);
if(lidx < 2 ) {
local_queue_atomics[lidx] = 0;
}
barrier(CLK_LOCAL_MEM_FENCE);
int queue_number = -1;
if(IS_STATE(ray_state, ray_index, RAY_HIT_BACKGROUND)) {
queue_number = QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS;
}
else if(IS_STATE(ray_state, ray_index, RAY_ACTIVE)) {
queue_number = QUEUE_ACTIVE_AND_REGENERATED_RAYS;
}
unsigned int my_lqidx;
if(queue_number != -1) {
my_lqidx = get_local_queue_index(queue_number, local_queue_atomics);
}
barrier(CLK_LOCAL_MEM_FENCE);
if(lidx == 0) {
local_queue_atomics[QUEUE_ACTIVE_AND_REGENERATED_RAYS] =
get_global_per_queue_offset(QUEUE_ACTIVE_AND_REGENERATED_RAYS,
local_queue_atomics,
Queue_index);
local_queue_atomics[QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS] =
get_global_per_queue_offset(QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS,
local_queue_atomics,
Queue_index);
}
barrier(CLK_LOCAL_MEM_FENCE);
unsigned int my_gqidx;
if(queue_number != -1) {
my_gqidx = get_global_queue_index(queue_number,
queuesize,
my_lqidx,
local_queue_atomics);
Queue_data[my_gqidx] = ray_index;
}
kernel_queue_enqueue(kg);
}

View File

@ -14,67 +14,13 @@
* limitations under the License.
*/
#include "kernel_compat_opencl.h"
#include "split/kernel_split_common.h"
#include "split/kernel_scene_intersect.h"
__kernel void kernel_ocl_path_trace_scene_intersect(
ccl_global char *kg,
ccl_constant KernelData *data,
ccl_global uint *rng_coop,
ccl_global Ray *Ray_coop, /* Required for scene_intersect */
ccl_global PathState *PathState_coop, /* Required for scene_intersect */
Intersection *Intersection_coop, /* Required for scene_intersect */
ccl_global char *ray_state, /* Denotes the state of each ray */
int sw, int sh,
ccl_global int *Queue_data, /* Memory for queues */
ccl_global int *Queue_index, /* Tracks the number of elements in queues */
int queuesize, /* Size (capacity) of queues */
ccl_global char *use_queues_flag, /* used to decide if this kernel should use
* queues to fetch ray index */
#ifdef __KERNEL_DEBUG__
DebugData *debugdata_coop,
#endif
int parallel_samples) /* Number of samples to be processed in parallel */
KernelGlobals *kg,
ccl_constant KernelData *data)
{
int x = get_global_id(0);
int y = get_global_id(1);
/* Fetch use_queues_flag */
ccl_local char local_use_queues_flag;
if(get_local_id(0) == 0 && get_local_id(1) == 0) {
local_use_queues_flag = use_queues_flag[0];
}
barrier(CLK_LOCAL_MEM_FENCE);
int ray_index;
if(local_use_queues_flag) {
int thread_index = get_global_id(1) * get_global_size(0) + get_global_id(0);
ray_index = get_ray_index(thread_index,
QUEUE_ACTIVE_AND_REGENERATED_RAYS,
Queue_data,
queuesize,
0);
if(ray_index == QUEUE_EMPTY_SLOT) {
return;
}
} else {
if(x < (sw * parallel_samples) && y < sh) {
ray_index = x + y * (sw * parallel_samples);
} else {
return;
}
}
kernel_scene_intersect((KernelGlobals *)kg,
rng_coop,
Ray_coop,
PathState_coop,
Intersection_coop,
ray_state,
sw, sh,
use_queues_flag,
#ifdef __KERNEL_DEBUG__
debugdata_coop,
#endif
ray_index);
kernel_scene_intersect(kg);
}

View File

@ -14,55 +14,13 @@
* limitations under the License.
*/
#include "kernel_compat_opencl.h"
#include "split/kernel_split_common.h"
#include "split/kernel_shader_eval.h"
__kernel void kernel_ocl_path_trace_shader_eval(
ccl_global char *kg,
ccl_constant KernelData *data,
ccl_global char *sd, /* Output ShaderData structure to be filled */
ccl_global uint *rng_coop, /* Required for rbsdf calculation */
ccl_global Ray *Ray_coop, /* Required for setting up shader from ray */
ccl_global PathState *PathState_coop, /* Required for all functions in this kernel */
Intersection *Intersection_coop, /* Required for setting up shader from ray */
ccl_global char *ray_state, /* Denotes the state of each ray */
ccl_global int *Queue_data, /* queue memory */
ccl_global int *Queue_index, /* Tracks the number of elements in each queue */
int queuesize) /* Size (capacity) of each queue */
KernelGlobals *kg,
ccl_constant KernelData *data)
{
/* Enqueue RAY_TO_REGENERATE rays into QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS queue. */
ccl_local unsigned int local_queue_atomics;
if(get_local_id(0) == 0 && get_local_id(1) == 0) {
local_queue_atomics = 0;
}
barrier(CLK_LOCAL_MEM_FENCE);
int ray_index = get_global_id(1) * get_global_size(0) + get_global_id(0);
ray_index = get_ray_index(ray_index,
QUEUE_ACTIVE_AND_REGENERATED_RAYS,
Queue_data,
queuesize,
0);
if(ray_index == QUEUE_EMPTY_SLOT) {
return;
}
char enqueue_flag = (IS_STATE(ray_state, ray_index, RAY_TO_REGENERATE)) ? 1 : 0;
enqueue_ray_index_local(ray_index,
QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS,
enqueue_flag,
queuesize,
&local_queue_atomics,
Queue_data,
Queue_index);
/* Continue on with shader evaluation. */
kernel_shader_eval((KernelGlobals *)kg,
(ShaderData *)sd,
rng_coop,
Ray_coop,
PathState_coop,
Intersection_coop,
ray_state,
ray_index);
kernel_shader_eval(kg);
}

View File

@ -14,52 +14,13 @@
* limitations under the License.
*/
#include "kernel_compat_opencl.h"
#include "split/kernel_split_common.h"
#include "split/kernel_shadow_blocked.h"
__kernel void kernel_ocl_path_trace_shadow_blocked(
ccl_global char *kg,
ccl_constant KernelData *data,
ccl_global PathState *PathState_coop, /* Required for shadow blocked */
ccl_global Ray *LightRay_dl_coop, /* Required for direct lighting's shadow blocked */
ccl_global Ray *LightRay_ao_coop, /* Required for AO's shadow blocked */
ccl_global char *ray_state,
ccl_global int *Queue_data, /* Queue memory */
ccl_global int *Queue_index, /* Tracks the number of elements in each queue */
int queuesize) /* Size (capacity) of each queue */
KernelGlobals *kg,
ccl_constant KernelData *data)
{
int lidx = get_local_id(1) * get_local_id(0) + get_local_id(0);
ccl_local unsigned int ao_queue_length;
ccl_local unsigned int dl_queue_length;
if(lidx == 0) {
ao_queue_length = Queue_index[QUEUE_SHADOW_RAY_CAST_AO_RAYS];
dl_queue_length = Queue_index[QUEUE_SHADOW_RAY_CAST_DL_RAYS];
}
barrier(CLK_LOCAL_MEM_FENCE);
/* flag determining if the current ray is to process shadow ray for AO or DL */
char shadow_blocked_type = -1;
int ray_index = QUEUE_EMPTY_SLOT;
int thread_index = get_global_id(1) * get_global_size(0) + get_global_id(0);
if(thread_index < ao_queue_length + dl_queue_length) {
if(thread_index < ao_queue_length) {
ray_index = get_ray_index(thread_index, QUEUE_SHADOW_RAY_CAST_AO_RAYS, Queue_data, queuesize, 1);
shadow_blocked_type = RAY_SHADOW_RAY_CAST_AO;
} else {
ray_index = get_ray_index(thread_index - ao_queue_length, QUEUE_SHADOW_RAY_CAST_DL_RAYS, Queue_data, queuesize, 1);
shadow_blocked_type = RAY_SHADOW_RAY_CAST_DL;
}
}
if(ray_index == QUEUE_EMPTY_SLOT)
return;
kernel_shadow_blocked((KernelGlobals *)kg,
PathState_coop,
LightRay_dl_coop,
LightRay_ao_coop,
ray_state,
shadow_blocked_type,
ray_index);
kernel_shadow_blocked(kg);
}

View File

@ -14,25 +14,13 @@
* limitations under the License.
*/
#include "kernel_compat_opencl.h"
#include "split/kernel_split_common.h"
#include "split/kernel_sum_all_radiance.h"
__kernel void kernel_ocl_path_trace_sum_all_radiance(
ccl_constant KernelData *data, /* To get pass_stride to offset into buffer */
ccl_global float *buffer, /* Output buffer of RenderTile */
ccl_global float *per_sample_output_buffer, /* Radiance contributed by all samples */
int parallel_samples, int sw, int sh, int stride,
int buffer_offset_x,
int buffer_offset_y,
int buffer_stride,
int start_sample)
KernelGlobals *kg,
ccl_constant KernelData *data)
{
kernel_sum_all_radiance(data,
buffer,
per_sample_output_buffer,
parallel_samples,
sw, sh, stride,
buffer_offset_x,
buffer_offset_y,
buffer_stride,
start_sample);
kernel_sum_all_radiance(kg);
}

View File

@ -14,7 +14,7 @@
* limitations under the License.
*/
#include "kernel_split_common.h"
CCL_NAMESPACE_BEGIN
/* Note on kernel_background_buffer_update kernel.
* This is the fourth kernel in the ray tracing logic, and the third
@ -69,50 +69,61 @@
* QUEUE_ACTIVE_AND_REGENERATED_RAYS will be filled with RAY_ACTIVE and RAY_REGENERATED rays
* QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS will be empty
*/
ccl_device char kernel_background_buffer_update(
KernelGlobals *kg,
ccl_global float *per_sample_output_buffers,
ccl_global uint *rng_state,
ccl_global uint *rng_coop, /* Required for buffer Update */
ccl_global float3 *throughput_coop, /* Required for background hit processing */
PathRadiance *PathRadiance_coop, /* Required for background hit processing and buffer Update */
ccl_global Ray *Ray_coop, /* Required for background hit processing */
ccl_global PathState *PathState_coop, /* Required for background hit processing */
ccl_global float *L_transparent_coop, /* Required for background hit processing and buffer Update */
ccl_global char *ray_state, /* Stores information on the current state of a ray */
int sw, int sh, int sx, int sy, int stride,
int rng_state_offset_x,
int rng_state_offset_y,
int rng_state_stride,
ccl_global unsigned int *work_array, /* Denotes work of each ray */
int end_sample,
int start_sample,
#ifdef __WORK_STEALING__
ccl_global unsigned int *work_pool_wgs,
unsigned int num_samples,
#endif
#ifdef __KERNEL_DEBUG__
DebugData *debugdata_coop,
#endif
int parallel_samples, /* Number of samples to be processed in parallel */
int ray_index)
ccl_device void kernel_background_buffer_update(KernelGlobals *kg)
{
char enqueue_flag = 0;
#ifdef __KERNEL_DEBUG__
DebugData *debug_data = &debugdata_coop[ray_index];
#endif
ccl_global PathState *state = &PathState_coop[ray_index];
PathRadiance *L = &PathRadiance_coop[ray_index];
ccl_global Ray *ray = &Ray_coop[ray_index];
ccl_global float3 *throughput = &throughput_coop[ray_index];
ccl_global float *L_transparent = &L_transparent_coop[ray_index];
ccl_global uint *rng = &rng_coop[ray_index];
ccl_local unsigned int local_queue_atomics;
if(ccl_local_id(0) == 0 && ccl_local_id(1) == 0) {
local_queue_atomics = 0;
}
ccl_barrier(CCL_LOCAL_MEM_FENCE);
#ifdef __WORK_STEALING__
unsigned int my_work;
ccl_global float *initial_per_sample_output_buffers;
ccl_global uint *initial_rng;
int ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
if(ray_index == 0) {
/* We will empty this queue in this kernel. */
kernel_split_params.queue_index[QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS] = 0;
}
char enqueue_flag = 0;
ray_index = get_ray_index(kg, ray_index,
QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS,
kernel_split_state.queue_data,
kernel_split_params.queue_size,
1);
#ifdef __COMPUTE_DEVICE_GPU__
/* If we are executing on a GPU device, we exit all threads that are not
* required.
*
* If we are executing on a CPU device, then we need to keep all threads
* active since we have barrier() calls later in the kernel. CPU devices
* expect all threads to execute the barrier statement.
*/
if(ray_index == QUEUE_EMPTY_SLOT) {
return;
}
#endif
#ifndef __COMPUTE_DEVICE_GPU__
if(ray_index != QUEUE_EMPTY_SLOT) {
#endif
ccl_global uint *rng_state = kernel_split_params.rng_state;
int stride = kernel_split_params.stride;
ccl_global char *ray_state = kernel_split_state.ray_state;
#ifdef __KERNEL_DEBUG__
DebugData *debug_data = &kernel_split_state.debug_data[ray_index];
#endif
ccl_global PathState *state = &kernel_split_state.path_state[ray_index];
PathRadiance *L = &kernel_split_state.path_radiance[ray_index];
ccl_global Ray *ray = &kernel_split_state.ray[ray_index];
ccl_global float3 *throughput = &kernel_split_state.throughput[ray_index];
ccl_global float *L_transparent = &kernel_split_state.L_transparent[ray_index];
ccl_global uint *rng = &kernel_split_state.rng[ray_index];
ccl_global float *per_sample_output_buffers = kernel_split_state.per_sample_output_buffers;
unsigned int work_index;
ccl_global uint *initial_rng;
unsigned int sample;
unsigned int tile_x;
unsigned int tile_y;
@ -120,29 +131,17 @@ ccl_device char kernel_background_buffer_update(
unsigned int pixel_y;
unsigned int my_sample_tile;
#ifdef __WORK_STEALING__
my_work = work_array[ray_index];
sample = get_my_sample(my_work, sw, sh, parallel_samples, ray_index) + start_sample;
get_pixel_tile_position(&pixel_x, &pixel_y,
work_index = kernel_split_state.work_array[ray_index];
sample = get_work_sample(kg, work_index, ray_index) + kernel_split_params.start_sample;
get_work_pixel_tile_position(kg, &pixel_x, &pixel_y,
&tile_x, &tile_y,
my_work,
sw, sh, sx, sy,
parallel_samples,
work_index,
ray_index);
my_sample_tile = 0;
initial_per_sample_output_buffers = per_sample_output_buffers;
initial_rng = rng_state;
#else /* __WORK_STEALING__ */
sample = work_array[ray_index];
int tile_index = ray_index / parallel_samples;
/* buffer and rng_state's stride is "stride". Find x and y using ray_index */
tile_x = tile_index % sw;
tile_y = tile_index / sw;
my_sample_tile = ray_index - (tile_index * parallel_samples);
#endif /* __WORK_STEALING__ */
rng_state += (rng_state_offset_x + tile_x) + (rng_state_offset_y + tile_y) * rng_state_stride;
per_sample_output_buffers += (((tile_x + (tile_y * stride)) * parallel_samples) + my_sample_tile) * kernel_data.film.pass_stride;
rng_state += kernel_split_params.offset + pixel_x + pixel_y*kernel_split_params.stride;
per_sample_output_buffers += ((tile_x + (tile_y * stride)) + my_sample_tile) * kernel_data.film.pass_stride;
if(IS_STATE(ray_state, ray_index, RAY_HIT_BACKGROUND)) {
/* eval background shader if nothing hit */
@ -157,7 +156,7 @@ ccl_device char kernel_background_buffer_update(
if(IS_STATE(ray_state, ray_index, RAY_HIT_BACKGROUND)) {
#ifdef __BACKGROUND__
/* sample background shader */
float3 L_background = indirect_background(kg, kg->sd_input, state, ray);
float3 L_background = indirect_background(kg, kernel_split_state.sd_DL_shadow, state, ray);
path_radiance_accum_background(L, (*throughput), L_background, state->bounce);
#endif
ASSIGN_RAY_STATE(ray_state, ray_index, RAY_UPDATE_BUFFER);
@ -180,41 +179,26 @@ ccl_device char kernel_background_buffer_update(
}
if(IS_STATE(ray_state, ray_index, RAY_TO_REGENERATE)) {
#ifdef __WORK_STEALING__
/* We have completed current work; So get next work */
int valid_work = get_next_work(work_pool_wgs, &my_work, sw, sh, num_samples, parallel_samples, ray_index);
int valid_work = get_next_work(kg, &work_index, ray_index);
if(!valid_work) {
/* If work is invalid, this means no more work is available and the thread may exit */
ASSIGN_RAY_STATE(ray_state, ray_index, RAY_INACTIVE);
}
#else /* __WORK_STEALING__ */
if((sample + parallel_samples) >= end_sample) {
ASSIGN_RAY_STATE(ray_state, ray_index, RAY_INACTIVE);
}
#endif /* __WORK_STEALING__ */
if(IS_STATE(ray_state, ray_index, RAY_TO_REGENERATE)) {
#ifdef __WORK_STEALING__
work_array[ray_index] = my_work;
kernel_split_state.work_array[ray_index] = work_index;
/* Get the sample associated with the current work */
sample = get_my_sample(my_work, sw, sh, parallel_samples, ray_index) + start_sample;
sample = get_work_sample(kg, work_index, ray_index) + kernel_split_params.start_sample;
/* Get pixel and tile position associated with current work */
get_pixel_tile_position(&pixel_x, &pixel_y, &tile_x, &tile_y, my_work, sw, sh, sx, sy, parallel_samples, ray_index);
get_work_pixel_tile_position(kg, &pixel_x, &pixel_y, &tile_x, &tile_y, work_index, ray_index);
my_sample_tile = 0;
/* Remap rng_state according to the current work */
rng_state = initial_rng + ((rng_state_offset_x + tile_x) + (rng_state_offset_y + tile_y) * rng_state_stride);
rng_state = initial_rng + kernel_split_params.offset + pixel_x + pixel_y*kernel_split_params.stride;
/* Remap per_sample_output_buffers according to the current work */
per_sample_output_buffers = initial_per_sample_output_buffers
+ (((tile_x + (tile_y * stride)) * parallel_samples) + my_sample_tile) * kernel_data.film.pass_stride;
#else /* __WORK_STEALING__ */
work_array[ray_index] = sample + parallel_samples;
sample = work_array[ray_index];
/* Get ray position from ray index */
pixel_x = sx + ((ray_index / parallel_samples) % sw);
pixel_y = sy + ((ray_index / parallel_samples) / sw);
#endif /* __WORK_STEALING__ */
per_sample_output_buffers = kernel_split_state.per_sample_output_buffers
+ ((tile_x + (tile_y * stride)) + my_sample_tile) * kernel_data.film.pass_stride;
/* Initialize random numbers and ray. */
kernel_path_trace_setup(kg, rng_state, sample, pixel_x, pixel_y, rng, ray);
@ -226,7 +210,7 @@ ccl_device char kernel_background_buffer_update(
*throughput = make_float3(1.0f, 1.0f, 1.0f);
*L_transparent = 0.0f;
path_radiance_init(L, kernel_data.film.use_light_pass);
path_state_init(kg, kg->sd_input, state, rng, sample, ray);
path_state_init(kg, kernel_split_state.sd_DL_shadow, state, rng, sample, ray);
#ifdef __KERNEL_DEBUG__
debug_data_init(debug_data);
#endif
@ -244,5 +228,22 @@ ccl_device char kernel_background_buffer_update(
}
}
}
return enqueue_flag;
#ifndef __COMPUTE_DEVICE_GPU__
}
#endif
/* Enqueue RAY_REGENERATED rays into QUEUE_ACTIVE_AND_REGENERATED_RAYS;
* These rays will be made active during the next SceneIntersect kernel.
*/
enqueue_ray_index_local(ray_index,
QUEUE_ACTIVE_AND_REGENERATED_RAYS,
enqueue_flag,
kernel_split_params.queue_size,
&local_queue_atomics,
kernel_split_state.queue_data,
kernel_split_params.queue_index);
}
CCL_NAMESPACE_END
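The RAY_TO_REGENERATE handling above leans on get_next_work() to pull a fresh (sample, pixel) work item from the per-workgroup pool that replaces the old tile splitting. As a rough, single-threaded illustration of that idea only (get_next_work_sketch, work_pool and total_work below are made-up stand-ins, not the Cycles implementation), claiming work boils down to an atomic counter increment until the pool runs dry:

/* Sketch of the work-pool idea behind get_next_work(): a thread claims the
 * next work item by bumping a shared counter; once the counter passes the
 * total amount of work, the caller marks its ray RAY_INACTIVE. Illustrative
 * single-threaded C only; on the device the increment is an atomic. */
#include <stdio.h>

static unsigned int work_pool = 0;        /* one counter per work group on the device */

static int get_next_work_sketch(unsigned int total_work, unsigned int *work_index)
{
    unsigned int claimed = work_pool++;   /* atomic_inc() on the device */
    if(claimed >= total_work)
        return 0;                         /* no more work, ray goes inactive */
    *work_index = claimed;
    return 1;
}

int main(void)
{
    unsigned int total = 5, w;
    while(get_next_work_sketch(total, &w))
        printf("claimed work item %u\n", w);
    return 0;
}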

View File

@ -14,7 +14,7 @@
* limitations under the License.
*/
#include "kernel_split_common.h"
CCL_NAMESPACE_BEGIN
/* Note on kernel_data_initialization kernel
* This kernel Initializes structures needed in path-iteration kernels.
@ -50,72 +50,77 @@
* All slots in queues are initialized to queue empty slot;
* The number of elements in the queues is initialized to 0;
*/
ccl_device void kernel_data_init(
KernelGlobals *kg,
ShaderData *sd_DL_shadow,
ccl_constant KernelData *data,
ccl_global float *per_sample_output_buffers,
ccl_global void *split_data_buffer,
int num_elements,
ccl_global char *ray_state,
ccl_global uint *rng_state,
ccl_global uint *rng_coop, /* rng array to store rng values for all rays */
ccl_global float3 *throughput_coop, /* throughput array to store throughput values for all rays */
ccl_global float *L_transparent_coop, /* L_transparent array to store L_transparent values for all rays */
PathRadiance *PathRadiance_coop, /* PathRadiance array to store PathRadiance values for all rays */
ccl_global Ray *Ray_coop, /* Ray array to store Ray information for all rays */
ccl_global PathState *PathState_coop, /* PathState array to store PathState information for all rays */
Intersection *Intersection_coop_shadow,
ccl_global char *ray_state, /* Stores information on current state of a ray */
#ifdef __KERNEL_OPENCL__
#define KERNEL_TEX(type, ttype, name) \
ccl_global type *name,
#include "../kernel_textures.h"
#endif
int start_sample, int sx, int sy, int sw, int sh, int offset, int stride,
int rng_state_offset_x,
int rng_state_offset_y,
int rng_state_stride,
ccl_global int *Queue_data, /* Memory for queues */
int start_sample,
int end_sample,
int sx, int sy, int sw, int sh, int offset, int stride,
ccl_global int *Queue_index, /* Tracks the number of elements in queues */
int queuesize, /* size (capacity) of the queue */
ccl_global char *use_queues_flag, /* flag to decide if scene-intersect kernel should use queues to fetch ray index */
ccl_global unsigned int *work_array, /* work array to store which work each ray belongs to */
#ifdef __WORK_STEALING__
ccl_global unsigned int *work_pool_wgs, /* Work pool for each work group */
unsigned int num_samples, /* Total number of samples per pixel */
#endif
#ifdef __KERNEL_DEBUG__
DebugData *debugdata_coop,
#endif
int parallel_samples) /* Number of samples to be processed in parallel */
ccl_global unsigned int *work_pools, /* Work pool for each work group */
unsigned int num_samples,
ccl_global float *buffer)
{
#ifdef __KERNEL_OPENCL__
kg->data = data;
kg->sd_input = sd_DL_shadow;
kg->isect_shadow = Intersection_coop_shadow;
#endif
kernel_split_params.x = sx;
kernel_split_params.y = sy;
kernel_split_params.w = sw;
kernel_split_params.h = sh;
kernel_split_params.offset = offset;
kernel_split_params.stride = stride;
kernel_split_params.rng_state = rng_state;
kernel_split_params.start_sample = start_sample;
kernel_split_params.end_sample = end_sample;
kernel_split_params.work_pools = work_pools;
kernel_split_params.num_samples = num_samples;
kernel_split_params.queue_index = Queue_index;
kernel_split_params.queue_size = queuesize;
kernel_split_params.use_queues_flag = use_queues_flag;
kernel_split_params.buffer = buffer;
split_data_init(&kernel_split_state, num_elements, split_data_buffer, ray_state);
#ifdef __KERNEL_OPENCL__
#define KERNEL_TEX(type, ttype, name) \
kg->name = name;
#include "../kernel_textures.h"
#endif
int thread_index = get_global_id(1) * get_global_size(0) + get_global_id(0);
#ifdef __WORK_STEALING__
int lid = get_local_id(1) * get_local_size(0) + get_local_id(0);
/* Initialize work_pool_wgs */
if(lid == 0) {
int group_index = get_group_id(1) * get_num_groups(0) + get_group_id(0);
work_pool_wgs[group_index] = 0;
}
barrier(CLK_LOCAL_MEM_FENCE);
#endif /* __WORK_STEALING__ */
int thread_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
/* Initialize queue data and queue index. */
if(thread_index < queuesize) {
/* Initialize active ray queue. */
Queue_data[QUEUE_ACTIVE_AND_REGENERATED_RAYS * queuesize + thread_index] = QUEUE_EMPTY_SLOT;
kernel_split_state.queue_data[QUEUE_ACTIVE_AND_REGENERATED_RAYS * queuesize + thread_index] = QUEUE_EMPTY_SLOT;
/* Initialize background and buffer update queue. */
Queue_data[QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS * queuesize + thread_index] = QUEUE_EMPTY_SLOT;
kernel_split_state.queue_data[QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS * queuesize + thread_index] = QUEUE_EMPTY_SLOT;
/* Initialize shadow ray cast of AO queue. */
Queue_data[QUEUE_SHADOW_RAY_CAST_AO_RAYS * queuesize + thread_index] = QUEUE_EMPTY_SLOT;
kernel_split_state.queue_data[QUEUE_SHADOW_RAY_CAST_AO_RAYS * queuesize + thread_index] = QUEUE_EMPTY_SLOT;
/* Initialize shadow ray cast of direct lighting queue. */
Queue_data[QUEUE_SHADOW_RAY_CAST_DL_RAYS * queuesize + thread_index] = QUEUE_EMPTY_SLOT;
kernel_split_state.queue_data[QUEUE_SHADOW_RAY_CAST_DL_RAYS * queuesize + thread_index] = QUEUE_EMPTY_SLOT;
}
if(thread_index == 0) {
@ -126,109 +131,83 @@ ccl_device void kernel_data_init(
/* The scene-intersect kernel should not use the queues very first time.
* since the queue would be empty.
*/
use_queues_flag[0] = 0;
*use_queues_flag = 0;
}
int x = get_global_id(0);
int y = get_global_id(1);
int ray_index = ccl_global_id(0) + ccl_global_id(1) * ccl_global_size(0);
if(x < (sw * parallel_samples) && y < sh) {
int ray_index = x + y * (sw * parallel_samples);
/* This is the first assignment to ray_state;
* So we don't use ASSIGN_RAY_STATE macro.
*/
kernel_split_state.ray_state[ray_index] = RAY_ACTIVE;
/* This is the first assignment to ray_state;
* So we don't use ASSIGN_RAY_STATE macro.
*/
unsigned int my_sample;
unsigned int pixel_x;
unsigned int pixel_y;
unsigned int tile_x;
unsigned int tile_y;
unsigned int my_sample_tile;
unsigned int work_index = 0;
/* Get work. */
if(!get_next_work(kg, &work_index, ray_index)) {
/* No more work, mark ray as inactive */
kernel_split_state.ray_state[ray_index] = RAY_INACTIVE;
return;
}
/* Get the sample associated with the work. */
my_sample = get_work_sample(kg, work_index, ray_index) + start_sample;
my_sample_tile = 0;
/* Get pixel and tile position associated with the work. */
get_work_pixel_tile_position(kg, &pixel_x, &pixel_y,
&tile_x, &tile_y,
work_index,
ray_index);
kernel_split_state.work_array[ray_index] = work_index;
rng_state += kernel_split_params.offset + pixel_x + pixel_y*stride;
ccl_global float *per_sample_output_buffers = kernel_split_state.per_sample_output_buffers;
per_sample_output_buffers += ((tile_x + (tile_y * stride)) + (my_sample_tile)) * kernel_data.film.pass_stride;
/* Initialize random numbers and ray. */
kernel_path_trace_setup(kg,
rng_state,
my_sample,
pixel_x, pixel_y,
&kernel_split_state.rng[ray_index],
&kernel_split_state.ray[ray_index]);
if(kernel_split_state.ray[ray_index].t != 0.0f) {
/* Initialize throughput, L_transparent, Ray, PathState;
* These rays proceed with path-iteration.
*/
ray_state[ray_index] = RAY_ACTIVE;
unsigned int my_sample;
unsigned int pixel_x;
unsigned int pixel_y;
unsigned int tile_x;
unsigned int tile_y;
unsigned int my_sample_tile;
#ifdef __WORK_STEALING__
unsigned int my_work = 0;
/* Get work. */
get_next_work(work_pool_wgs, &my_work, sw, sh, num_samples, parallel_samples, ray_index);
/* Get the sample associated with the work. */
my_sample = get_my_sample(my_work, sw, sh, parallel_samples, ray_index) + start_sample;
my_sample_tile = 0;
/* Get pixel and tile position associated with the work. */
get_pixel_tile_position(&pixel_x, &pixel_y,
&tile_x, &tile_y,
my_work,
sw, sh, sx, sy,
parallel_samples,
ray_index);
work_array[ray_index] = my_work;
#else /* __WORK_STEALING__ */
unsigned int tile_index = ray_index / parallel_samples;
tile_x = tile_index % sw;
tile_y = tile_index / sw;
my_sample_tile = ray_index - (tile_index * parallel_samples);
my_sample = my_sample_tile + start_sample;
/* Initialize work array. */
work_array[ray_index] = my_sample ;
/* Calculate pixel position of this ray. */
pixel_x = sx + tile_x;
pixel_y = sy + tile_y;
#endif /* __WORK_STEALING__ */
rng_state += (rng_state_offset_x + tile_x) + (rng_state_offset_y + tile_y) * rng_state_stride;
/* Initialise per_sample_output_buffers to all zeros. */
per_sample_output_buffers += (((tile_x + (tile_y * stride)) * parallel_samples) + (my_sample_tile)) * kernel_data.film.pass_stride;
int per_sample_output_buffers_iterator = 0;
for(per_sample_output_buffers_iterator = 0;
per_sample_output_buffers_iterator < kernel_data.film.pass_stride;
per_sample_output_buffers_iterator++)
{
per_sample_output_buffers[per_sample_output_buffers_iterator] = 0.0f;
}
/* Initialize random numbers and ray. */
kernel_path_trace_setup(kg,
rng_state,
my_sample,
pixel_x, pixel_y,
&rng_coop[ray_index],
&Ray_coop[ray_index]);
if(Ray_coop[ray_index].t != 0.0f) {
/* Initialize throughput, L_transparent, Ray, PathState;
* These rays proceed with path-iteration.
*/
throughput_coop[ray_index] = make_float3(1.0f, 1.0f, 1.0f);
L_transparent_coop[ray_index] = 0.0f;
path_radiance_init(&PathRadiance_coop[ray_index], kernel_data.film.use_light_pass);
path_state_init(kg,
kg->sd_input,
&PathState_coop[ray_index],
&rng_coop[ray_index],
my_sample,
&Ray_coop[ray_index]);
kernel_split_state.throughput[ray_index] = make_float3(1.0f, 1.0f, 1.0f);
kernel_split_state.L_transparent[ray_index] = 0.0f;
path_radiance_init(&kernel_split_state.path_radiance[ray_index], kernel_data.film.use_light_pass);
path_state_init(kg,
kernel_split_state.sd_DL_shadow,
&kernel_split_state.path_state[ray_index],
&kernel_split_state.rng[ray_index],
my_sample,
&kernel_split_state.ray[ray_index]);
#ifdef __KERNEL_DEBUG__
debug_data_init(&debugdata_coop[ray_index]);
debug_data_init(&kernel_split_state.debug_data[ray_index]);
#endif
}
else {
/* These rays do not participate in path-iteration. */
float4 L_rad = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
/* Accumulate result in output buffer. */
kernel_write_pass_float4(per_sample_output_buffers, my_sample, L_rad);
path_rng_end(kg, rng_state, rng_coop[ray_index]);
ASSIGN_RAY_STATE(ray_state, ray_index, RAY_TO_REGENERATE);
}
}
/* Mark rest of the ray-state indices as RAY_INACTIVE. */
if(thread_index < (get_global_size(0) * get_global_size(1)) - (sh * (sw * parallel_samples))) {
/* First assignment, hence we don't use ASSIGN_RAY_STATE macro */
ray_state[((sw * parallel_samples) * sh) + thread_index] = RAY_INACTIVE;
else {
/* These rays do not participate in path-iteration. */
float4 L_rad = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
/* Accumulate result in output buffer. */
kernel_write_pass_float4(per_sample_output_buffers, my_sample, L_rad);
path_rng_end(kg, rng_state, kernel_split_state.rng[ray_index]);
ASSIGN_RAY_STATE(kernel_split_state.ray_state, ray_index, RAY_TO_REGENERATE);
}
}
CCL_NAMESPACE_END
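kernel_data_init remaps rng_state and per_sample_output_buffers to the pixel owned by each work item with plain stride arithmetic: one RNG value per pixel at offset + pixel_x + pixel_y*stride, and pass_stride floats per pixel in the output buffer. A standalone sketch of just that arithmetic, with made-up values for offset, stride and pass_stride (not taken from any scene):

/* Buffer index arithmetic used when remapping the RNG state and the
 * per-sample output buffer to a work item's pixel, as in kernel_data_init. */
#include <stdio.h>

int main(void)
{
    int offset = 0, stride = 64;          /* render buffer layout */
    int pass_stride = 8;                  /* floats per pixel (kernel_data.film.pass_stride) */
    unsigned int pixel_x = 10, pixel_y = 3;
    unsigned int tile_x = 10, tile_y = 3;

    /* one RNG state value per pixel */
    int rng_index = offset + pixel_x + pixel_y * stride;

    /* pass_stride floats per pixel in the per-sample output buffer */
    int buffer_index = (tile_x + tile_y * stride) * pass_stride;

    printf("rng_state index: %d, output buffer index: %d\n", rng_index, buffer_index);
    return 0;
}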

View File

@ -14,7 +14,7 @@
* limitations under the License.
*/
#include "kernel_split_common.h"
CCL_NAMESPACE_BEGIN
/* Note on kernel_direct_lighting kernel.
* This is the eighth kernel in the ray tracing logic. This is the seventh
@ -47,20 +47,42 @@
* QUEUE_SHADOW_RAY_CAST_DL_RAYS queue will be filled with rays for which a shadow_blocked function must be executed, after this
* kernel call. Before this kernel call the QUEUE_SHADOW_RAY_CAST_DL_RAYS will be empty.
*/
ccl_device char kernel_direct_lighting(
KernelGlobals *kg,
ShaderData *sd, /* Required for direct lighting */
ccl_global uint *rng_coop, /* Required for direct lighting */
ccl_global PathState *PathState_coop, /* Required for direct lighting */
ccl_global int *ISLamp_coop, /* Required for direct lighting */
ccl_global Ray *LightRay_coop, /* Required for direct lighting */
ccl_global BsdfEval *BSDFEval_coop, /* Required for direct lighting */
ccl_global char *ray_state, /* Denotes the state of each ray */
int ray_index)
ccl_device void kernel_direct_lighting(KernelGlobals *kg)
{
ccl_local unsigned int local_queue_atomics;
if(ccl_local_id(0) == 0 && ccl_local_id(1) == 0) {
local_queue_atomics = 0;
}
ccl_barrier(CCL_LOCAL_MEM_FENCE);
char enqueue_flag = 0;
if(IS_STATE(ray_state, ray_index, RAY_ACTIVE)) {
ccl_global PathState *state = &PathState_coop[ray_index];
int ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
ray_index = get_ray_index(kg, ray_index,
QUEUE_ACTIVE_AND_REGENERATED_RAYS,
kernel_split_state.queue_data,
kernel_split_params.queue_size,
0);
#ifdef __COMPUTE_DEVICE_GPU__
/* If we are executing on a GPU device, we exit all threads that are not
* required.
*
* If we are executing on a CPU device, then we need to keep all threads
* active since we have barrier() calls later in the kernel. CPU devices
* expect all threads to execute the barrier statement.
*/
if(ray_index == QUEUE_EMPTY_SLOT) {
return;
}
#endif
#ifndef __COMPUTE_DEVICE_GPU__
if(ray_index != QUEUE_EMPTY_SLOT) {
#endif
if(IS_STATE(kernel_split_state.ray_state, ray_index, RAY_ACTIVE)) {
ccl_global PathState *state = &kernel_split_state.path_state[ray_index];
ShaderData *sd = kernel_split_state.sd;
/* direct lighting */
#ifdef __EMISSION__
@ -68,7 +90,7 @@ ccl_device char kernel_direct_lighting(
(ccl_fetch(sd, flag) & SD_BSDF_HAS_EVAL)))
{
/* Sample illumination from lights to find path contribution. */
ccl_global RNG* rng = &rng_coop[ray_index];
ccl_global RNG* rng = &kernel_split_state.rng[ray_index];
float light_t = path_state_rng_1D(kg, rng, state, PRNG_LIGHT);
float light_u, light_v;
path_state_rng_2D(kg, rng, state, PRNG_LIGHT_U, &light_u, &light_v);
@ -89,20 +111,36 @@ ccl_device char kernel_direct_lighting(
BsdfEval L_light;
bool is_lamp;
if(direct_emission(kg, sd, kg->sd_input, &ls, state, &light_ray, &L_light, &is_lamp, terminate)) {
if(direct_emission(kg, sd, kernel_split_state.sd_DL_shadow, &ls, state, &light_ray, &L_light, &is_lamp, terminate)) {
/* Write intermediate data to global memory to access from
* the next kernel.
*/
LightRay_coop[ray_index] = light_ray;
BSDFEval_coop[ray_index] = L_light;
ISLamp_coop[ray_index] = is_lamp;
kernel_split_state.light_ray[ray_index] = light_ray;
kernel_split_state.bsdf_eval[ray_index] = L_light;
kernel_split_state.is_lamp[ray_index] = is_lamp;
/* Mark ray state for next shadow kernel. */
ADD_RAY_FLAG(ray_state, ray_index, RAY_SHADOW_RAY_CAST_DL);
ADD_RAY_FLAG(kernel_split_state.ray_state, ray_index, RAY_SHADOW_RAY_CAST_DL);
enqueue_flag = 1;
}
}
}
#endif /* __EMISSION__ */
}
return enqueue_flag;
#ifndef __COMPUTE_DEVICE_GPU__
}
#endif
#ifdef __EMISSION__
/* Enqueue RAY_SHADOW_RAY_CAST_DL rays. */
enqueue_ray_index_local(ray_index,
QUEUE_SHADOW_RAY_CAST_DL_RAYS,
enqueue_flag,
kernel_split_params.queue_size,
&local_queue_atomics,
kernel_split_state.queue_data,
kernel_split_params.queue_index);
#endif
}
CCL_NAMESPACE_END

View File

@ -14,7 +14,7 @@
* limitations under the License.
*/
#include "kernel_split_common.h"
CCL_NAMESPACE_BEGIN
/* Note on kernel_holdout_emission_blurring_pathtermination_ao kernel.
* This is the sixth kernel in the ray tracing logic. This is the fifth
@ -70,35 +70,48 @@
* QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS will be filled with RAY_TO_REGENERATE and RAY_UPDATE_BUFFER rays
* QUEUE_SHADOW_RAY_CAST_AO_RAYS will be filled with rays marked with flag RAY_SHADOW_RAY_CAST_AO
*/
ccl_device void kernel_holdout_emission_blurring_pathtermination_ao(
KernelGlobals *kg,
ShaderData *sd, /* Required throughout the kernel except probabilistic path termination and AO */
ccl_global float *per_sample_output_buffers,
ccl_global uint *rng_coop, /* Required for "kernel_write_data_passes" and AO */
ccl_global float3 *throughput_coop, /* Required for handling holdout material and AO */
ccl_global float *L_transparent_coop, /* Required for handling holdout material */
PathRadiance *PathRadiance_coop, /* Required for "kernel_write_data_passes" and indirect primitive emission */
ccl_global PathState *PathState_coop, /* Required throughout the kernel and AO */
Intersection *Intersection_coop, /* Required for indirect primitive emission */
ccl_global float3 *AOAlpha_coop, /* Required for AO */
ccl_global float3 *AOBSDF_coop, /* Required for AO */
ccl_global Ray *AOLightRay_coop, /* Required for AO */
int sw, int sh, int sx, int sy, int stride,
ccl_global char *ray_state, /* Denotes the state of each ray */
ccl_global unsigned int *work_array, /* Denotes the work that each ray belongs to */
#ifdef __WORK_STEALING__
unsigned int start_sample,
#endif
int parallel_samples, /* Number of samples to be processed in parallel */
int ray_index,
char *enqueue_flag,
char *enqueue_flag_AO_SHADOW_RAY_CAST)
ccl_device void kernel_holdout_emission_blurring_pathtermination_ao(KernelGlobals *kg)
{
#ifdef __WORK_STEALING__
unsigned int my_work;
ccl_local unsigned int local_queue_atomics_bg;
ccl_local unsigned int local_queue_atomics_ao;
if(ccl_local_id(0) == 0 && ccl_local_id(1) == 0) {
local_queue_atomics_bg = 0;
local_queue_atomics_ao = 0;
}
ccl_barrier(CCL_LOCAL_MEM_FENCE);
char enqueue_flag = 0;
char enqueue_flag_AO_SHADOW_RAY_CAST = 0;
int ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
ray_index = get_ray_index(kg, ray_index,
QUEUE_ACTIVE_AND_REGENERATED_RAYS,
kernel_split_state.queue_data,
kernel_split_params.queue_size,
0);
#ifdef __COMPUTE_DEVICE_GPU__
/* If we are executing on a GPU device, we exit all threads that are not
* required.
*
* If we are executing on a CPU device, then we need to keep all threads
* active since we have barrier() calls later in the kernel. CPU devices
* expect all threads to execute the barrier statement.
*/
if(ray_index == QUEUE_EMPTY_SLOT) {
return;
}
#endif /* __COMPUTE_DEVICE_GPU__ */
#ifndef __COMPUTE_DEVICE_GPU__
if(ray_index != QUEUE_EMPTY_SLOT) {
#endif
int stride = kernel_split_params.stride;
unsigned int work_index;
unsigned int pixel_x;
unsigned int pixel_y;
#endif
unsigned int tile_x;
unsigned int tile_y;
int my_sample_tile;
@ -108,31 +121,26 @@ ccl_device void kernel_holdout_emission_blurring_pathtermination_ao(
ccl_global PathState *state = 0x0;
float3 throughput;
ccl_global char *ray_state = kernel_split_state.ray_state;
ShaderData *sd = kernel_split_state.sd;
ccl_global float *per_sample_output_buffers = kernel_split_state.per_sample_output_buffers;
if(IS_STATE(ray_state, ray_index, RAY_ACTIVE)) {
throughput = throughput_coop[ray_index];
state = &PathState_coop[ray_index];
rng = &rng_coop[ray_index];
#ifdef __WORK_STEALING__
my_work = work_array[ray_index];
sample = get_my_sample(my_work, sw, sh, parallel_samples, ray_index) + start_sample;
get_pixel_tile_position(&pixel_x, &pixel_y,
throughput = kernel_split_state.throughput[ray_index];
state = &kernel_split_state.path_state[ray_index];
rng = &kernel_split_state.rng[ray_index];
work_index = kernel_split_state.work_array[ray_index];
sample = get_work_sample(kg, work_index, ray_index) + kernel_split_params.start_sample;
get_work_pixel_tile_position(kg, &pixel_x, &pixel_y,
&tile_x, &tile_y,
my_work,
sw, sh, sx, sy,
parallel_samples,
work_index,
ray_index);
my_sample_tile = 0;
#else /* __WORK_STEALING__ */
sample = work_array[ray_index];
/* Buffer's stride is "stride"; Find x and y using ray_index. */
int tile_index = ray_index / parallel_samples;
tile_x = tile_index % sw;
tile_y = tile_index / sw;
my_sample_tile = ray_index - (tile_index * parallel_samples);
#endif /* __WORK_STEALING__ */
per_sample_output_buffers +=
(((tile_x + (tile_y * stride)) * parallel_samples) + my_sample_tile) *
((tile_x + (tile_y * stride)) + my_sample_tile) *
kernel_data.film.pass_stride;
/* holdout */
@ -150,18 +158,18 @@ ccl_device void kernel_holdout_emission_blurring_pathtermination_ao(
holdout_weight = shader_holdout_eval(kg, sd);
}
/* any throughput is ok, should all be identical here */
L_transparent_coop[ray_index] += average(holdout_weight*throughput);
kernel_split_state.L_transparent[ray_index] += average(holdout_weight*throughput);
}
if(ccl_fetch(sd, object_flag) & SD_OBJECT_HOLDOUT_MASK) {
ASSIGN_RAY_STATE(ray_state, ray_index, RAY_UPDATE_BUFFER);
*enqueue_flag = 1;
enqueue_flag = 1;
}
}
#endif /* __HOLDOUT__ */
}
if(IS_STATE(ray_state, ray_index, RAY_ACTIVE)) {
PathRadiance *L = &PathRadiance_coop[ray_index];
PathRadiance *L = &kernel_split_state.path_radiance[ray_index];
/* Holdout mask objects do not write data passes. */
kernel_write_data_passes(kg,
per_sample_output_buffers,
@ -188,7 +196,7 @@ ccl_device void kernel_holdout_emission_blurring_pathtermination_ao(
float3 emission = indirect_primitive_emission(
kg,
sd,
Intersection_coop[ray_index].t,
kernel_split_state.isect[ray_index].t,
state->flag,
state->ray_pdf);
path_radiance_accum_emission(L, throughput, emission, state->bounce);
@ -203,7 +211,7 @@ ccl_device void kernel_holdout_emission_blurring_pathtermination_ao(
if(probability == 0.0f) {
ASSIGN_RAY_STATE(ray_state, ray_index, RAY_UPDATE_BUFFER);
*enqueue_flag = 1;
enqueue_flag = 1;
}
if(IS_STATE(ray_state, ray_index, RAY_ACTIVE)) {
@ -211,10 +219,10 @@ ccl_device void kernel_holdout_emission_blurring_pathtermination_ao(
float terminate = path_state_rng_1D_for_decision(kg, rng, state, PRNG_TERMINATE);
if(terminate >= probability) {
ASSIGN_RAY_STATE(ray_state, ray_index, RAY_UPDATE_BUFFER);
*enqueue_flag = 1;
enqueue_flag = 1;
}
else {
throughput_coop[ray_index] = throughput/probability;
kernel_split_state.throughput[ray_index] = throughput/probability;
}
}
}
@ -232,8 +240,8 @@ ccl_device void kernel_holdout_emission_blurring_pathtermination_ao(
float ao_factor = kernel_data.background.ao_factor;
float3 ao_N;
AOBSDF_coop[ray_index] = shader_bsdf_ao(kg, sd, ao_factor, &ao_N);
AOAlpha_coop[ray_index] = shader_bsdf_alpha(kg, sd);
kernel_split_state.ao_bsdf[ray_index] = shader_bsdf_ao(kg, sd, ao_factor, &ao_N);
kernel_split_state.ao_alpha[ray_index] = shader_bsdf_alpha(kg, sd);
float3 ao_D;
float ao_pdf;
@ -249,12 +257,39 @@ ccl_device void kernel_holdout_emission_blurring_pathtermination_ao(
#endif
_ray.dP = ccl_fetch(sd, dP);
_ray.dD = differential3_zero();
AOLightRay_coop[ray_index] = _ray;
kernel_split_state.ao_light_ray[ray_index] = _ray;
ADD_RAY_FLAG(ray_state, ray_index, RAY_SHADOW_RAY_CAST_AO);
*enqueue_flag_AO_SHADOW_RAY_CAST = 1;
enqueue_flag_AO_SHADOW_RAY_CAST = 1;
}
}
}
#endif /* __AO__ */
#ifndef __COMPUTE_DEVICE_GPU__
}
#endif
/* Enqueue RAY_UPDATE_BUFFER rays. */
enqueue_ray_index_local(ray_index,
QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS,
enqueue_flag,
kernel_split_params.queue_size,
&local_queue_atomics_bg,
kernel_split_state.queue_data,
kernel_split_params.queue_index);
#ifdef __AO__
/* Enqueue to-shadow-ray-cast rays. */
enqueue_ray_index_local(ray_index,
QUEUE_SHADOW_RAY_CAST_AO_RAYS,
enqueue_flag_AO_SHADOW_RAY_CAST,
kernel_split_params.queue_size,
&local_queue_atomics_ao,
kernel_split_state.queue_data,
kernel_split_params.queue_index);
#endif
}
CCL_NAMESPACE_END
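The path-termination branch above is standard Russian roulette: a path continues with some probability, and a surviving path has its throughput divided by that probability so the estimator stays unbiased. A minimal standalone sketch of just that step (the probability and throughput values are arbitrary, not taken from the kernel):

/* Russian roulette path termination: kill a path with probability (1 - p),
 * and reweight surviving paths by 1/p to compensate. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    float throughput = 0.25f;       /* current path throughput (one channel) */
    float probability = 0.5f;       /* continuation probability */
    float terminate = (float)rand() / (float)RAND_MAX;

    if(probability == 0.0f || terminate >= probability) {
        printf("path terminated\n");        /* would be marked RAY_UPDATE_BUFFER */
    }
    else {
        throughput /= probability;          /* compensate for the killed paths */
        printf("path continues, throughput = %f\n", throughput);
    }
    return 0;
}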

View File

@ -14,7 +14,7 @@
* limitations under the License.
*/
#include "kernel_split_common.h"
CCL_NAMESPACE_BEGIN
/* Note on kernel_lamp_emission
* This is the 3rd kernel in the ray-tracing logic. This is the second of the
@ -36,28 +36,39 @@
* sw -------------------------------------------------| |
* sh -------------------------------------------------| |
*/
ccl_device void kernel_lamp_emission(
KernelGlobals *kg,
ccl_global float3 *throughput_coop, /* Required for lamp emission */
PathRadiance *PathRadiance_coop, /* Required for lamp emission */
ccl_global Ray *Ray_coop, /* Required for lamp emission */
ccl_global PathState *PathState_coop, /* Required for lamp emission */
Intersection *Intersection_coop, /* Required for lamp emission */
ccl_global char *ray_state, /* Denotes the state of each ray */
int sw, int sh,
ccl_global char *use_queues_flag, /* Used to decide if this kernel should use
* queues to fetch ray index
*/
int ray_index)
ccl_device void kernel_lamp_emission(KernelGlobals *kg)
{
if(IS_STATE(ray_state, ray_index, RAY_ACTIVE) ||
IS_STATE(ray_state, ray_index, RAY_HIT_BACKGROUND))
{
PathRadiance *L = &PathRadiance_coop[ray_index];
ccl_global PathState *state = &PathState_coop[ray_index];
/* We will empty this queue in this kernel. */
if(ccl_global_id(0) == 0 && ccl_global_id(1) == 0) {
kernel_split_params.queue_index[QUEUE_ACTIVE_AND_REGENERATED_RAYS] = 0;
}
/* Fetch use_queues_flag. */
ccl_local char local_use_queues_flag;
if(ccl_local_id(0) == 0 && ccl_local_id(1) == 0) {
local_use_queues_flag = *kernel_split_params.use_queues_flag;
}
ccl_barrier(CCL_LOCAL_MEM_FENCE);
float3 throughput = throughput_coop[ray_index];
Ray ray = Ray_coop[ray_index];
int ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
if(local_use_queues_flag) {
ray_index = get_ray_index(kg, ray_index,
QUEUE_ACTIVE_AND_REGENERATED_RAYS,
kernel_split_state.queue_data,
kernel_split_params.queue_size,
1);
if(ray_index == QUEUE_EMPTY_SLOT) {
return;
}
}
if(IS_STATE(kernel_split_state.ray_state, ray_index, RAY_ACTIVE) ||
IS_STATE(kernel_split_state.ray_state, ray_index, RAY_HIT_BACKGROUND))
{
PathRadiance *L = &kernel_split_state.path_radiance[ray_index];
ccl_global PathState *state = &kernel_split_state.path_state[ray_index];
float3 throughput = kernel_split_state.throughput[ray_index];
Ray ray = kernel_split_state.ray[ray_index];
#ifdef __LAMP_MIS__
if(kernel_data.integrator.use_lamp_mis && !(state->flag & PATH_RAY_CAMERA)) {
@ -65,7 +76,7 @@ ccl_device void kernel_lamp_emission(
Ray light_ray;
light_ray.P = ray.P - state->ray_t*ray.D;
state->ray_t += Intersection_coop[ray_index].t;
state->ray_t += kernel_split_state.isect[ray_index].t;
light_ray.D = ray.D;
light_ray.t = state->ray_t;
light_ray.time = ray.time;
@ -74,10 +85,13 @@ ccl_device void kernel_lamp_emission(
/* intersect with lamp */
float3 emission;
if(indirect_lamp_emission(kg, kg->sd_input, state, &light_ray, &emission)) {
if(indirect_lamp_emission(kg, kernel_split_state.sd_DL_shadow, state, &light_ray, &emission)) {
path_radiance_accum_emission(L, throughput, emission, state->bounce);
}
}
#endif /* __LAMP_MIS__ */
}
}
CCL_NAMESPACE_END

View File

@ -14,7 +14,7 @@
* limitations under the License.
*/
#include "kernel_split_common.h"
CCL_NAMESPACE_BEGIN
/* Note on kernel_setup_next_iteration kernel.
* This is the tenth kernel in the ray tracing logic. This is the ninth
@ -59,47 +59,76 @@
* QUEUE_ACTIVE_AND_REGENERATED_RAYS will be filled with RAY_ACTIVE, RAY_REGENERATED and more RAY_UPDATE_BUFFER rays.
* QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS will be filled with RAY_TO_REGENERATE and more RAY_UPDATE_BUFFER rays
*/
ccl_device char kernel_next_iteration_setup(
KernelGlobals *kg,
ShaderData *sd, /* Required for setting up ray for next iteration */
ccl_global uint *rng_coop, /* Required for setting up ray for next iteration */
ccl_global float3 *throughput_coop, /* Required for setting up ray for next iteration */
PathRadiance *PathRadiance_coop, /* Required for setting up ray for next iteration */
ccl_global Ray *Ray_coop, /* Required for setting up ray for next iteration */
ccl_global PathState *PathState_coop, /* Required for setting up ray for next iteration */
ccl_global Ray *LightRay_dl_coop, /* Required for radiance update - direct lighting */
ccl_global int *ISLamp_coop, /* Required for radiance update - direct lighting */
ccl_global BsdfEval *BSDFEval_coop, /* Required for radiance update - direct lighting */
ccl_global Ray *LightRay_ao_coop, /* Required for radiance update - AO */
ccl_global float3 *AOBSDF_coop, /* Required for radiance update - AO */
ccl_global float3 *AOAlpha_coop, /* Required for radiance update - AO */
ccl_global char *ray_state, /* Denotes the state of each ray */
ccl_global char *use_queues_flag, /* flag to decide if scene_intersect kernel should
* use queues to fetch ray index */
int ray_index)
ccl_device void kernel_next_iteration_setup(KernelGlobals *kg)
{
ccl_local unsigned int local_queue_atomics;
if(ccl_local_id(0) == 0 && ccl_local_id(1) == 0) {
local_queue_atomics = 0;
}
ccl_barrier(CCL_LOCAL_MEM_FENCE);
if(ccl_global_id(0) == 0 && ccl_global_id(1) == 0) {
/* If we are here, then it means that scene-intersect kernel
* has already been executed at least once. From the next time,
* scene-intersect kernel may operate on queues to fetch ray index
*/
*kernel_split_params.use_queues_flag = 1;
/* Mark queue indices of QUEUE_SHADOW_RAY_CAST_AO_RAYS and
* QUEUE_SHADOW_RAY_CAST_DL_RAYS queues that were made empty during the
* previous kernel.
*/
kernel_split_params.queue_index[QUEUE_SHADOW_RAY_CAST_AO_RAYS] = 0;
kernel_split_params.queue_index[QUEUE_SHADOW_RAY_CAST_DL_RAYS] = 0;
}
char enqueue_flag = 0;
int ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
ray_index = get_ray_index(kg, ray_index,
QUEUE_ACTIVE_AND_REGENERATED_RAYS,
kernel_split_state.queue_data,
kernel_split_params.queue_size,
0);
#ifdef __COMPUTE_DEVICE_GPU__
/* If we are executing on a GPU device, we exit all threads that are not
* required.
*
* If we are executing on a CPU device, then we need to keep all threads
* active since we have barrier() calls later in the kernel. CPU devices
* expect all threads to execute the barrier statement.
*/
if(ray_index == QUEUE_EMPTY_SLOT) {
return;
}
#endif
#ifndef __COMPUTE_DEVICE_GPU__
if(ray_index != QUEUE_EMPTY_SLOT) {
#endif
/* Load ShaderData structure. */
PathRadiance *L = NULL;
ccl_global PathState *state = NULL;
ccl_global char *ray_state = kernel_split_state.ray_state;
/* Path radiance update for AO/Direct_lighting's shadow blocked. */
if(IS_FLAG(ray_state, ray_index, RAY_SHADOW_RAY_CAST_DL) ||
IS_FLAG(ray_state, ray_index, RAY_SHADOW_RAY_CAST_AO))
{
state = &PathState_coop[ray_index];
L = &PathRadiance_coop[ray_index];
float3 _throughput = throughput_coop[ray_index];
state = &kernel_split_state.path_state[ray_index];
L = &kernel_split_state.path_radiance[ray_index];
float3 _throughput = kernel_split_state.throughput[ray_index];
if(IS_FLAG(ray_state, ray_index, RAY_SHADOW_RAY_CAST_AO)) {
float3 shadow = LightRay_ao_coop[ray_index].P;
char update_path_radiance = LightRay_ao_coop[ray_index].t;
float3 shadow = kernel_split_state.ao_light_ray[ray_index].P;
// TODO(mai): investigate correctness here
char update_path_radiance = (char)kernel_split_state.ao_light_ray[ray_index].t;
if(update_path_radiance) {
path_radiance_accum_ao(L,
_throughput,
AOAlpha_coop[ray_index],
AOBSDF_coop[ray_index],
kernel_split_state.ao_alpha[ray_index],
kernel_split_state.ao_bsdf[ray_index],
shadow,
state->bounce);
}
@ -107,35 +136,50 @@ ccl_device char kernel_next_iteration_setup(
}
if(IS_FLAG(ray_state, ray_index, RAY_SHADOW_RAY_CAST_DL)) {
float3 shadow = LightRay_dl_coop[ray_index].P;
char update_path_radiance = LightRay_dl_coop[ray_index].t;
float3 shadow = kernel_split_state.light_ray[ray_index].P;
// TODO(mai): investigate correctness here
char update_path_radiance = (char)kernel_split_state.light_ray[ray_index].t;
if(update_path_radiance) {
BsdfEval L_light = BSDFEval_coop[ray_index];
BsdfEval L_light = kernel_split_state.bsdf_eval[ray_index];
path_radiance_accum_light(L,
_throughput,
&L_light,
shadow,
1.0f,
state->bounce,
ISLamp_coop[ray_index]);
kernel_split_state.is_lamp[ray_index]);
}
REMOVE_RAY_FLAG(ray_state, ray_index, RAY_SHADOW_RAY_CAST_DL);
}
}
if(IS_STATE(ray_state, ray_index, RAY_ACTIVE)) {
ccl_global float3 *throughput = &throughput_coop[ray_index];
ccl_global Ray *ray = &Ray_coop[ray_index];
ccl_global RNG *rng = &rng_coop[ray_index];
state = &PathState_coop[ray_index];
L = &PathRadiance_coop[ray_index];
ccl_global float3 *throughput = &kernel_split_state.throughput[ray_index];
ccl_global Ray *ray = &kernel_split_state.ray[ray_index];
ccl_global RNG *rng = &kernel_split_state.rng[ray_index];
state = &kernel_split_state.path_state[ray_index];
L = &kernel_split_state.path_radiance[ray_index];
/* Compute direct lighting and next bounce. */
if(!kernel_path_surface_bounce(kg, rng, sd, throughput, state, L, ray)) {
if(!kernel_path_surface_bounce(kg, rng, kernel_split_state.sd, throughput, state, L, ray)) {
ASSIGN_RAY_STATE(ray_state, ray_index, RAY_UPDATE_BUFFER);
enqueue_flag = 1;
}
}
return enqueue_flag;
#ifndef __COMPUTE_DEVICE_GPU__
}
#endif
/* Enqueue RAY_UPDATE_BUFFER rays. */
enqueue_ray_index_local(ray_index,
QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS,
enqueue_flag,
kernel_split_params.queue_size,
&local_queue_atomics,
kernel_split_state.queue_data,
kernel_split_params.queue_index);
}
CCL_NAMESPACE_END

View File

@ -0,0 +1,102 @@
/*
* Copyright 2011-2016 Blender Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
CCL_NAMESPACE_BEGIN
/*
* The kernel "kernel_queue_enqueue" enqueues rays of
* different ray states into their appropriate queues;
* 1. Rays that have been determined to hit the background from the
* "kernel_scene_intersect" kernel
* are enqueued in QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS;
* 2. Rays that have been determined to be actively participating in path-iteration will be enqueued into QUEUE_ACTIVE_AND_REGENERATED_RAYS.
*
* The input and output of the kernel is as follows,
*
* ray_state -------------------------------------------|--- kernel_queue_enqueue --|--- Queue_data (QUEUE_ACTIVE_AND_REGENERATED_RAYS & QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS)
* Queue_index(QUEUE_ACTIVE_AND_REGENERATED_RAYS) ------| |--- Queue_index (QUEUE_ACTIVE_AND_REGENERATED_RAYS & QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS)
* Queue_index(QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS) ---| |
* queuesize -------------------------------------------| |
*
* Note on Queues :
* State of queues during the first time this kernel is called :
* At entry,
* Both QUEUE_ACTIVE_AND_REGENERATED_RAYS and QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS will be empty.
* At exit,
* QUEUE_ACTIVE_AND_REGENERATED_RAYS will be filled with RAY_ACTIVE rays
* QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS will be filled with RAY_HIT_BACKGROUND rays.
*
* State of queue during other times this kernel is called :
* At entry,
* QUEUE_ACTIVE_AND_REGENERATED_RAYS will be empty.
* QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS will contain RAY_TO_REGENERATE and RAY_UPDATE_BUFFER rays.
* At exit,
* QUEUE_ACTIVE_AND_REGENERATED_RAYS will be filled with RAY_ACTIVE rays.
* QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS will be filled with RAY_TO_REGENERATE, RAY_UPDATE_BUFFER, RAY_HIT_BACKGROUND rays.
*/
ccl_device void kernel_queue_enqueue(KernelGlobals *kg)
{
/* We have only 2 cases (Hit/Not-Hit) */
ccl_local unsigned int local_queue_atomics[2];
int lidx = ccl_local_id(1) * ccl_local_size(0) + ccl_local_id(0);
int ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
if(lidx == 0) {
local_queue_atomics[0] = 0;
local_queue_atomics[1] = 0;
}
ccl_barrier(CCL_LOCAL_MEM_FENCE);
int queue_number = -1;
if(IS_STATE(kernel_split_state.ray_state, ray_index, RAY_HIT_BACKGROUND)) {
queue_number = QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS;
}
else if(IS_STATE(kernel_split_state.ray_state, ray_index, RAY_ACTIVE)) {
queue_number = QUEUE_ACTIVE_AND_REGENERATED_RAYS;
}
unsigned int my_lqidx;
if(queue_number != -1) {
my_lqidx = get_local_queue_index(queue_number, local_queue_atomics);
}
ccl_barrier(CCL_LOCAL_MEM_FENCE);
if(lidx == 0) {
local_queue_atomics[QUEUE_ACTIVE_AND_REGENERATED_RAYS] =
get_global_per_queue_offset(QUEUE_ACTIVE_AND_REGENERATED_RAYS,
local_queue_atomics,
kernel_split_params.queue_index);
local_queue_atomics[QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS] =
get_global_per_queue_offset(QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS,
local_queue_atomics,
kernel_split_params.queue_index);
}
ccl_barrier(CCL_LOCAL_MEM_FENCE);
unsigned int my_gqidx;
if(queue_number != -1) {
my_gqidx = get_global_queue_index(queue_number,
kernel_split_params.queue_size,
my_lqidx,
local_queue_atomics);
kernel_split_state.queue_data[my_gqidx] = ray_index;
}
}
CCL_NAMESPACE_END
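The enqueue itself is a two-pass pattern: threads first claim slots in a work-group-local counter, one thread then reserves a contiguous range in the global queue index, and finally every thread writes its ray index at the reserved group offset plus its local slot. A single-threaded C sketch of that pattern, where loop iterations stand in for threads and none of the names below are actual Cycles helpers:

/* Two-pass queue enqueue: count locally, reserve a global range, then
 * scatter ray indices. Illustrative emulation of what the local atomics,
 * get_global_per_queue_offset() and get_global_queue_index() achieve. */
#include <stdio.h>

#define GROUP_SIZE 4
#define NUM_RAYS   8

int main(void)
{
    int ray_active[NUM_RAYS] = {1, 0, 1, 1, 0, 1, 0, 1};
    int queue[NUM_RAYS];
    int queue_index = 0;                  /* global element count for this queue */

    for(int group = 0; group < NUM_RAYS / GROUP_SIZE; group++) {
        int local_count = 0;              /* "local_queue_atomics" */
        int local_slot[GROUP_SIZE];

        /* pass 1: each thread grabs a local slot if it has something to enqueue */
        for(int lid = 0; lid < GROUP_SIZE; lid++) {
            int ray = group * GROUP_SIZE + lid;
            local_slot[lid] = ray_active[ray] ? local_count++ : -1;
        }

        /* one thread per group reserves a contiguous global range */
        int group_offset = queue_index;   /* atomic_add(queue_index, local_count) */
        queue_index += local_count;

        /* pass 2: write ray indices at global offset + local slot */
        for(int lid = 0; lid < GROUP_SIZE; lid++) {
            int ray = group * GROUP_SIZE + lid;
            if(local_slot[lid] >= 0)
                queue[group_offset + local_slot[lid]] = ray;
        }
    }

    for(int i = 0; i < queue_index; i++)
        printf("queue[%d] = ray %d\n", i, queue[i]);
    return 0;
}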

View File

@ -14,7 +14,7 @@
* limitations under the License.
*/
#include "kernel_split_common.h"
CCL_NAMESPACE_BEGIN
/* Note on kernel_scene_intersect kernel.
* This is the second kernel in the ray tracing logic. This is the first
@ -61,34 +61,41 @@
* QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS - no change
*/
ccl_device void kernel_scene_intersect(
KernelGlobals *kg,
ccl_global uint *rng_coop,
ccl_global Ray *Ray_coop, /* Required for scene_intersect */
ccl_global PathState *PathState_coop, /* Required for scene_intersect */
Intersection *Intersection_coop, /* Required for scene_intersect */
ccl_global char *ray_state, /* Denotes the state of each ray */
int sw, int sh,
ccl_global char *use_queues_flag, /* used to decide if this kernel should use
* queues to fetch ray index */
#ifdef __KERNEL_DEBUG__
DebugData *debugdata_coop,
#endif
int ray_index)
ccl_device void kernel_scene_intersect(KernelGlobals *kg)
{
/* All regenerated rays become active here */
if(IS_STATE(ray_state, ray_index, RAY_REGENERATED))
ASSIGN_RAY_STATE(ray_state, ray_index, RAY_ACTIVE);
/* Fetch use_queues_flag */
ccl_local char local_use_queues_flag;
if(ccl_local_id(0) == 0 && ccl_local_id(1) == 0) {
local_use_queues_flag = *kernel_split_params.use_queues_flag;
}
ccl_barrier(CCL_LOCAL_MEM_FENCE);
if(!IS_STATE(ray_state, ray_index, RAY_ACTIVE))
int ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
if(local_use_queues_flag) {
ray_index = get_ray_index(kg, ray_index,
QUEUE_ACTIVE_AND_REGENERATED_RAYS,
kernel_split_state.queue_data,
kernel_split_params.queue_size,
0);
if(ray_index == QUEUE_EMPTY_SLOT) {
return;
}
}
/* All regenerated rays become active here */
if(IS_STATE(kernel_split_state.ray_state, ray_index, RAY_REGENERATED))
ASSIGN_RAY_STATE(kernel_split_state.ray_state, ray_index, RAY_ACTIVE);
if(!IS_STATE(kernel_split_state.ray_state, ray_index, RAY_ACTIVE))
return;
#ifdef __KERNEL_DEBUG__
DebugData *debug_data = &debugdata_coop[ray_index];
DebugData *debug_data = &kernel_split_state.debug_data[ray_index];
#endif
Intersection *isect = &Intersection_coop[ray_index];
PathState state = PathState_coop[ray_index];
Ray ray = Ray_coop[ray_index];
Intersection *isect = &kernel_split_state.isect[ray_index];
PathState state = kernel_split_state.path_state[ray_index];
Ray ray = kernel_split_state.ray[ray_index];
/* intersect scene */
uint visibility = path_state_ray_visibility(kg, &state);
@ -96,7 +103,7 @@ ccl_device void kernel_scene_intersect(
#ifdef __HAIR__
float difl = 0.0f, extmax = 0.0f;
uint lcg_state = 0;
RNG rng = rng_coop[ray_index];
RNG rng = kernel_split_state.rng[ray_index];
if(kernel_data.bvh.have_curves) {
if((kernel_data.cam.resolution == 1) && (state.flag & PATH_RAY_CAMERA)) {
@ -128,6 +135,9 @@ ccl_device void kernel_scene_intersect(
* These rays undergo special processing in the
* background_bufferUpdate kernel.
*/
ASSIGN_RAY_STATE(ray_state, ray_index, RAY_HIT_BACKGROUND);
ASSIGN_RAY_STATE(kernel_split_state.ray_state, ray_index, RAY_HIT_BACKGROUND);
}
}
CCL_NAMESPACE_END

View File

@ -14,7 +14,7 @@
* limitations under the License.
*/
#include "kernel_split_common.h"
CCL_NAMESPACE_BEGIN
/* Note on kernel_shader_eval kernel
* This kernel is the 5th kernel in the ray tracing logic. This is
@ -44,27 +44,51 @@
* QUEUE_ACTIVE_AND_REGENERATED_RAYS will be filled with RAY_ACTIVE and RAY_REGENERATED rays
* QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS will be filled with RAY_TO_REGENERATE rays
*/
ccl_device void kernel_shader_eval(
KernelGlobals *kg,
ShaderData *sd, /* Output ShaderData structure to be filled */
ccl_global uint *rng_coop, /* Required for rbsdf calculation */
ccl_global Ray *Ray_coop, /* Required for setting up shader from ray */
ccl_global PathState *PathState_coop, /* Required for all functions in this kernel */
Intersection *Intersection_coop, /* Required for setting up shader from ray */
ccl_global char *ray_state, /* Denotes the state of each ray */
int ray_index)
ccl_device void kernel_shader_eval(KernelGlobals *kg)
{
if(IS_STATE(ray_state, ray_index, RAY_ACTIVE)) {
Intersection *isect = &Intersection_coop[ray_index];
ccl_global uint *rng = &rng_coop[ray_index];
ccl_global PathState *state = &PathState_coop[ray_index];
Ray ray = Ray_coop[ray_index];
/* Enqueue RAY_TO_REGENERATE rays into QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS queue. */
ccl_local unsigned int local_queue_atomics;
if(ccl_local_id(0) == 0 && ccl_local_id(1) == 0) {
local_queue_atomics = 0;
}
ccl_barrier(CCL_LOCAL_MEM_FENCE);
int ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
ray_index = get_ray_index(kg, ray_index,
QUEUE_ACTIVE_AND_REGENERATED_RAYS,
kernel_split_state.queue_data,
kernel_split_params.queue_size,
0);
if(ray_index == QUEUE_EMPTY_SLOT) {
return;
}
char enqueue_flag = (IS_STATE(kernel_split_state.ray_state, ray_index, RAY_TO_REGENERATE)) ? 1 : 0;
enqueue_ray_index_local(ray_index,
QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS,
enqueue_flag,
kernel_split_params.queue_size,
&local_queue_atomics,
kernel_split_state.queue_data,
kernel_split_params.queue_index);
/* Continue on with shader evaluation. */
if(IS_STATE(kernel_split_state.ray_state, ray_index, RAY_ACTIVE)) {
Intersection *isect = &kernel_split_state.isect[ray_index];
ccl_global uint *rng = &kernel_split_state.rng[ray_index];
ccl_global PathState *state = &kernel_split_state.path_state[ray_index];
Ray ray = kernel_split_state.ray[ray_index];
shader_setup_from_ray(kg,
sd,
kernel_split_state.sd,
isect,
&ray);
float rbsdf = path_state_rng_1D_for_decision(kg, rng, state, PRNG_BSDF);
shader_eval_surface(kg, sd, rng, state, rbsdf, state->flag, SHADER_CONTEXT_MAIN);
shader_eval_surface(kg, kernel_split_state.sd, rng, state, rbsdf, state->flag, SHADER_CONTEXT_MAIN);
}
}
CCL_NAMESPACE_END

View File

@ -14,7 +14,7 @@
* limitations under the License.
*/
#include "kernel_split_common.h"
CCL_NAMESPACE_BEGIN
/* Note on kernel_shadow_blocked kernel.
* This is the ninth kernel in the ray tracing logic. This is the eighth
@ -45,24 +45,47 @@
* and RAY_SHADOW_RAY_CAST_DL respectively, during kernel entry.
* QUEUE_SHADOW_RAY_CAST_AO_RAYS and QUEUE_SHADOW_RAY_CAST_DL_RAYS will be empty at kernel exit.
*/
ccl_device void kernel_shadow_blocked(
KernelGlobals *kg,
ccl_global PathState *PathState_coop, /* Required for shadow blocked */
ccl_global Ray *LightRay_dl_coop, /* Required for direct lighting's shadow blocked */
ccl_global Ray *LightRay_ao_coop, /* Required for AO's shadow blocked */
ccl_global char *ray_state,
char shadow_blocked_type,
int ray_index)
ccl_device void kernel_shadow_blocked(KernelGlobals *kg)
{
int lidx = ccl_local_id(1) * ccl_local_id(0) + ccl_local_id(0);
ccl_local unsigned int ao_queue_length;
ccl_local unsigned int dl_queue_length;
if(lidx == 0) {
ao_queue_length = kernel_split_params.queue_index[QUEUE_SHADOW_RAY_CAST_AO_RAYS];
dl_queue_length = kernel_split_params.queue_index[QUEUE_SHADOW_RAY_CAST_DL_RAYS];
}
ccl_barrier(CCL_LOCAL_MEM_FENCE);
/* Flag determining whether the current ray processes an AO or a DL shadow ray. */
char shadow_blocked_type = -1;
int ray_index = QUEUE_EMPTY_SLOT;
int thread_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
if(thread_index < ao_queue_length + dl_queue_length) {
if(thread_index < ao_queue_length) {
ray_index = get_ray_index(kg, thread_index, QUEUE_SHADOW_RAY_CAST_AO_RAYS,
kernel_split_state.queue_data, kernel_split_params.queue_size, 1);
shadow_blocked_type = RAY_SHADOW_RAY_CAST_AO;
} else {
ray_index = get_ray_index(kg, thread_index - ao_queue_length, QUEUE_SHADOW_RAY_CAST_DL_RAYS,
kernel_split_state.queue_data, kernel_split_params.queue_size, 1);
shadow_blocked_type = RAY_SHADOW_RAY_CAST_DL;
}
}
if(ray_index == QUEUE_EMPTY_SLOT)
return;
/* Flag determining if we need to update L. */
char update_path_radiance = 0;
if(IS_FLAG(ray_state, ray_index, RAY_SHADOW_RAY_CAST_DL) ||
IS_FLAG(ray_state, ray_index, RAY_SHADOW_RAY_CAST_AO))
if(IS_FLAG(kernel_split_state.ray_state, ray_index, RAY_SHADOW_RAY_CAST_DL) ||
IS_FLAG(kernel_split_state.ray_state, ray_index, RAY_SHADOW_RAY_CAST_AO))
{
ccl_global PathState *state = &PathState_coop[ray_index];
ccl_global Ray *light_ray_dl_global = &LightRay_dl_coop[ray_index];
ccl_global Ray *light_ray_ao_global = &LightRay_ao_coop[ray_index];
ccl_global PathState *state = &kernel_split_state.path_state[ray_index];
ccl_global Ray *light_ray_dl_global = &kernel_split_state.light_ray[ray_index];
ccl_global Ray *light_ray_ao_global = &kernel_split_state.ao_light_ray[ray_index];
ccl_global Ray *light_ray_global =
shadow_blocked_type == RAY_SHADOW_RAY_CAST_AO
@ -71,7 +94,7 @@ ccl_device void kernel_shadow_blocked(
float3 shadow;
update_path_radiance = !(shadow_blocked(kg,
kg->sd_input,
kernel_split_state.sd_DL_shadow,
state,
light_ray_global,
&shadow));
@ -83,3 +106,6 @@ ccl_device void kernel_shadow_blocked(
light_ray_global->t = update_path_radiance;
}
}
CCL_NAMESPACE_END
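
Editor's note: the combined AO/DL dispatch above can be illustrated with hypothetical queue lengths (numbers are illustrative only, not from this commit):

    /* With ao_queue_length == 100 and dl_queue_length == 40:
     * - threads 0..99 fetch entry thread_index from QUEUE_SHADOW_RAY_CAST_AO_RAYS
     *   and set shadow_blocked_type = RAY_SHADOW_RAY_CAST_AO;
     * - threads 100..139 fetch entry (thread_index - 100) from
     *   QUEUE_SHADOW_RAY_CAST_DL_RAYS and set RAY_SHADOW_RAY_CAST_DL;
     * - threads 140 and above keep ray_index == QUEUE_EMPTY_SLOT and return. */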

View File

@ -17,9 +17,11 @@
#ifndef __KERNEL_SPLIT_H__
#define __KERNEL_SPLIT_H__
#include "kernel_compat_opencl.h"
#include "kernel_math.h"
#include "kernel_types.h"
#include "kernel_split_data.h"
#include "kernel_globals.h"
#include "kernel_image_opencl.h"

View File

@ -0,0 +1,153 @@
/*
* Copyright 2011-2016 Blender Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __KERNEL_SPLIT_DATA_H__
#define __KERNEL_SPLIT_DATA_H__
CCL_NAMESPACE_BEGIN
/* Parameters used by the split kernels; we use a single struct to avoid passing each of these to every kernel individually. */
typedef struct SplitParams {
int x;
int y;
int w;
int h;
int offset;
int stride;
ccl_global uint *rng_state;
int start_sample;
int end_sample;
ccl_global unsigned int *work_pools;
unsigned int num_samples;
ccl_global int *queue_index;
int queue_size;
ccl_global char *use_queues_flag;
ccl_global float *buffer;
} SplitParams;
/* Global memory variables [porting]. This memory is used for
* co-operation between different kernels; data written by one
* kernel is available to other kernels via this global
* memory.
*/
/* SPLIT_DATA_ENTRY(type, name, num) */
#if defined(WITH_CYCLES_DEBUG) || defined(__KERNEL_DEBUG__)
/* DebugData memory */
# define SPLIT_DATA_DEBUG_ENTRIES \
SPLIT_DATA_ENTRY(DebugData, debug_data, 1)
#else
# define SPLIT_DATA_DEBUG_ENTRIES
#endif
#define SPLIT_DATA_ENTRIES \
SPLIT_DATA_ENTRY(ccl_global RNG, rng, 1) \
SPLIT_DATA_ENTRY(ccl_global float3, throughput, 1) \
SPLIT_DATA_ENTRY(ccl_global float, L_transparent, 1) \
SPLIT_DATA_ENTRY(PathRadiance, path_radiance, 1) \
SPLIT_DATA_ENTRY(ccl_global Ray, ray, 1) \
SPLIT_DATA_ENTRY(ccl_global PathState, path_state, 1) \
SPLIT_DATA_ENTRY(Intersection, isect, 1) \
SPLIT_DATA_ENTRY(ccl_global float3, ao_alpha, 1) \
SPLIT_DATA_ENTRY(ccl_global float3, ao_bsdf, 1) \
SPLIT_DATA_ENTRY(ccl_global Ray, ao_light_ray, 1) \
SPLIT_DATA_ENTRY(ccl_global BsdfEval, bsdf_eval, 1) \
SPLIT_DATA_ENTRY(ccl_global int, is_lamp, 1) \
SPLIT_DATA_ENTRY(ccl_global Ray, light_ray, 1) \
SPLIT_DATA_ENTRY(Intersection, isect_shadow, 2) \
SPLIT_DATA_ENTRY(ccl_global int, queue_data, (NUM_QUEUES*2)) /* TODO(mai): this is too large? */ \
SPLIT_DATA_ENTRY(ccl_global uint, work_array, 1) \
SPLIT_DATA_DEBUG_ENTRIES \
/* struct that holds pointers to data in the shared state buffer */
typedef struct SplitData {
#define SPLIT_DATA_ENTRY(type, name, num) type *name;
SPLIT_DATA_ENTRIES
#undef SPLIT_DATA_ENTRY
/* Size calculation for these is non-trivial, so they are left out of SPLIT_DATA_ENTRIES and handled separately. */
ShaderData *sd;
ShaderData *sd_DL_shadow;
ccl_global float *per_sample_output_buffers;
/* This is actually in a separate buffer from the rest of the split state data
* (so it can be read back to the host easily), but it is used the same way as
* the other data, so we keep it in this struct as well.
*/
ccl_global char *ray_state;
} SplitData;
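
Editor's note: as an illustration of the X-macro pattern used here (expansion shown for a single entry, purely for exposition):

    /* SPLIT_DATA_ENTRY(ccl_global Ray, ray, 1) expands inside SplitData to:
     *     ccl_global Ray *ray;
     * and, via the redefinitions in split_data_buffer_size() and
     * split_data_init() below, to the matching 16-byte-aligned size term and
     * pointer-carving statement for that member. */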
#define SIZEOF_SD(max_closure) (sizeof(ShaderData) - (sizeof(ShaderClosure) * (MAX_CLOSURE - (max_closure))))
ccl_device_inline size_t split_data_buffer_size(size_t num_elements,
size_t max_closure,
size_t per_thread_output_buffer_size)
{
size_t size = 0;
#define SPLIT_DATA_ENTRY(type, name, num) + align_up(num_elements * num * sizeof(type), 16)
size = size SPLIT_DATA_ENTRIES;
#undef SPLIT_DATA_ENTRY
/* TODO(sergey): This will actually over-allocate if a
* particular kernel does not support multiclosure.
*/
size += align_up(num_elements * SIZEOF_SD(max_closure), 16); /* sd */
size += align_up(2 * num_elements * SIZEOF_SD(max_closure), 16); /* sd_DL_shadow */
size += align_up(num_elements * per_thread_output_buffer_size, 16); /* per_sample_output_buffers */
return size;
}
ccl_device_inline void split_data_init(ccl_global SplitData *split_data,
size_t num_elements,
ccl_global void *data,
ccl_global char *ray_state)
{
ccl_global char *p = (ccl_global char*)data;
#define SPLIT_DATA_ENTRY(type, name, num) \
split_data->name = (type*)p; p += align_up(num_elements * num * sizeof(type), 16);
SPLIT_DATA_ENTRIES
#undef SPLIT_DATA_ENTRY
split_data->sd = (ShaderData*)p;
p += align_up(num_elements * SIZEOF_SD(MAX_CLOSURE), 16);
split_data->sd_DL_shadow = (ShaderData*)p;
p += align_up(2 * num_elements * SIZEOF_SD(MAX_CLOSURE), 16);
split_data->per_sample_output_buffers = (ccl_global float*)p;
//p += align_up(num_elements * per_thread_output_buffer_size, 16);
split_data->ray_state = ray_state;
}
#define kernel_split_state (kg->split_data)
#define kernel_split_params (kg->split_param_data)
CCL_NAMESPACE_END
#endif /* __KERNEL_SPLIT_DATA_H__ */
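
Editor's note: a minimal usage sketch of the two helpers above. The allocation step, the `pass_stride`, `state_buffer` and `ray_state` names, and the code that performs the initialization are assumptions for illustration, not part of this file:

    size_t num_elements = 0x10000;  /* number of rays kept in flight */
    size_t size = split_data_buffer_size(num_elements,
                                         MAX_CLOSURE,
                                         pass_stride * sizeof(float));
    /* One device allocation of `size` bytes backs the whole split state. */
    split_data_init(&kg->split_data, num_elements, state_buffer, ray_state);
    /* Afterwards kernel_split_state.ray[i], kernel_split_state.isect[i], etc.
     * all point into disjoint, 16-byte-aligned regions of that single buffer. */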

View File

@ -14,46 +14,44 @@
* limitations under the License.
*/
#include "../kernel_compat_opencl.h"
#include "../kernel_math.h"
#include "../kernel_types.h"
#include "../kernel_globals.h"
CCL_NAMESPACE_BEGIN
/* Since we process multiple samples in parallel, the output radiance of each
* sample is stored in a different location. This kernel combines the radiance
* contributed by all samples and stores the result in the RenderTile's output
* buffer.
*/
ccl_device void kernel_sum_all_radiance(
ccl_constant KernelData *data, /* To get pass_stride to offet into buffer */
ccl_global float *buffer, /* Output buffer of RenderTile */
ccl_global float *per_sample_output_buffer, /* Radiance contributed by all samples */
int parallel_samples, int sw, int sh, int stride,
int buffer_offset_x,
int buffer_offset_y,
int buffer_stride,
int start_sample)
ccl_device void kernel_sum_all_radiance(KernelGlobals *kg)
{
int x = get_global_id(0);
int y = get_global_id(1);
int x = ccl_global_id(0);
int y = ccl_global_id(1);
ccl_global float *buffer = kernel_split_params.buffer;
int sw = kernel_split_params.w;
int sh = kernel_split_params.h;
int stride = kernel_split_params.stride;
int start_sample = kernel_split_params.start_sample;
if(x < sw && y < sh) {
buffer += ((buffer_offset_x + x) + (buffer_offset_y + y) * buffer_stride) * (data->film.pass_stride);
per_sample_output_buffer += ((x + y * stride) * parallel_samples) * (data->film.pass_stride);
ccl_global float *per_sample_output_buffer = kernel_split_state.per_sample_output_buffers;
per_sample_output_buffer += (x + y * stride) * (kernel_data.film.pass_stride);
int sample_stride = (data->film.pass_stride);
x += kernel_split_params.x;
y += kernel_split_params.y;
buffer += (kernel_split_params.offset + x + y*stride) * (kernel_data.film.pass_stride);
int sample_iterator = 0;
int pass_stride_iterator = 0;
int num_floats = data->film.pass_stride;
int num_floats = kernel_data.film.pass_stride;
for(sample_iterator = 0; sample_iterator < parallel_samples; sample_iterator++) {
for(pass_stride_iterator = 0; pass_stride_iterator < num_floats; pass_stride_iterator++) {
*(buffer + pass_stride_iterator) =
(start_sample == 0 && sample_iterator == 0)
? *(per_sample_output_buffer + pass_stride_iterator)
: *(buffer + pass_stride_iterator) + *(per_sample_output_buffer + pass_stride_iterator);
}
per_sample_output_buffer += sample_stride;
for(pass_stride_iterator = 0; pass_stride_iterator < num_floats; pass_stride_iterator++) {
*(buffer + pass_stride_iterator) =
(start_sample == 0)
? *(per_sample_output_buffer + pass_stride_iterator)
: *(buffer + pass_stride_iterator) + *(per_sample_output_buffer + pass_stride_iterator);
}
}
}
CCL_NAMESPACE_END
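
Editor's note: a worked offset example for the new indexing, with hypothetical numbers (illustrative only):

    /* With kernel_data.film.pass_stride == 4, stride == 256, offset == 0,
     * tile origin (kernel_split_params.x, kernel_split_params.y) == (64, 32)
     * and tile-local (x, y) == (10, 2):
     *   per-sample read starts at float index  (10 + 2*256) * 4      == 2088
     *   render-buffer write starts at float index (0 + 74 + 34*256) * 4 == 35112 */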

View File

@ -397,11 +397,6 @@ ccl_device_inline float4 make_float4(float x, float y, float z, float w)
return a;
}
ccl_device_inline int align_up(int offset, int alignment)
{
return (offset + alignment - 1) & ~(alignment - 1);
}
ccl_device_inline int3 make_int3(int i)
{
#ifdef __KERNEL_SSE__
@ -476,6 +471,21 @@ ccl_device_inline int4 make_int4(const float3& f)
#endif
ccl_device_inline int align_up(int offset, int alignment)
{
return (offset + alignment - 1) & ~(alignment - 1);
}
ccl_device_inline int round_up(int x, int multiple)
{
return ((x + multiple - 1) / multiple) * multiple;
}
ccl_device_inline int round_down(int x, int multiple)
{
return (x / multiple) * multiple;
}
/* Interpolation types for textures
* cuda also use texture space to store other objects */
enum InterpolationType {