Cycles: Fix usage of memory barriers in split kernel
On user level this fixes a dead-lock of OpenCL render on Intel Iris GPUs. Note that this patch does not include a change in the logic which allows or disallows OpenCL platforms to be used; that will happen after the kernel fix is known to be fine for the currently officially supported platforms. The dead-lock was caused by wrong usage of memory barriers: as per the OpenCL specification, a barrier is to be executed by the entire work group. This means that the following code is invalid: void foo() { if (some_condition) { return; } barrier(CLK_LOCAL_MEM_FENCE); } void bar() { foo(); } The Cycles code described this as invalid only on CPU, while in fact it is invalid as per the specification. From the implementation side, this change removes the ifdefs around the CPU-only barrier logic and implements similar logic in the shader setup kernel. Tested on a NUC8i7HVK NUC. The root cause of the dead-lock was identified by Max Dmitrichenko. There is no measurable difference in performance on currently supported OpenCL platforms. Differential Revision: https://developer.blender.org/D9039
This commit is contained in:
parent
dac242b993
commit
23bf3b09dd
Notes:
blender-bot
2023-02-14 05:36:11 +01:00
Referenced by issue #77348, Blender LTS: Maintenance Task 2.83
|
@ -59,23 +59,7 @@ ccl_device void kernel_buffer_update(KernelGlobals *kg,
|
|||
kernel_split_params.queue_size,
|
||||
1);
|
||||
|
||||
#ifdef __COMPUTE_DEVICE_GPU__
|
||||
/* If we are executing on a GPU device, we exit all threads that are not
|
||||
* required.
|
||||
*
|
||||
* If we are executing on a CPU device, then we need to keep all threads
|
||||
* active since we have barrier() calls later in the kernel. CPU devices,
|
||||
* expect all threads to execute barrier statement.
|
||||
*/
|
||||
if (ray_index == QUEUE_EMPTY_SLOT) {
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef __COMPUTE_DEVICE_GPU__
|
||||
if (ray_index != QUEUE_EMPTY_SLOT) {
|
||||
#endif
|
||||
|
||||
ccl_global char *ray_state = kernel_split_state.ray_state;
|
||||
ccl_global PathState *state = &kernel_split_state.path_state[ray_index];
|
||||
PathRadiance *L = &kernel_split_state.path_radiance[ray_index];
|
||||
|
@ -153,10 +137,7 @@ ccl_device void kernel_buffer_update(KernelGlobals *kg,
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
#ifndef __COMPUTE_DEVICE_GPU__
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Enqueue RAY_REGENERATED rays into QUEUE_ACTIVE_AND_REGENERATED_RAYS;
|
||||
* These rays will be made active during next SceneIntersectkernel.
|
||||
|
|
|
@ -73,23 +73,7 @@ ccl_device void kernel_holdout_emission_blurring_pathtermination_ao(
|
|||
kernel_split_params.queue_size,
|
||||
0);
|
||||
|
||||
#ifdef __COMPUTE_DEVICE_GPU__
|
||||
/* If we are executing on a GPU device, we exit all threads that are not
|
||||
* required.
|
||||
*
|
||||
* If we are executing on a CPU device, then we need to keep all threads
|
||||
* active since we have barrier() calls later in the kernel. CPU devices,
|
||||
* expect all threads to execute barrier statement.
|
||||
*/
|
||||
if (ray_index == QUEUE_EMPTY_SLOT) {
|
||||
return;
|
||||
}
|
||||
#endif /* __COMPUTE_DEVICE_GPU__ */
|
||||
|
||||
#ifndef __COMPUTE_DEVICE_GPU__
|
||||
if (ray_index != QUEUE_EMPTY_SLOT) {
|
||||
#endif
|
||||
|
||||
ccl_global PathState *state = 0x0;
|
||||
float3 throughput;
|
||||
|
||||
|
@ -148,10 +132,7 @@ ccl_device void kernel_holdout_emission_blurring_pathtermination_ao(
|
|||
}
|
||||
}
|
||||
#endif /* __AO__ */
|
||||
|
||||
#ifndef __COMPUTE_DEVICE_GPU__
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef __AO__
|
||||
/* Enqueue to-shadow-ray-cast rays. */
|
||||
|
|
|
@ -33,18 +33,15 @@ ccl_device void kernel_shader_setup(KernelGlobals *kg,
|
|||
|
||||
int ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
|
||||
int queue_index = kernel_split_params.queue_index[QUEUE_ACTIVE_AND_REGENERATED_RAYS];
|
||||
if (ray_index >= queue_index) {
|
||||
return;
|
||||
}
|
||||
ray_index = get_ray_index(kg,
|
||||
ray_index,
|
||||
QUEUE_ACTIVE_AND_REGENERATED_RAYS,
|
||||
kernel_split_state.queue_data,
|
||||
kernel_split_params.queue_size,
|
||||
0);
|
||||
|
||||
if (ray_index == QUEUE_EMPTY_SLOT) {
|
||||
return;
|
||||
if (ray_index < queue_index) {
|
||||
ray_index = get_ray_index(kg,
|
||||
ray_index,
|
||||
QUEUE_ACTIVE_AND_REGENERATED_RAYS,
|
||||
kernel_split_state.queue_data,
|
||||
kernel_split_params.queue_size,
|
||||
0);
|
||||
} else {
|
||||
ray_index = QUEUE_EMPTY_SLOT;
|
||||
}
|
||||
|
||||
char enqueue_flag = (IS_STATE(kernel_split_state.ray_state, ray_index, RAY_TO_REGENERATE)) ? 1 :
|
||||
|
|
Loading…
Reference in New Issue