Cleanup: remove some unused Cycles GPU code

This makes porting to other architectures easier by clarifying that this code does
not need to be supported. The unused parallel_reduce implementation assumed a warp
size of 32, but it is easy to update if we ever need it again in the future.
Brecht Van Lommel 2022-03-16 14:38:23 +01:00
parent 213d00607e
commit 076079454f
5 changed files with 0 additions and 78 deletions
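The warp size 32 assumption mentioned above sits in the shuffle-based tail of the removed gpu_parallel_sum (the deleted file is shown further down). One possible way to drop that assumption, sketched here as plain CUDA rather than Cycles code, is to keep the entire tree reduction in shared memory; the kernel name block_sum, the float-only types and the fixed BLOCK_SIZE are illustrative choices, not anything from this commit.

/* Hypothetical sketch (not Cycles code): a block-wide partial sum with no
 * hard-coded warp size. Each block reduces its grid-stride slice of `in`
 * into one value in out[blockIdx.x]. BLOCK_SIZE must be a power of two. */
template<unsigned BLOCK_SIZE>
__global__ void block_sum(const float *in, float *out, unsigned n)
{
  __shared__ float shared[BLOCK_SIZE];
  const unsigned tid = threadIdx.x;

  /* Grid-stride accumulation, the same idea as the removed gpu_parallel_sum. */
  float sum = 0.0f;
  for (unsigned i = blockIdx.x * BLOCK_SIZE + tid; i < n; i += BLOCK_SIZE * gridDim.x) {
    sum += in[i];
  }
  shared[tid] = sum;
  __syncthreads();

  /* Shared-memory tree reduction all the way down to one element: nothing
   * here depends on the warp being 32 wide (or 64 wide on some HIP GPUs),
   * at the cost of one __syncthreads() per halving step. */
  for (unsigned offset = BLOCK_SIZE / 2; offset > 0; offset >>= 1) {
    if (tid < offset) {
      shared[tid] += shared[tid + offset];
    }
    __syncthreads();
  }

  if (tid == 0) {
    out[blockIdx.x] = shared[0];
  }
}

Avoiding that extra synchronization per step is exactly why the removed implementation switched to warp shuffles for the last 32 lanes, which is where its warp size assumption came from.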

@@ -50,7 +50,6 @@ set(SRC_KERNEL_DEVICE_GPU_HEADERS
device/gpu/kernel.h
device/gpu/parallel_active_index.h
device/gpu/parallel_prefix_sum.h
device/gpu/parallel_reduce.h
device/gpu/parallel_sorted_index.h
device/gpu/work_stealing.h
)

@@ -72,7 +72,6 @@ typedef unsigned long long uint64_t;
#define ccl_gpu_syncthreads() __syncthreads()
#define ccl_gpu_ballot(predicate) __ballot_sync(0xFFFFFFFF, predicate)
#define ccl_gpu_shfl_down_sync(mask, var, detla) __shfl_down_sync(mask, var, detla)
/* GPU texture objects */

@@ -1,74 +0,0 @@
/* SPDX-License-Identifier: Apache-2.0
 * Copyright 2021-2022 Blender Foundation */

#pragma once

CCL_NAMESPACE_BEGIN

/* Parallel sum of array input_data with size n into output_sum.
 *
 * Adapted from "Optimizing Parallel Reduction in CUDA", Mark Harris.
 *
 * This version adds multiple elements per thread sequentially. This reduces
 * the overall cost of the algorithm while keeping the work complexity O(n) and
 * the step complexity O(log n). (Brent's Theorem optimization) */

#ifdef __HIP__
#  define GPU_PARALLEL_SUM_DEFAULT_BLOCK_SIZE 1024
#else
#  define GPU_PARALLEL_SUM_DEFAULT_BLOCK_SIZE 512
#endif

template<uint blocksize, typename InputT, typename OutputT, typename ConvertOp>
__device__ void gpu_parallel_sum(
    const InputT *input_data, const uint n, OutputT *output_sum, OutputT zero, ConvertOp convert)
{
  extern ccl_gpu_shared OutputT shared_data[];

  const uint tid = ccl_gpu_thread_idx_x;
  const uint gridsize = blocksize * ccl_gpu_grid_dim_x();

  OutputT sum = zero;
  for (uint i = ccl_gpu_block_idx_x * blocksize + tid; i < n; i += gridsize) {
    sum += convert(input_data[i]);
  }

  shared_data[tid] = sum;
  ccl_gpu_syncthreads();

  if (blocksize >= 512 && tid < 256) {
    shared_data[tid] = sum = sum + shared_data[tid + 256];
  }
  ccl_gpu_syncthreads();

  if (blocksize >= 256 && tid < 128) {
    shared_data[tid] = sum = sum + shared_data[tid + 128];
  }
  ccl_gpu_syncthreads();

  if (blocksize >= 128 && tid < 64) {
    shared_data[tid] = sum = sum + shared_data[tid + 64];
  }
  ccl_gpu_syncthreads();

  if (blocksize >= 64 && tid < 32) {
    shared_data[tid] = sum = sum + shared_data[tid + 32];
  }
  ccl_gpu_syncthreads();

  if (tid < 32) {
    for (int offset = ccl_gpu_warp_size / 2; offset > 0; offset /= 2) {
      sum += ccl_shfl_down_sync(0xFFFFFFFF, sum, offset);
    }
  }

  if (tid == 0) {
    output_sum[ccl_gpu_block_idx_x] = sum;
  }
}

CCL_NAMESPACE_END
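gpu_parallel_sum above only produces one partial sum per block in output_sum, so any caller still needs a second pass that combines the partials. A hypothetical host-side driver for that pattern is sketched below, written against the plain CUDA runtime API and reusing the illustrative block_sum kernel from the sketch near the top of this page; sum_on_gpu, the fixed grid of 256 blocks and the final CPU pass are assumptions for illustration, not anything from Cycles.

#include <cuda_runtime.h>

#include <vector>

/* Hypothetical driver (plain CUDA, not Cycles code): launch one block_sum
 * kernel, copy the per-block partial sums back and add them on the CPU. */
float sum_on_gpu(const float *d_input, unsigned n)
{
  constexpr unsigned block_size = 512; /* GPU_PARALLEL_SUM_DEFAULT_BLOCK_SIZE on CUDA. */
  constexpr unsigned num_blocks = 256; /* The grid-stride loop covers any n. */

  float *d_partials = nullptr;
  cudaMalloc(&d_partials, num_blocks * sizeof(float));

  block_sum<block_size><<<num_blocks, block_size>>>(d_input, d_partials, n);

  /* cudaMemcpy on the default stream waits for the kernel to finish. */
  std::vector<float> partials(num_blocks);
  cudaMemcpy(partials.data(), d_partials, num_blocks * sizeof(float), cudaMemcpyDeviceToHost);
  cudaFree(d_partials);

  float total = 0.0f;
  for (const float p : partials) {
    total += p;
  }
  return total;
}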

@@ -71,7 +71,6 @@ typedef unsigned long long uint64_t;
#define ccl_gpu_syncthreads() __syncthreads()
#define ccl_gpu_ballot(predicate) __ballot(predicate)
#define ccl_gpu_shfl_down_sync(mask, var, detla) __shfl_down(var, detla)
/* GPU texture objects */
typedef hipTextureObject_t ccl_gpu_tex_object;

@@ -74,7 +74,6 @@ typedef unsigned long long uint64_t;
#define ccl_gpu_syncthreads() __syncthreads()
#define ccl_gpu_ballot(predicate) __ballot_sync(0xFFFFFFFF, predicate)
#define ccl_gpu_shfl_down_sync(mask, var, detla) __shfl_down_sync(mask, var, detla)
/* GPU texture objects */