Cleanup: consistently use parallel_for without tbb namespace in Cycles

Author: Brecht Van Lommel, 2022-04-18 18:32:56 +02:00
Parent: 029b0df81a
Commit: 2cb76a6c8d
5 changed files with 26 additions and 26 deletions
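
This cleanup relies on Cycles pulling the TBB algorithms into its own namespace through a wrapper header (util/tbb.h), so call sites can drop the tbb:: prefix. A minimal sketch of that wrapper pattern follows; the contents are illustrative, not a copy of the real header:

#include <tbb/parallel_for.h>
#include <tbb/parallel_for_each.h>

/* Pull the TBB algorithms into the project namespace once; every file that
 * includes this header can then call parallel_for / parallel_for_each
 * unqualified. */
namespace ccl {
using tbb::parallel_for;
using tbb::parallel_for_each;
}  /* namespace ccl */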

@@ -459,7 +459,7 @@ bool MetalDeviceKernels::load(MetalDevice *device, int kernel_type)
tbb::task_arena local_arena(max_mtlcompiler_threads);
local_arena.execute([&]() {
- tbb::parallel_for(int(0), int(DEVICE_KERNEL_NUM), [&](int i) {
+ parallel_for(int(0), int(DEVICE_KERNEL_NUM), [&](int i) {
/* skip megakernel */
if (i == DEVICE_KERNEL_INTEGRATOR_MEGAKERNEL) {
return;
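
For context, the task_arena in this hunk caps how many worker threads the nested parallel_for may use (the Metal compiler thread limit in this case). A self-contained sketch of the same arena-plus-parallel_for pattern, with a hypothetical cap of 4 threads standing in for max_mtlcompiler_threads:

#include <tbb/parallel_for.h>
#include <tbb/task_arena.h>
#include <vector>

int main()
{
  tbb::task_arena arena(4); /* hypothetical cap, like max_mtlcompiler_threads */
  std::vector<int> squares(64);
  arena.execute([&]() {
    /* Work spawned inside execute() runs with at most the arena's concurrency. */
    tbb::parallel_for(0, 64, [&](int i) { squares[i] = i * i; });
  });
  return 0;
}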

@@ -44,7 +44,7 @@ inline void PassAccessorCPU::run_get_pass_kernel_processor_float(
const int pixel_stride = destination.pixel_stride ? destination.pixel_stride :
destination.num_components;
- tbb::parallel_for(0, buffer_params.window_height, [&](int64_t y) {
+ parallel_for(0, buffer_params.window_height, [&](int64_t y) {
const float *buffer = window_data + y * buffer_row_stride;
float *pixel = destination.pixels +
(y * buffer_params.width + destination.offset) * pixel_stride;
@@ -69,7 +69,7 @@ inline void PassAccessorCPU::run_get_pass_kernel_processor_half_rgba(
const int destination_stride = destination.stride != 0 ? destination.stride :
buffer_params.width;
- tbb::parallel_for(0, buffer_params.window_height, [&](int64_t y) {
+ parallel_for(0, buffer_params.window_height, [&](int64_t y) {
const float *buffer = window_data + y * buffer_row_stride;
half4 *pixel = dst_start + y * destination_stride;
func(kfilm_convert, buffer, pixel, buffer_params.window_width, pass_stride);

@@ -334,7 +334,7 @@ void PathTrace::init_render_buffers(const RenderWork &render_work)
/* Handle initialization scheduled by the render scheduler. */
if (render_work.init_render_buffers) {
- tbb::parallel_for_each(path_trace_works_, [&](unique_ptr<PathTraceWork> &path_trace_work) {
+ parallel_for_each(path_trace_works_, [&](unique_ptr<PathTraceWork> &path_trace_work) {
path_trace_work->zero_render_buffers();
});
@@ -357,7 +357,7 @@ void PathTrace::path_trace(RenderWork &render_work)
thread_capture_fp_settings();
- tbb::parallel_for(0, num_works, [&](int i) {
+ parallel_for(0, num_works, [&](int i) {
const double work_start_time = time_dt();
const int num_samples = render_work.path_trace.num_samples;
@@ -407,7 +407,7 @@ void PathTrace::adaptive_sample(RenderWork &render_work)
const double start_time = time_dt();
uint num_active_pixels = 0;
- tbb::parallel_for_each(path_trace_works_, [&](unique_ptr<PathTraceWork> &path_trace_work) {
+ parallel_for_each(path_trace_works_, [&](unique_ptr<PathTraceWork> &path_trace_work) {
const uint num_active_pixels_in_work =
path_trace_work->adaptive_sampling_converge_filter_count_active(
render_work.adaptive_sampling.threshold, render_work.adaptive_sampling.reset);
@@ -485,7 +485,7 @@ void PathTrace::cryptomatte_postprocess(const RenderWork &render_work)
}
VLOG(3) << "Perform cryptomatte work.";
- tbb::parallel_for_each(path_trace_works_, [&](unique_ptr<PathTraceWork> &path_trace_work) {
+ parallel_for_each(path_trace_works_, [&](unique_ptr<PathTraceWork> &path_trace_work) {
path_trace_work->cryptomatte_postproces();
});
}
@@ -538,7 +538,7 @@ void PathTrace::denoise(const RenderWork &render_work)
if (multi_device_buffers) {
multi_device_buffers->copy_from_device();
- tbb::parallel_for_each(
+ parallel_for_each(
path_trace_works_, [&multi_device_buffers](unique_ptr<PathTraceWork> &path_trace_work) {
path_trace_work->copy_from_denoised_render_buffers(multi_device_buffers.get());
});
@@ -808,7 +808,7 @@ void PathTrace::tile_buffer_read()
}
/* Read buffers back from device. */
- tbb::parallel_for_each(path_trace_works_, [&](unique_ptr<PathTraceWork> &path_trace_work) {
+ parallel_for_each(path_trace_works_, [&](unique_ptr<PathTraceWork> &path_trace_work) {
path_trace_work->copy_render_buffers_from_device();
});
@@ -816,7 +816,7 @@ void PathTrace::tile_buffer_read()
PathTraceTile tile(*this);
if (output_driver_->read_render_tile(tile)) {
/* Copy buffers to device again. */
- tbb::parallel_for_each(path_trace_works_, [](unique_ptr<PathTraceWork> &path_trace_work) {
+ parallel_for_each(path_trace_works_, [](unique_ptr<PathTraceWork> &path_trace_work) {
path_trace_work->copy_render_buffers_to_device();
});
}
@@ -880,20 +880,20 @@ void PathTrace::progress_set_status(const string &status, const string &substatu
void PathTrace::copy_to_render_buffers(RenderBuffers *render_buffers)
{
- tbb::parallel_for_each(path_trace_works_,
- [&render_buffers](unique_ptr<PathTraceWork> &path_trace_work) {
- path_trace_work->copy_to_render_buffers(render_buffers);
- });
+ parallel_for_each(path_trace_works_,
+ [&render_buffers](unique_ptr<PathTraceWork> &path_trace_work) {
+ path_trace_work->copy_to_render_buffers(render_buffers);
+ });
render_buffers->copy_to_device();
}
void PathTrace::copy_from_render_buffers(RenderBuffers *render_buffers)
{
render_buffers->copy_from_device();
- tbb::parallel_for_each(path_trace_works_,
- [&render_buffers](unique_ptr<PathTraceWork> &path_trace_work) {
- path_trace_work->copy_from_render_buffers(render_buffers);
- });
+ parallel_for_each(path_trace_works_,
+ [&render_buffers](unique_ptr<PathTraceWork> &path_trace_work) {
+ path_trace_work->copy_from_render_buffers(render_buffers);
+ });
}
bool PathTrace::copy_render_tile_from_device()
@@ -905,7 +905,7 @@ bool PathTrace::copy_render_tile_from_device()
bool success = true;
- tbb::parallel_for_each(path_trace_works_, [&](unique_ptr<PathTraceWork> &path_trace_work) {
+ parallel_for_each(path_trace_works_, [&](unique_ptr<PathTraceWork> &path_trace_work) {
if (!success) {
return;
}
@@ -1006,7 +1006,7 @@ bool PathTrace::get_render_tile_pixels(const PassAccessor &pass_accessor,
bool success = true;
- tbb::parallel_for_each(path_trace_works_, [&](unique_ptr<PathTraceWork> &path_trace_work) {
+ parallel_for_each(path_trace_works_, [&](unique_ptr<PathTraceWork> &path_trace_work) {
if (!success) {
return;
}
@@ -1023,7 +1023,7 @@ bool PathTrace::set_render_tile_pixels(PassAccessor &pass_accessor,
{
bool success = true;
- tbb::parallel_for_each(path_trace_works_, [&](unique_ptr<PathTraceWork> &path_trace_work) {
+ parallel_for_each(path_trace_works_, [&](unique_ptr<PathTraceWork> &path_trace_work) {
if (!success) {
return;
}
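
The path_trace.cpp hunks above all use the container overload of parallel_for_each, which takes the container itself rather than an iterator pair and invokes the body once per element. A standalone sketch of that overload on a vector of unique_ptr, loosely mirroring how path_trace_works_ is traversed (the Work type here is hypothetical):

#include <tbb/parallel_for_each.h>

#include <memory>
#include <vector>

struct Work {
  void run() { /* per-element work would go here */ }
};

int main()
{
  std::vector<std::unique_ptr<Work>> works;
  for (int i = 0; i < 8; i++) {
    works.push_back(std::make_unique<Work>());
  }
  /* The body receives a reference to each element and may run in parallel. */
  tbb::parallel_for_each(works, [](std::unique_ptr<Work> &work) { work->run(); });
  return 0;
}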

@@ -73,7 +73,7 @@ void PathTraceWorkCPU::render_samples(RenderStatistics &statistics,
tbb::task_arena local_arena = local_tbb_arena_create(device_);
local_arena.execute([&]() {
- tbb::parallel_for(int64_t(0), total_pixels_num, [&](int64_t work_index) {
+ parallel_for(int64_t(0), total_pixels_num, [&](int64_t work_index) {
if (is_cancel_requested()) {
return;
}
@@ -219,7 +219,7 @@ int PathTraceWorkCPU::adaptive_sampling_converge_filter_count_active(float thres
/* Check convergency and do x-filter in a single `parallel_for`, to reduce threading overhead. */
local_arena.execute([&]() {
- tbb::parallel_for(full_y, full_y + height, [&](int y) {
+ parallel_for(full_y, full_y + height, [&](int y) {
CPUKernelThreadGlobals *kernel_globals = &kernel_thread_globals_[0];
bool row_converged = true;
@@ -243,7 +243,7 @@ int PathTraceWorkCPU::adaptive_sampling_converge_filter_count_active(float thres
if (num_active_pixels) {
local_arena.execute([&]() {
- tbb::parallel_for(full_x, full_x + width, [&](int x) {
+ parallel_for(full_x, full_x + width, [&](int x) {
CPUKernelThreadGlobals *kernel_globals = &kernel_thread_globals_[0];
kernels_.adaptive_sampling_filter_y(
kernel_globals, render_buffer, x, full_y, height, offset, stride);
@@ -265,7 +265,7 @@ void PathTraceWorkCPU::cryptomatte_postproces()
/* Check convergency and do x-filter in a single `parallel_for`, to reduce threading overhead. */
local_arena.execute([&]() {
- tbb::parallel_for(0, height, [&](int y) {
+ parallel_for(0, height, [&](int y) {
CPUKernelThreadGlobals *kernel_globals = &kernel_thread_globals_[0];
int pixel_index = y * width;

@@ -92,7 +92,7 @@ bool ShaderEval::eval_cpu(Device *device,
tbb::task_arena local_arena(device->info.cpu_threads);
local_arena.execute([&]() {
- tbb::parallel_for(int64_t(0), work_size, [&](int64_t work_index) {
+ parallel_for(int64_t(0), work_size, [&](int64_t work_index) {
/* TODO: is this fast enough? */
if (progress_.get_cancel()) {
success = false;