BLI_task: Add pooled threaded index range iterator, Take II.

This code makes it possible to push a set of different operations, each based on
an iteration over a range of indices, and then process them all at once over
multiple threads.
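
A minimal usage sketch of the new API (the callback and caller names below are
made up for illustration, they are not part of the commit):

  #include "BLI_task.h"

  /* Hypothetical callback: store the index in the per-range userdata array. */
  static void my_range_func(void *userdata, int index, const TaskParallelTLS *__restrict tls)
  {
    (void)tls; /* No TLS data used in this sketch. */
    int *values = (int *)userdata;
    values[index] = index;
  }

  static void my_run_pooled_ranges(int (*data)[10000], const int num_ranges)
  {
    TaskParallelSettings settings;
    BLI_parallel_range_settings_defaults(&settings);

    TaskParallelRangePool *range_pool = BLI_task_parallel_range_pool_init(&settings);
    for (int j = 0; j < num_ranges; j++) {
      /* Each pushed range may use its own userdata, callback and 'tls' settings. */
      BLI_task_parallel_range_pool_push(
          range_pool, 0, 10000, data[j], my_range_func, &settings);
    }
    /* All pushed ranges are processed together over the worker threads. */
    BLI_task_parallel_range_pool_work_and_wait(range_pool);
    BLI_task_parallel_range_pool_free(range_pool);
  }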

This commit also adds unit tests for both the old un-pooled and the new pooled
task_parallel_range families of functions, as well as some basic
performance tests.

As expected, this is mainly interesting for a relatively low number of
individual tasks.

E.g. performance tests on a 32-thread machine, for a set of 10
different tasks, show the following improvements when using the pooled version
instead of ten sequential calls to BLI_task_parallel_range():

| Num Items | Sequential | Pooled  | Speed-up |
| --------- | ---------- | ------- | -------- |
|       10K |     365 us |  138 us |   2.5  x |
|      100K |     877 us |  530 us |   1.66 x |
|     1000K |    5521 us | 4625 us |   1.25 x |

Differential Revision: https://developer.blender.org/D6189

Note: compared to yesterday's commit, this reworks the atomic handling in the
parallel iterator code and fixes a silly double-free bug.

Synchronization now relies only on the two critical values as returned by the
atomic calls themselves, which is the proper way to do things.

Reading a value after an atomic operation does not guarantee you will
get the latest value in all cases (especially on Windows release builds,
it seems).
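
A hedged sketch of the rule being applied (illustrative only, not a literal
before/after excerpt from this commit), assuming 'state', 'range_pool' and
'chunk_size' in scope as in parallel_range_next_iter_get() below:

  #include "atomic_ops.h"

  /* Risky: a separate plain read after the atomic op may observe a stale value
   * (this is the kind of pattern that misbehaved on Windows release builds). */
  atomic_fetch_and_add_int32(&state->iter_value, chunk_size);
  int stale_previter = state->iter_value - chunk_size;

  /* Proper: only the value returned by the atomic call itself is used. */
  int previter = atomic_fetch_and_add_int32(&state->iter_value, chunk_size);

  /* Likewise, reading a shared pointer goes through an atomic primitive
   * (a no-op CAS) rather than a plain load. */
  TaskParallelRangeState *current_state = atomic_cas_ptr(
      (void **)&range_pool->current_state, NULL, NULL);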
Bastien Montagne 2019-11-26 14:26:47 +01:00
parent 9ecc30250a
commit fcbec6e97e
4 changed files with 604 additions and 88 deletions


@@ -196,9 +196,22 @@ void BLI_task_parallel_range(const int start,
const int stop,
void *userdata,
TaskParallelRangeFunc func,
const TaskParallelSettings *settings);
TaskParallelSettings *settings);
/* This data is shared between all tasks, its access needs thread lock or similar protection. */
typedef struct TaskParallelRangePool TaskParallelRangePool;
struct TaskParallelRangePool *BLI_task_parallel_range_pool_init(
const struct TaskParallelSettings *settings);
void BLI_task_parallel_range_pool_push(struct TaskParallelRangePool *range_pool,
const int start,
const int stop,
void *userdata,
TaskParallelRangeFunc func,
const struct TaskParallelSettings *settings);
void BLI_task_parallel_range_pool_work_and_wait(struct TaskParallelRangePool *range_pool);
void BLI_task_parallel_range_pool_free(struct TaskParallelRangePool *range_pool);
/* This data is shared between all tasks, its access needs thread lock or similar protection.
*/
typedef struct TaskParallelIteratorStateShared {
/* Maximum amount of items to acquire at once. */
int chunk_size;


@@ -1042,15 +1042,56 @@ void BLI_task_pool_delayed_push_end(TaskPool *pool, int thread_id)
if (((_mem) != NULL) && ((_size) > 8192)) \
MEM_freeN((_mem))
typedef struct ParallelRangeState {
int start, stop;
void *userdata;
/* Stores all data needed to perform a parallelized iteration,
* with the same operation (callback function).
* It can be chained with other tasks in a singly-linked list. */
typedef struct TaskParallelRangeState {
struct TaskParallelRangeState *next;
/* Start and end point of integer value iteration. */
int start, stop;
/* User-defined data, shared between all worker threads. */
void *userdata_shared;
/* User-defined callback function called for each value in [start, stop[ specified range. */
TaskParallelRangeFunc func;
int iter;
/* Each instance of looping chunks will get a copy of this data
* (similar to OpenMP's firstprivate).
*/
void *initial_tls_memory; /* Pointer to actual user-defined 'tls' data. */
size_t tls_data_size; /* Size of that data. */
void *flatten_tls_storage; /* 'tls' copies of initial_tls_memory for each running task. */
/* Number of 'tls' copies in the array, i.e. number of worker threads. */
size_t num_elements_in_tls_storage;
/* Function called from the calling thread once the whole range has been processed. */
TaskParallelFinalizeFunc func_finalize;
/* Current value of the iterator, shared between all threads (atomically updated). */
int iter_value;
int iter_chunk_num; /* Amount of iterations to process in a single step. */
} TaskParallelRangeState;
/* Stores all the parallel tasks for a single pool. */
typedef struct TaskParallelRangePool {
/* The workers' task pool. */
TaskPool *pool;
/* The number of worker tasks we need to create. */
int num_tasks;
/* The total number of iterations in all the added ranges. */
int num_total_iters;
/* The size (number of items) processed at once by a worker task. */
int chunk_size;
} ParallelRangeState;
/* Linked list of range tasks to process. */
TaskParallelRangeState *parallel_range_states;
/* Current range task being processed, swapped atomically. */
TaskParallelRangeState *current_state;
/* Scheduling settings common to all tasks. */
TaskParallelSettings *settings;
} TaskParallelRangePool;
BLI_INLINE void task_parallel_calc_chunk_size(const TaskParallelSettings *settings,
const int tot_items,
@@ -1113,66 +1154,114 @@ BLI_INLINE void task_parallel_calc_chunk_size(const TaskParallelSettin
}
}
BLI_INLINE void task_parallel_range_calc_chunk_size(const TaskParallelSettings *settings,
const int num_tasks,
ParallelRangeState *state)
BLI_INLINE void task_parallel_range_calc_chunk_size(TaskParallelRangePool *range_pool)
{
int num_iters = 0;
int min_num_iters = INT_MAX;
for (TaskParallelRangeState *state = range_pool->parallel_range_states; state != NULL;
state = state->next) {
const int ni = state->stop - state->start;
num_iters += ni;
if (min_num_iters > ni) {
min_num_iters = ni;
}
}
range_pool->num_total_iters = num_iters;
/* Note: Passing min_num_iters here instead of num_iters kind of partially breaks the 'static'
* scheduling, but the pooled range iterator is inherently non-static anyway, so adding a small
* level of dynamic scheduling here should be fine. */
task_parallel_calc_chunk_size(
settings, state->stop - state->start, num_tasks, &state->chunk_size);
range_pool->settings, min_num_iters, range_pool->num_tasks, &range_pool->chunk_size);
}
BLI_INLINE bool parallel_range_next_iter_get(ParallelRangeState *__restrict state,
int *__restrict iter,
int *__restrict count)
BLI_INLINE bool parallel_range_next_iter_get(TaskParallelRangePool *__restrict range_pool,
int *__restrict r_iter,
int *__restrict r_count,
TaskParallelRangeState **__restrict r_state)
{
int previter = atomic_fetch_and_add_int32(&state->iter, state->chunk_size);
/* We need an atomic op here as well to fetch the initial state, since some other thread might
* have already updated it. */
TaskParallelRangeState *current_state = atomic_cas_ptr(
(void **)&range_pool->current_state, NULL, NULL);
*iter = previter;
*count = max_ii(0, min_ii(state->chunk_size, state->stop - previter));
int previter = INT32_MAX;
return (previter < state->stop);
while (current_state != NULL && previter >= current_state->stop) {
previter = atomic_fetch_and_add_int32(&current_state->iter_value, range_pool->chunk_size);
*r_iter = previter;
*r_count = max_ii(0, min_ii(range_pool->chunk_size, current_state->stop - previter));
if (previter >= current_state->stop) {
/* At this point the state we got is done, we need to go to the next one. In case some other
* thread already did it, this does nothing, and we'll just get the current valid state
* at the start of the next loop. */
TaskParallelRangeState *current_state_from_atomic_cas = atomic_cas_ptr(
(void **)&range_pool->current_state, current_state, current_state->next);
if (current_state == current_state_from_atomic_cas) {
/* The atomic CAS operation was successful, we did update range_pool->current_state, so we
* can safely switch to next state. */
current_state = current_state->next;
}
else {
/* The atomic CAS operation failed, but we still got range_pool->current_state value out of
* it, just use it as our new current state. */
current_state = current_state_from_atomic_cas;
}
}
}
*r_state = current_state;
return (current_state != NULL && previter < current_state->stop);
}
static void parallel_range_func(TaskPool *__restrict pool, void *userdata_chunk, int thread_id)
static void parallel_range_func(TaskPool *__restrict pool, void *tls_data_idx, int thread_id)
{
ParallelRangeState *__restrict state = BLI_task_pool_userdata(pool);
TaskParallelRangePool *__restrict range_pool = BLI_task_pool_userdata(pool);
TaskParallelTLS tls = {
.thread_id = thread_id,
.userdata_chunk = userdata_chunk,
.userdata_chunk = NULL,
};
TaskParallelRangeState *state;
int iter, count;
while (parallel_range_next_iter_get(state, &iter, &count)) {
while (parallel_range_next_iter_get(range_pool, &iter, &count, &state)) {
tls.userdata_chunk = (char *)state->flatten_tls_storage +
(((size_t)POINTER_AS_INT(tls_data_idx)) * state->tls_data_size);
for (int i = 0; i < count; i++) {
state->func(state->userdata, iter + i, &tls);
state->func(state->userdata_shared, iter + i, &tls);
}
}
}
static void parallel_range_single_thread(const int start,
int const stop,
void *userdata,
TaskParallelRangeFunc func,
const TaskParallelSettings *settings)
static void parallel_range_single_thread(TaskParallelRangePool *range_pool)
{
void *userdata_chunk = settings->userdata_chunk;
const size_t userdata_chunk_size = settings->userdata_chunk_size;
void *userdata_chunk_local = NULL;
const bool use_userdata_chunk = (userdata_chunk_size != 0) && (userdata_chunk != NULL);
if (use_userdata_chunk) {
userdata_chunk_local = MALLOCA(userdata_chunk_size);
memcpy(userdata_chunk_local, userdata_chunk, userdata_chunk_size);
for (TaskParallelRangeState *state = range_pool->parallel_range_states; state != NULL;
state = state->next) {
const int start = state->start;
const int stop = state->stop;
void *userdata = state->userdata_shared;
TaskParallelRangeFunc func = state->func;
void *initial_tls_memory = state->initial_tls_memory;
const size_t tls_data_size = state->tls_data_size;
void *flatten_tls_storage = NULL;
const bool use_tls_data = (tls_data_size != 0) && (initial_tls_memory != NULL);
if (use_tls_data) {
flatten_tls_storage = MALLOCA(tls_data_size);
memcpy(flatten_tls_storage, initial_tls_memory, tls_data_size);
}
TaskParallelTLS tls = {
.thread_id = 0,
.userdata_chunk = flatten_tls_storage,
};
for (int i = start; i < stop; i++) {
func(userdata, i, &tls);
}
if (state->func_finalize != NULL) {
state->func_finalize(userdata, flatten_tls_storage);
}
MALLOCA_FREE(flatten_tls_storage, tls_data_size);
}
TaskParallelTLS tls = {
.thread_id = 0,
.userdata_chunk = userdata_chunk_local,
};
for (int i = start; i < stop; i++) {
func(userdata, i, &tls);
}
if (settings->func_finalize != NULL) {
settings->func_finalize(userdata, userdata_chunk_local);
}
MALLOCA_FREE(userdata_chunk_local, userdata_chunk_size);
}
/**
@@ -1185,78 +1274,82 @@ void BLI_task_parallel_range(const int start,
const int stop,
void *userdata,
TaskParallelRangeFunc func,
const TaskParallelSettings *settings)
TaskParallelSettings *settings)
{
TaskScheduler *task_scheduler;
TaskPool *task_pool;
ParallelRangeState state;
int i, num_threads, num_tasks;
void *userdata_chunk = settings->userdata_chunk;
const size_t userdata_chunk_size = settings->userdata_chunk_size;
void *userdata_chunk_local = NULL;
void *userdata_chunk_array = NULL;
const bool use_userdata_chunk = (userdata_chunk_size != 0) && (userdata_chunk != NULL);
if (start == stop) {
return;
}
BLI_assert(start < stop);
if (userdata_chunk_size != 0) {
BLI_assert(userdata_chunk != NULL);
TaskParallelRangeState state = {
.next = NULL,
.start = start,
.stop = stop,
.userdata_shared = userdata,
.func = func,
.iter_value = start,
.initial_tls_memory = settings->userdata_chunk,
.tls_data_size = settings->userdata_chunk_size,
.func_finalize = settings->func_finalize,
};
TaskParallelRangePool range_pool = {
.pool = NULL, .parallel_range_states = &state, .current_state = NULL, .settings = settings};
int i, num_threads, num_tasks;
void *tls_data = settings->userdata_chunk;
const size_t tls_data_size = settings->userdata_chunk_size;
if (tls_data_size != 0) {
BLI_assert(tls_data != NULL);
}
const bool use_tls_data = (tls_data_size != 0) && (tls_data != NULL);
void *flatten_tls_storage = NULL;
/* If it's not enough data to be crunched, don't bother with tasks at all,
* do everything from the main thread.
* do everything from the current thread.
*/
if (!settings->use_threading) {
parallel_range_single_thread(start, stop, userdata, func, settings);
parallel_range_single_thread(&range_pool);
return;
}
task_scheduler = BLI_task_scheduler_get();
TaskScheduler *task_scheduler = BLI_task_scheduler_get();
num_threads = BLI_task_scheduler_num_threads(task_scheduler);
/* The idea here is to prevent creating task for each of the loop iterations
* and instead have tasks which are evenly distributed across CPU cores and
* pull next iter to be crunched using the queue.
*/
num_tasks = num_threads + 2;
range_pool.num_tasks = num_tasks = num_threads + 2;
state.start = start;
state.stop = stop;
state.userdata = userdata;
state.func = func;
state.iter = start;
task_parallel_range_calc_chunk_size(settings, num_tasks, &state);
num_tasks = min_ii(num_tasks, max_ii(1, (stop - start) / state.chunk_size));
task_parallel_range_calc_chunk_size(&range_pool);
range_pool.num_tasks = num_tasks = min_ii(num_tasks,
max_ii(1, (stop - start) / range_pool.chunk_size));
if (num_tasks == 1) {
parallel_range_single_thread(start, stop, userdata, func, settings);
parallel_range_single_thread(&range_pool);
return;
}
task_pool = BLI_task_pool_create_suspended(task_scheduler, &state);
TaskPool *task_pool = range_pool.pool = BLI_task_pool_create_suspended(task_scheduler,
&range_pool);
/* NOTE: This way we are adding a memory barrier and ensure all worker
* threads can read and modify the value, without any locks. */
atomic_fetch_and_add_int32(&state.iter, 0);
range_pool.current_state = &state;
if (use_userdata_chunk) {
userdata_chunk_array = MALLOCA(userdata_chunk_size * num_tasks);
if (use_tls_data) {
state.flatten_tls_storage = flatten_tls_storage = MALLOCA(tls_data_size * (size_t)num_tasks);
state.tls_data_size = tls_data_size;
}
for (i = 0; i < num_tasks; i++) {
if (use_userdata_chunk) {
userdata_chunk_local = (char *)userdata_chunk_array + (userdata_chunk_size * i);
memcpy(userdata_chunk_local, userdata_chunk, userdata_chunk_size);
if (use_tls_data) {
void *userdata_chunk_local = (char *)flatten_tls_storage + (tls_data_size * (size_t)i);
memcpy(userdata_chunk_local, tls_data, tls_data_size);
}
/* Use this pool's pre-allocated tasks. */
BLI_task_pool_push_from_thread(task_pool,
parallel_range_func,
userdata_chunk_local,
POINTER_FROM_INT(i),
false,
TASK_PRIORITY_HIGH,
task_pool->thread_id);
@@ -1265,17 +1358,221 @@ void BLI_task_parallel_range(const int start,
BLI_task_pool_work_and_wait(task_pool);
BLI_task_pool_free(task_pool);
if (use_userdata_chunk) {
if (use_tls_data) {
if (settings->func_finalize != NULL) {
for (i = 0; i < num_tasks; i++) {
userdata_chunk_local = (char *)userdata_chunk_array + (userdata_chunk_size * i);
void *userdata_chunk_local = (char *)flatten_tls_storage + (tls_data_size * (size_t)i);
settings->func_finalize(userdata, userdata_chunk_local);
}
}
MALLOCA_FREE(userdata_chunk_array, userdata_chunk_size * num_tasks);
MALLOCA_FREE(flatten_tls_storage, tls_data_size * (size_t)num_tasks);
}
}
/**
* Initialize a task pool to parallelize several for loops at the same time.
*
* See public API doc of ParallelRangeSettings for description of all settings.
* Note that loop-specific settings (like 'tls' data or finalize function) must be left NULL here.
* Only settings controlling how iteration is parallelized must be defined, as those will affect
* all loops added to that pool.
*/
TaskParallelRangePool *BLI_task_parallel_range_pool_init(const TaskParallelSettings *settings)
{
TaskParallelRangePool *range_pool = MEM_callocN(sizeof(*range_pool), __func__);
BLI_assert(settings->userdata_chunk == NULL);
BLI_assert(settings->func_finalize == NULL);
range_pool->settings = MEM_mallocN(sizeof(*range_pool->settings), __func__);
*range_pool->settings = *settings;
return range_pool;
}
/**
* Add a loop task to the pool. This does not execute it at all, it only registers it.
*
* See public API doc of ParallelRangeSettings for description of all settings.
* Note that only 'tls'-related data are used here.
*/
void BLI_task_parallel_range_pool_push(TaskParallelRangePool *range_pool,
const int start,
const int stop,
void *userdata,
TaskParallelRangeFunc func,
const TaskParallelSettings *settings)
{
BLI_assert(range_pool->pool == NULL);
if (start == stop) {
return;
}
BLI_assert(start < stop);
if (settings->userdata_chunk_size != 0) {
BLI_assert(settings->userdata_chunk != NULL);
}
TaskParallelRangeState *state = MEM_callocN(sizeof(*state), __func__);
state->start = start;
state->stop = stop;
state->userdata_shared = userdata;
state->func = func;
state->iter_value = start;
state->initial_tls_memory = settings->userdata_chunk;
state->tls_data_size = settings->userdata_chunk_size;
state->func_finalize = settings->func_finalize;
state->next = range_pool->parallel_range_states;
range_pool->parallel_range_states = state;
}
static void parallel_range_func_finalize(TaskPool *__restrict pool,
void *v_state,
int UNUSED(thread_id))
{
TaskParallelRangePool *__restrict range_pool = BLI_task_pool_userdata(pool);
TaskParallelRangeState *state = v_state;
for (int i = 0; i < range_pool->num_tasks; i++) {
void *tls_data = (char *)state->flatten_tls_storage + (state->tls_data_size * (size_t)i);
state->func_finalize(state->userdata_shared, tls_data);
}
}
/**
* Run all tasks pushed to the range_pool.
*
* Note that the range pool is re-usable (you may push new tasks into it and call this function
* again).
*/
void BLI_task_parallel_range_pool_work_and_wait(TaskParallelRangePool *range_pool)
{
BLI_assert(range_pool->pool == NULL);
/* If it's not enough data to be crunched, don't bother with tasks at all,
* do everything from the current thread.
*/
if (!range_pool->settings->use_threading) {
parallel_range_single_thread(range_pool);
return;
}
TaskScheduler *task_scheduler = BLI_task_scheduler_get();
const int num_threads = BLI_task_scheduler_num_threads(task_scheduler);
/* The idea here is to prevent creating task for each of the loop iterations
* and instead have tasks which are evenly distributed across CPU cores and
* pull next iter to be crunched using the queue.
*/
int num_tasks = num_threads + 2;
range_pool->num_tasks = num_tasks;
task_parallel_range_calc_chunk_size(range_pool);
range_pool->num_tasks = num_tasks = min_ii(
num_tasks, max_ii(1, range_pool->num_total_iters / range_pool->chunk_size));
if (num_tasks == 1) {
parallel_range_single_thread(range_pool);
return;
}
/* We create all 'tls' data here in a single loop. */
for (TaskParallelRangeState *state = range_pool->parallel_range_states; state != NULL;
state = state->next) {
void *userdata_chunk = state->initial_tls_memory;
const size_t userdata_chunk_size = state->tls_data_size;
if (userdata_chunk_size == 0) {
BLI_assert(userdata_chunk == NULL);
continue;
}
void *userdata_chunk_array = NULL;
state->flatten_tls_storage = userdata_chunk_array = MALLOCA(userdata_chunk_size *
(size_t)num_tasks);
for (int i = 0; i < num_tasks; i++) {
void *userdata_chunk_local = (char *)userdata_chunk_array +
(userdata_chunk_size * (size_t)i);
memcpy(userdata_chunk_local, userdata_chunk, userdata_chunk_size);
}
}
TaskPool *task_pool = range_pool->pool = BLI_task_pool_create_suspended(task_scheduler,
range_pool);
range_pool->current_state = range_pool->parallel_range_states;
for (int i = 0; i < num_tasks; i++) {
BLI_task_pool_push_from_thread(task_pool,
parallel_range_func,
POINTER_FROM_INT(i),
false,
TASK_PRIORITY_HIGH,
task_pool->thread_id);
}
BLI_task_pool_work_and_wait(task_pool);
BLI_assert(atomic_cas_ptr((void **)&range_pool->current_state, NULL, NULL) == NULL);
/* Finalize all tasks. */
for (TaskParallelRangeState *state = range_pool->parallel_range_states; state != NULL;
state = state->next) {
const size_t userdata_chunk_size = state->tls_data_size;
void *userdata_chunk_array = state->flatten_tls_storage;
UNUSED_VARS_NDEBUG(userdata_chunk_array);
if (userdata_chunk_size == 0) {
BLI_assert(userdata_chunk_array == NULL);
continue;
}
if (state->func_finalize != NULL) {
BLI_task_pool_push_from_thread(task_pool,
parallel_range_func_finalize,
state,
false,
TASK_PRIORITY_HIGH,
task_pool->thread_id);
}
}
BLI_task_pool_work_and_wait(task_pool);
BLI_task_pool_free(task_pool);
range_pool->pool = NULL;
/* Cleanup all tasks. */
TaskParallelRangeState *state_next;
for (TaskParallelRangeState *state = range_pool->parallel_range_states; state != NULL;
state = state_next) {
state_next = state->next;
const size_t userdata_chunk_size = state->tls_data_size;
void *userdata_chunk_array = state->flatten_tls_storage;
if (userdata_chunk_size != 0) {
BLI_assert(userdata_chunk_array != NULL);
MALLOCA_FREE(userdata_chunk_array, userdata_chunk_size * (size_t)num_tasks);
}
MEM_freeN(state);
}
range_pool->parallel_range_states = NULL;
}
/**
* Clear/free given \a range_pool.
*/
void BLI_task_parallel_range_pool_free(TaskParallelRangePool *range_pool)
{
TaskParallelRangeState *state_next = NULL;
for (TaskParallelRangeState *state = range_pool->parallel_range_states; state != NULL;
state = state_next) {
state_next = state->next;
MEM_freeN(state);
}
MEM_freeN(range_pool->settings);
MEM_freeN(range_pool);
}
typedef struct TaskParallelIteratorState {
void *userdata;
TaskParallelIteratorIterFunc iter_func;


@@ -19,8 +19,6 @@ extern "C" {
#include "MEM_guardedalloc.h"
}
/* *** Parallel iterations over double-linked list items. *** */
#define NUM_RUN_AVERAGED 100
static uint gen_pseudo_random_number(uint num)
@@ -38,6 +36,94 @@ static uint gen_pseudo_random_number(uint num)
return ((num & 255) << 6) + 1;
}
/* *** Parallel iterations over range of indices. *** */
static void task_parallel_range_func(void *UNUSED(userdata),
int index,
const TaskParallelTLS *__restrict UNUSED(tls))
{
const uint limit = gen_pseudo_random_number((uint)index);
for (uint i = (uint)index; i < limit;) {
i += gen_pseudo_random_number(i);
}
}
static void task_parallel_range_test_do(const char *id,
const int num_items,
const bool use_threads)
{
TaskParallelSettings settings;
BLI_parallel_range_settings_defaults(&settings);
settings.use_threading = use_threads;
double averaged_timing = 0.0;
for (int i = 0; i < NUM_RUN_AVERAGED; i++) {
const double init_time = PIL_check_seconds_timer();
for (int j = 0; j < 10; j++) {
BLI_task_parallel_range(i + j, i + j + num_items, NULL, task_parallel_range_func, &settings);
}
averaged_timing += PIL_check_seconds_timer() - init_time;
}
printf("\t%s: non-pooled done in %fs on average over %d runs\n",
id,
averaged_timing / NUM_RUN_AVERAGED,
NUM_RUN_AVERAGED);
averaged_timing = 0.0;
for (int i = 0; i < NUM_RUN_AVERAGED; i++) {
const double init_time = PIL_check_seconds_timer();
TaskParallelRangePool *range_pool = BLI_task_parallel_range_pool_init(&settings);
for (int j = 0; j < 10; j++) {
BLI_task_parallel_range_pool_push(
range_pool, i + j, i + j + num_items, NULL, task_parallel_range_func, &settings);
}
BLI_task_parallel_range_pool_work_and_wait(range_pool);
BLI_task_parallel_range_pool_free(range_pool);
averaged_timing += PIL_check_seconds_timer() - init_time;
}
printf("\t%s: pooled done in %fs on average over %d runs\n",
id,
averaged_timing / NUM_RUN_AVERAGED,
NUM_RUN_AVERAGED);
}
TEST(task, RangeIter10KNoThread)
{
task_parallel_range_test_do(
"Range parallel iteration - Single thread - 10K items", 10000, false);
}
TEST(task, RangeIter10k)
{
task_parallel_range_test_do("Range parallel iteration - Threaded - 10K items", 10000, true);
}
TEST(task, RangeIter100KNoThread)
{
task_parallel_range_test_do(
"Range parallel iteration - Single thread - 100K items", 100000, false);
}
TEST(task, RangeIter100k)
{
task_parallel_range_test_do("Range parallel iteration - Threaded - 100K items", 100000, true);
}
TEST(task, RangeIter1000KNoThread)
{
task_parallel_range_test_do(
"Range parallel iteration - Single thread - 1000K items", 1000000, false);
}
TEST(task, RangeIter1000k)
{
task_parallel_range_test_do("Range parallel iteration - Threaded - 1000K items", 1000000, true);
}
/* *** Parallel iterations over double-linked list items. *** */
static void task_listbase_light_iter_func(void *UNUSED(userdata),
void *item,
int index,


@@ -17,6 +17,126 @@ extern "C" {
#define NUM_ITEMS 10000
/* *** Parallel iterations over range of integer values. *** */
static void task_range_iter_func(void *userdata, int index, const TaskParallelTLS *__restrict tls)
{
int *data = (int *)userdata;
data[index] = index;
*((int *)tls->userdata_chunk) += index;
// printf("%d, %d, %d\n", index, data[index], *((int *)tls->userdata_chunk));
}
static void task_range_iter_finalize_func(void *__restrict userdata,
void *__restrict userdata_chunk)
{
int *data = (int *)userdata;
data[NUM_ITEMS] += *(int *)userdata_chunk;
// printf("%d, %d\n", data[NUM_ITEMS], *((int *)userdata_chunk));
}
TEST(task, RangeIter)
{
int data[NUM_ITEMS + 1] = {0};
int sum = 0;
BLI_threadapi_init();
TaskParallelSettings settings;
BLI_parallel_range_settings_defaults(&settings);
settings.min_iter_per_thread = 1;
settings.userdata_chunk = &sum;
settings.userdata_chunk_size = sizeof(sum);
settings.func_finalize = task_range_iter_finalize_func;
BLI_task_parallel_range(0, NUM_ITEMS, data, task_range_iter_func, &settings);
/* These checks should ensure that all items of the range were processed once, and only once,
* as expected. */
int expected_sum = 0;
for (int i = 0; i < NUM_ITEMS; i++) {
EXPECT_EQ(data[i], i);
expected_sum += i;
}
EXPECT_EQ(data[NUM_ITEMS], expected_sum);
BLI_threadapi_exit();
}
TEST(task, RangeIterPool)
{
const int num_tasks = 10;
int data[num_tasks][NUM_ITEMS + 1] = {{0}};
int sum = 0;
BLI_threadapi_init();
TaskParallelSettings settings;
BLI_parallel_range_settings_defaults(&settings);
settings.min_iter_per_thread = 1;
TaskParallelRangePool *range_pool = BLI_task_parallel_range_pool_init(&settings);
for (int j = 0; j < num_tasks; j++) {
settings.userdata_chunk = &sum;
settings.userdata_chunk_size = sizeof(sum);
settings.func_finalize = task_range_iter_finalize_func;
BLI_task_parallel_range_pool_push(
range_pool, 0, NUM_ITEMS, data[j], task_range_iter_func, &settings);
}
BLI_task_parallel_range_pool_work_and_wait(range_pool);
/* These checks should ensure that all items of the range were processed once, and only once,
* as expected. */
for (int j = 0; j < num_tasks; j++) {
int expected_sum = 0;
for (int i = 0; i < NUM_ITEMS; i++) {
// EXPECT_EQ(data[j][i], i);
expected_sum += i;
}
EXPECT_EQ(data[j][NUM_ITEMS], expected_sum);
}
/* A pool can be re-used until it is freed. */
for (int j = 0; j < num_tasks; j++) {
memset(data[j], 0, sizeof(data[j]));
}
sum = 0;
for (int j = 0; j < num_tasks; j++) {
settings.userdata_chunk = &sum;
settings.userdata_chunk_size = sizeof(sum);
settings.func_finalize = task_range_iter_finalize_func;
BLI_task_parallel_range_pool_push(
range_pool, 0, NUM_ITEMS, data[j], task_range_iter_func, &settings);
}
BLI_task_parallel_range_pool_work_and_wait(range_pool);
BLI_task_parallel_range_pool_free(range_pool);
/* These checks should ensure that all items of the range were processed once, and only once,
* as expected. */
for (int j = 0; j < num_tasks; j++) {
int expected_sum = 0;
for (int i = 0; i < NUM_ITEMS; i++) {
// EXPECT_EQ(data[j][i], i);
expected_sum += i;
}
EXPECT_EQ(data[j][NUM_ITEMS], expected_sum);
}
BLI_threadapi_exit();
}
/* *** Parallel iterations over mempool items. *** */
static void task_mempool_iter_func(void *userdata, MempoolIterData *item)