Add a new parallel looper for MemPool items to BLI_task.

It simply uses the new thread-safe iterator system of mempool, so it is
quite straightforward.

Note that, to avoid possible confusion between two void pointers as
parameters of the callback, a dummy opaque struct pointer is used
instead for the second parameter (the pointer generated by iterating over
the mempool); callback functions must explicitly convert it to the
expected real type.
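
For reference, a minimal caller-side sketch (the MyElem type and the function names are hypothetical, purely illustrative of the callback signature described above):

#include "BLI_mempool.h"
#include "BLI_task.h"

typedef struct MyElem {
    int value;
} MyElem;

static void my_elem_func(void *userdata, MempoolIterData *iter)
{
    const int offset = *(const int *)userdata;
    /* Explicitly convert the opaque pointer back to the real item type. */
    MyElem *elem = (MyElem *)iter;
    /* Only touches this item's own data, so no locking is needed. */
    elem->value += offset;
}

static void offset_all_elems(struct BLI_mempool *mempool)
{
    /* The mempool is assumed to have been created with BLI_MEMPOOL_ALLOW_ITER. */
    int offset = 10;
    BLI_task_parallel_mempool(mempool, &offset, my_elem_func, true);
}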

Also added a basic gtest for this new feature.
Bastien Montagne 2017-11-23 21:14:43 +01:00
parent b84e6dfee4
commit efb86b712d
4 changed files with 176 additions and 0 deletions


@@ -35,6 +35,8 @@ extern "C" {
#include "BLI_threads.h"
#include "BLI_utildefines.h"

struct BLI_mempool;
/* Task Scheduler
*
* Central scheduler that holds running threads ready to execute tasks. A single
@@ -150,6 +152,15 @@ void BLI_task_parallel_listbase(
        TaskParallelListbaseFunc func,
        const bool use_threading);

typedef struct MempoolIterData MempoolIterData;

typedef void (*TaskParallelMempoolFunc)(void *userdata,
                                        MempoolIterData *iter);
void BLI_task_parallel_mempool(
        struct BLI_mempool *mempool,
        void *userdata,
        TaskParallelMempoolFunc func,
        const bool use_threading);
#ifdef __cplusplus
}
#endif


@@ -32,6 +32,7 @@
#include "BLI_listbase.h"
#include "BLI_math.h"
#include "BLI_mempool.h"
#include "BLI_task.h"
#include "BLI_threads.h"
@@ -1354,3 +1355,89 @@ void BLI_task_parallel_listbase(
    BLI_spin_end(&state.lock);
}

typedef struct ParallelMempoolState {
    void *userdata;
    TaskParallelMempoolFunc func;
} ParallelMempoolState;

static void parallel_mempool_func(
        TaskPool * __restrict pool,
        void *taskdata,
        int UNUSED(threadid))
{
    ParallelMempoolState * __restrict state = BLI_task_pool_userdata(pool);
    BLI_mempool_iter *iter = taskdata;
    MempoolIterData *item;

    while ((item = BLI_mempool_iterstep(iter)) != NULL) {
        state->func(state->userdata, item);
    }
}

/**
 * This function allows parallelizing for-loops over Mempool items.
 *
 * \param mempool The iterable BLI_mempool to loop over.
 * \param userdata Common userdata passed to all instances of \a func.
 * \param func Callback function.
 * \param use_threading If \a true, actually split-execute the loop in threads, else just do a sequential for-loop
 * (this allows the caller to use any kind of test to switch parallelization on or off).
 *
 * \note There is no static scheduling here.
 */
void BLI_task_parallel_mempool(
        BLI_mempool *mempool,
        void *userdata,
        TaskParallelMempoolFunc func,
        const bool use_threading)
{
    TaskScheduler *task_scheduler;
    TaskPool *task_pool;
    ParallelMempoolState state;
    int i, num_threads, num_tasks;

    if (BLI_mempool_count(mempool) == 0) {
        return;
    }

    if (!use_threading) {
        BLI_mempool_iter iter;
        BLI_mempool_iternew(mempool, &iter);

        for (void *item = BLI_mempool_iterstep(&iter); item != NULL; item = BLI_mempool_iterstep(&iter)) {
            func(userdata, item);
        }
        return;
    }

    task_scheduler = BLI_task_scheduler_get();
    task_pool = BLI_task_pool_create(task_scheduler, &state);
    num_threads = BLI_task_scheduler_num_threads(task_scheduler);

    /* The idea here is to prevent creating a task for each of the loop iterations,
     * and instead have tasks which are evenly distributed across CPU cores and
     * pull the next item to be crunched using the thread-aware BLI_mempool_iter.
     */
    num_tasks = num_threads * 2;

    state.userdata = userdata;
    state.func = func;

    BLI_mempool_iter *mempool_iterators = BLI_mempool_iter_threadsafe_create(mempool, (size_t)num_tasks);

    for (i = 0; i < num_tasks; i++) {
        /* Use this pool's pre-allocated tasks. */
        BLI_task_pool_push_from_thread(task_pool,
                                       parallel_mempool_func,
                                       &mempool_iterators[i], false,
                                       TASK_PRIORITY_HIGH,
                                       task_pool->thread_id);
    }

    BLI_task_pool_work_and_wait(task_pool);
    BLI_task_pool_free(task_pool);

    BLI_mempool_iter_threadsafe_free(mempool_iterators);
}
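
Following the doc-comment above on use_threading, a caller would typically gate threading on some cheap test; a hypothetical sketch (the 256 threshold and the helper name are illustrative only, not part of this commit):

/* Hypothetical helper: only go parallel when the pool is large enough to
 * amortize the task-scheduling overhead. */
static void process_pool_items(BLI_mempool *mempool, void *userdata, TaskParallelMempoolFunc func)
{
    const bool use_threading = (BLI_mempool_count(mempool) > 256);
    BLI_task_parallel_mempool(mempool, userdata, func, use_threading);
}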


@@ -0,0 +1,76 @@
/* Apache License, Version 2.0 */

#include "testing/testing.h"

#include <string.h>

#include "atomic_ops.h"

extern "C" {
#include "BLI_mempool.h"
#include "BLI_task.h"
#include "BLI_utildefines.h"
};

#define NUM_ITEMS 10000

static void task_mempool_iter_func(void *userdata, MempoolIterData *item) {
    int *data = (int *)item;
    int *count = (int *)userdata;

    EXPECT_TRUE(data != NULL);

    *data += 1;
    atomic_sub_and_fetch_uint32((uint32_t *)count, 1);
}

TEST(task, MempoolIter)
{
    int *data[NUM_ITEMS];
    BLI_mempool *mempool = BLI_mempool_create(sizeof(*data[0]), NUM_ITEMS, 32, BLI_MEMPOOL_ALLOW_ITER);
    int i;

    /* 'Randomly' add and remove some items from the mempool, to create a non-homogeneous one. */
    int num_items = 0;
    for (i = 0; i < NUM_ITEMS; i++) {
        data[i] = (int *)BLI_mempool_alloc(mempool);
        *data[i] = i - 1;
        num_items++;
    }

    for (i = 0; i < NUM_ITEMS; i += 3) {
        BLI_mempool_free(mempool, data[i]);
        data[i] = NULL;
        num_items--;
    }

    for (i = 0; i < NUM_ITEMS; i += 7) {
        if (data[i] == NULL) {
            data[i] = (int *)BLI_mempool_alloc(mempool);
            *data[i] = i - 1;
            num_items++;
        }
    }

    for (i = 0; i < NUM_ITEMS - 5; i += 23) {
        for (int j = 0; j < 5; j++) {
            if (data[i + j] != NULL) {
                BLI_mempool_free(mempool, data[i + j]);
                data[i + j] = NULL;
                num_items--;
            }
        }
    }

    BLI_task_parallel_mempool(mempool, &num_items, task_mempool_iter_func, true);

    /* These checks ensure that all items of the mempool were processed once, and only once, as expected. */
    EXPECT_EQ(num_items, 0);
    for (i = 0; i < NUM_ITEMS; i++) {
        if (data[i] != NULL) {
            EXPECT_EQ(*data[i], i);
        }
    }

    BLI_mempool_destroy(mempool);
}


@@ -27,6 +27,7 @@ set(INC
    ../../../source/blender/blenlib
    ../../../source/blender/makesdna
    ../../../intern/guardedalloc
    ../../../intern/atomic
)
include_directories(${INC})
@@ -55,6 +56,7 @@ BLENDER_TEST(BLI_polyfill2d "bf_blenlib")
BLENDER_TEST(BLI_stack "bf_blenlib")
BLENDER_TEST(BLI_string "bf_blenlib")
BLENDER_TEST(BLI_string_utf8 "bf_blenlib")
BLENDER_TEST(BLI_task "bf_blenlib")
BLENDER_TEST_PERFORMANCE(BLI_ghash_performance "bf_blenlib")