Fluid Particles: fix viscoelastic spring threading crash again after D7394.
Since D6133 fluid particle code uses thread local storage to collect springs created during a time step before adding them to the actual spring array. Prior to the switch to TBB there was a single finalize callback which was called on the main thread, so it could use psys_sph_flush_springs and insert the new entries into the final buffer. However in D7394 it was replaced with a reduce callback, which is supposed to be thread-safe and have no side effects. This means that the only thing it can safely do is copy entries to the other temporary buffer. In addition, careful checking reveals that the 'classical' solver doesn't actually add springs, so reduce isn't needed there. Differential Revision: https://developer.blender.org/D9632
This commit is contained in:
parent
8330e19cb2
commit
02a0f6b04a
|
@ -3694,12 +3694,20 @@ typedef struct DynamicStepSolverTaskData {
|
|||
} DynamicStepSolverTaskData;
|
||||
|
||||
static void dynamics_step_sphdata_reduce(const void *__restrict UNUSED(userdata),
|
||||
void *__restrict UNUSED(join_v),
|
||||
void *__restrict join_v,
|
||||
void *__restrict chunk_v)
|
||||
{
|
||||
SPHData *sphdata = chunk_v;
|
||||
SPHData *sphdata_to = join_v;
|
||||
SPHData *sphdata_from = chunk_v;
|
||||
|
||||
psys_sph_flush_springs(sphdata);
|
||||
if (sphdata_from->new_springs.count > 0) {
|
||||
BLI_buffer_append_array(&sphdata_to->new_springs,
|
||||
ParticleSpring,
|
||||
&BLI_buffer_at(&sphdata_from->new_springs, ParticleSpring, 0),
|
||||
sphdata_from->new_springs.count);
|
||||
}
|
||||
|
||||
BLI_buffer_field_free(&sphdata_from->new_springs);
|
||||
}
|
||||
|
||||
static void dynamics_step_sph_ddr_task_cb_ex(void *__restrict userdata,
|
||||
|
@ -4020,7 +4028,6 @@ static void dynamics_step(ParticleSimulationData *sim, float cfra)
|
|||
settings.use_threading = (psys->totpart > 100);
|
||||
settings.userdata_chunk = &sphdata;
|
||||
settings.userdata_chunk_size = sizeof(sphdata);
|
||||
settings.func_reduce = dynamics_step_sphdata_reduce;
|
||||
BLI_task_parallel_range(0,
|
||||
psys->totpart,
|
||||
&task_data,
|
||||
|
@ -4035,7 +4042,6 @@ static void dynamics_step(ParticleSimulationData *sim, float cfra)
|
|||
settings.use_threading = (psys->totpart > 100);
|
||||
settings.userdata_chunk = &sphdata;
|
||||
settings.userdata_chunk_size = sizeof(sphdata);
|
||||
settings.func_reduce = dynamics_step_sphdata_reduce;
|
||||
BLI_task_parallel_range(0,
|
||||
psys->totpart,
|
||||
&task_data,
|
||||
|
|
|
@ -77,6 +77,16 @@ void BLI_buffer_resize(BLI_Buffer *buffer, const size_t new_count);
|
|||
/* Ensure size, throwing away old data, respecting BLI_BUFFER_USE_CALLOC */
|
||||
void BLI_buffer_reinit(BLI_Buffer *buffer, const size_t new_count);
|
||||
|
||||
/* Append an array of elements. */
/* NOTE(review): callers should use the type-checked BLI_buffer_append_array
 * macro below rather than calling _bli_buffer_append_array directly; the
 * macro's temporary pointer exists purely to type-check `data_` and its
 * BLI_assert verifies the element size matches the buffer.
 * NOTE(review): `__tmp` is a reserved identifier per the C standard (leading
 * double underscore) -- presumably it follows existing conventions in this
 * header; confirm against the other BLI_buffer macros. */
void _bli_buffer_append_array(BLI_Buffer *buffer, void *data, size_t count);
#define BLI_buffer_append_array(buffer_, type_, data_, count_) \
  { \
    type_ *__tmp = (data_); \
    BLI_assert(sizeof(type_) == (buffer_)->elem_size); \
    _bli_buffer_append_array(buffer_, __tmp, count_); \
  } \
  (void)0
|
||||
|
||||
/* Does not free the buffer structure itself */
|
||||
void _bli_buffer_free(BLI_Buffer *buffer);
|
||||
#define BLI_buffer_free(name_) \
|
||||
|
|
|
@ -114,6 +114,16 @@ void BLI_buffer_reinit(BLI_Buffer *buffer, const size_t new_count)
|
|||
buffer->count = new_count;
|
||||
}
|
||||
|
||||
/* Callers use BLI_buffer_append_array. */
|
||||
void _bli_buffer_append_array(BLI_Buffer *buffer, void *new_data, size_t count)
|
||||
{
|
||||
size_t size = buffer->count;
|
||||
BLI_buffer_resize(buffer, size + count);
|
||||
|
||||
uint8_t *bytes = (uint8_t *)buffer->data;
|
||||
memcpy(bytes + size * buffer->elem_size, new_data, count * buffer->elem_size);
|
||||
}
|
||||
|
||||
/* callers use BLI_buffer_free */
|
||||
void _bli_buffer_free(BLI_Buffer *buffer)
|
||||
{
|
||||
|
|
Loading…
Reference in New Issue