Fix T44979: Crash when rendering with more threads than the system ones
Revert "Nodes: Remove hardcoded BLENDER_MAX_THREADS number of threads"
This reverts commit fdc653e8ce.
The threads override is not affected by the scene, and hence the thread limit
was not giving a correct result. Some things here need to be reconsidered.
This commit is contained in:
parent
d63615272c
commit
ec0ba4095f
Notes:
blender-bot
2023-02-14 09:02:40 +01:00
Referenced by issue #44979, Blender 2.75 test build crashes on render - Blender internal - 2.74 renders this scene ok
|
@ -29,7 +29,6 @@
|
|||
* \ingroup nodes
|
||||
*/
|
||||
|
||||
#include <stdlib.h> /* for abort() */
|
||||
|
||||
#include "DNA_node_types.h"
|
||||
|
||||
|
@ -263,7 +262,7 @@ bNodeThreadStack *ntreeGetThreadStack(bNodeTreeExec *exec, int thread)
|
|||
{
|
||||
ListBase *lb = &exec->threadstack[thread];
|
||||
bNodeThreadStack *nts;
|
||||
BLI_assert(thread < exec->tot_thread);
|
||||
|
||||
for (nts = lb->first; nts; nts = nts->next) {
|
||||
if (!nts->used) {
|
||||
nts->used = true;
|
||||
|
|
|
@ -65,7 +65,6 @@ typedef struct bNodeTreeExec {
|
|||
int stacksize;
|
||||
struct bNodeStack *stack; /* socket data stack */
|
||||
/* only used by material and texture trees to keep one stack for each thread */
|
||||
int tot_thread;
|
||||
ListBase *threadstack; /* one instance of the stack for each thread */
|
||||
} bNodeTreeExec;
|
||||
|
||||
|
|
|
@ -235,8 +235,7 @@ bNodeTreeExec *ntreeShaderBeginExecTree_internal(bNodeExecContext *context, bNod
|
|||
exec = ntree_exec_begin(context, ntree, parent_key);
|
||||
|
||||
/* allocate the thread stack listbase array */
|
||||
exec->tot_thread = BLI_system_thread_count();
|
||||
exec->threadstack = MEM_callocN(exec->tot_thread * sizeof(ListBase), "thread stack array");
|
||||
exec->threadstack = MEM_callocN(BLENDER_MAX_THREADS * sizeof(ListBase), "thread stack array");
|
||||
|
||||
for (node = exec->nodetree->nodes.first; node; node = node->next)
|
||||
node->need_exec = 1;
|
||||
|
@ -273,7 +272,7 @@ void ntreeShaderEndExecTree_internal(bNodeTreeExec *exec)
|
|||
int a;
|
||||
|
||||
if (exec->threadstack) {
|
||||
for (a = 0; a < exec->tot_thread; a++) {
|
||||
for (a = 0; a < BLENDER_MAX_THREADS; a++) {
|
||||
for (nts = exec->threadstack[a].first; nts; nts = nts->next)
|
||||
if (nts->stack) MEM_freeN(nts->stack);
|
||||
BLI_freelistN(&exec->threadstack[a]);
|
||||
|
|
|
@ -232,8 +232,7 @@ bNodeTreeExec *ntreeTexBeginExecTree_internal(bNodeExecContext *context, bNodeTr
|
|||
exec = ntree_exec_begin(context, ntree, parent_key);
|
||||
|
||||
/* allocate the thread stack listbase array */
|
||||
exec->tot_thread = BLI_system_thread_count();
|
||||
exec->threadstack = MEM_callocN(exec->tot_thread * sizeof(ListBase), "thread stack array");
|
||||
exec->threadstack = MEM_callocN(BLENDER_MAX_THREADS * sizeof(ListBase), "thread stack array");
|
||||
|
||||
for (node = exec->nodetree->nodes.first; node; node = node->next)
|
||||
node->need_exec = 1;
|
||||
|
@ -271,7 +270,7 @@ static void tex_free_delegates(bNodeTreeExec *exec)
|
|||
bNodeStack *ns;
|
||||
int th, a;
|
||||
|
||||
for (th = 0; th < exec->tot_thread; th++)
|
||||
for (th = 0; th < BLENDER_MAX_THREADS; th++)
|
||||
for (nts = exec->threadstack[th].first; nts; nts = nts->next)
|
||||
for (ns = nts->stack, a = 0; a < exec->stacksize; a++, ns++)
|
||||
if (ns->data && !ns->is_copy)
|
||||
|
@ -286,7 +285,7 @@ void ntreeTexEndExecTree_internal(bNodeTreeExec *exec)
|
|||
if (exec->threadstack) {
|
||||
tex_free_delegates(exec);
|
||||
|
||||
for (a = 0; a < exec->tot_thread; a++) {
|
||||
for (a = 0; a < BLENDER_MAX_THREADS; a++) {
|
||||
for (nts = exec->threadstack[a].first; nts; nts = nts->next)
|
||||
if (nts->stack) MEM_freeN(nts->stack);
|
||||
BLI_freelistN(&exec->threadstack[a]);
|
||||
|
|
Loading…
Reference in New Issue