Functions: speedup multi-function procedure executor

This improves performance of the procedure executor on secondary metrics
(i.e. not for the main use case when many elements are processed together,
but for the use case when a single element is processed at a time).

In my benchmark I'm measuring a 50-60% improvement:
* Procedure with a single function (executed many times): `5.8s -> 2.7s`.
* Procedure with 1000 functions (executed many times): `2.4s -> 1.0s`.

The speedup is achieved mainly in the following ways:
* Store an `Array` of variable states instead of a map. The array is indexed
  by an index stored in each variable, which also avoids allocating each
  variable state separately (see the sketch after this list).
* Move less data around in the scheduler and use a `Stack` instead of a `Map`.
  The `Map` was used before because it allows for optimizations that might
  become more important in the future but don't matter right now (e.g. joining
  execution paths that diverged earlier).
* Avoid memory allocations by giving the `LinearAllocator` some memory
  from the stack.
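
For illustration, here is a minimal, self-contained sketch of the first and
third points. It uses simplified stand-ins (`std::vector`, a toy
`StackBackedAllocator`) instead of the actual BLI types (`Array`,
`LinearAllocator`, `AlignedBuffer`); only the overall pattern mirrors the
patch, the names and signatures in the sketch are illustrative:

```cpp
#include <cstddef>
#include <vector>

/* Simplified stand-in for the per-variable state; the real VariableState also
 * tracks the storage format of the value and caller-provided buffers. */
struct VariableState {
  void *value = nullptr;
  int tot_initialized = 0;
};

/* Before the patch, states lived in a Map<const MFVariable *, VariableState *>
 * and each state was allocated separately. Afterwards there is one flat array,
 * indexed by the index stored in each variable (index_in_procedure()). */
struct VariableStates {
  std::vector<VariableState> states;

  explicit VariableStates(const int tot_variables) : states(tot_variables)
  {
  }

  VariableState &get(const int variable_index)
  {
    /* O(1) lookup: no hashing and no per-state heap allocation. */
    return states[variable_index];
  }
};

/* Sketch of the stack-buffer trick: the executor hands the allocator a small
 * aligned buffer that lives on the stack, so typical executions avoid heap
 * allocations entirely. */
struct StackBackedAllocator {
  std::byte *current = nullptr;
  std::byte *end = nullptr;

  void provide_buffer(std::byte *buffer, const std::size_t size)
  {
    current = buffer;
    end = buffer + size;
  }
};

int main()
{
  alignas(64) std::byte local_buffer[512]; /* Mirrors AlignedBuffer<512, 64>. */
  StackBackedAllocator allocator;
  allocator.provide_buffer(local_buffer, sizeof(local_buffer));

  VariableStates variable_states(3);
  variable_states.get(1).tot_initialized = 10;
  return 0;
}
```

The flat array trades the flexibility of a map for cache-friendly,
allocation-free lookups, which is exactly what matters when the executor is
invoked once per element.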
Commit d48735cca2 (parent 575884b827) by Jacques Lucke, 2022-06-19 14:25:21 +02:00.
3 changed files with 193 additions and 191 deletions.

Changed file 1 of 3:

@ -87,7 +87,7 @@ class MFVariable : NonCopyable, NonMovable {
MFDataType data_type_;
Vector<MFInstruction *> users_;
std::string name_;
int id_;
int index_in_graph_;
friend MFProcedure;
friend MFCallInstruction;
@ -101,7 +101,7 @@ class MFVariable : NonCopyable, NonMovable {
StringRefNull name() const;
void set_name(std::string name);
int id() const;
int index_in_procedure() const;
};
/** Base class for all instruction types. */
@ -376,9 +376,9 @@ inline StringRefNull MFVariable::name() const
return name_;
}
inline int MFVariable::id() const
inline int MFVariable::index_in_procedure() const
{
return id_;
return index_in_graph_;
}
/** \} */

Changed file 2 of 3:

@ -173,7 +173,7 @@ MFVariable &MFProcedure::new_variable(MFDataType data_type, std::string name)
MFVariable &variable = *allocator_.construct<MFVariable>().release();
variable.name_ = std::move(name);
variable.data_type_ = data_type;
variable.id_ = variables_.size();
variable.index_in_graph_ = variables_.size();
variables_.append(&variable);
return variable;
}
@ -753,7 +753,7 @@ class MFProcedureDotExport {
ss << "null";
}
else {
ss << "$" << variable->id();
ss << "$" << variable->index_in_procedure();
if (!variable->name().is_empty()) {
ss << "(" << variable->name() << ")";
}

Changed file 3 of 3:

@ -157,10 +157,6 @@ class ValueAllocator : NonCopyable, NonMovable {
{
}
template<typename... Args> VariableState *obtain_variable_state(Args &&...args);
void release_variable_state(VariableState *state);
VariableValue_GVArray *obtain_GVArray(const GVArray &varray)
{
return this->obtain<VariableValue_GVArray>(varray);
@ -294,32 +290,27 @@ class ValueAllocator : NonCopyable, NonMovable {
* This class keeps track of a single variable during evaluation.
*/
class VariableState : NonCopyable, NonMovable {
private:
public:
/** The current value of the variable. The storage format may change over time. */
VariableValue *value_;
VariableValue *value_ = nullptr;
/** Number of indices that are currently initialized in this variable. */
int tot_initialized_;
int tot_initialized_ = 0;
/* This is a non-owning pointer to either a span buffer or a #GVectorArray, or null. */
void *caller_provided_storage_ = nullptr;
public:
VariableState(VariableValue &value, int tot_initialized, void *caller_provided_storage = nullptr)
: value_(&value),
tot_initialized_(tot_initialized),
caller_provided_storage_(caller_provided_storage)
{
}
void destruct_self(ValueAllocator &value_allocator, const MFDataType &data_type)
void destruct_value(ValueAllocator &value_allocator, const MFDataType &data_type)
{
value_allocator.release_value(value_, data_type);
value_allocator.release_variable_state(this);
value_ = nullptr;
}
/* True if this contains only one value for all indices, i.e. the value for all indices is
* the same. */
bool is_one() const
{
if (value_ == nullptr) {
return true;
}
switch (value_->type) {
case ValueType::GVArray:
return this->value_as<VariableValue_GVArray>()->data.is_single();
@ -353,6 +344,7 @@ class VariableState : NonCopyable, NonMovable {
{
/* Sanity check to make sure that enough values are initialized. */
BLI_assert(mask.size() <= tot_initialized_);
BLI_assert(value_ != nullptr);
switch (value_->type) {
case ValueType::GVArray: {
@ -391,7 +383,7 @@ class VariableState : NonCopyable, NonMovable {
const MFDataType &data_type,
ValueAllocator &value_allocator)
{
if (ELEM(value_->type, ValueType::Span, ValueType::GVectorArray)) {
if (value_ != nullptr && ELEM(value_->type, ValueType::Span, ValueType::GVectorArray)) {
return;
}
@ -408,22 +400,24 @@ class VariableState : NonCopyable, NonMovable {
/* Reuse the storage provided by the caller when possible. */
new_value = value_allocator.obtain_Span_not_owned(caller_provided_storage_);
}
if (value_->type == ValueType::GVArray) {
/* Fill new buffer with data from virtual array. */
this->value_as<VariableValue_GVArray>()->data.materialize_to_uninitialized(
full_mask, new_value->data);
}
else if (value_->type == ValueType::OneSingle) {
auto *old_value_typed_ = this->value_as<VariableValue_OneSingle>();
if (old_value_typed_->is_initialized) {
/* Fill the buffer with a single value. */
type.fill_construct_indices(old_value_typed_->data, new_value->data, full_mask);
if (value_ != nullptr) {
if (value_->type == ValueType::GVArray) {
/* Fill new buffer with data from virtual array. */
this->value_as<VariableValue_GVArray>()->data.materialize_to_uninitialized(
full_mask, new_value->data);
}
else if (value_->type == ValueType::OneSingle) {
auto *old_value_typed_ = this->value_as<VariableValue_OneSingle>();
if (old_value_typed_->is_initialized) {
/* Fill the buffer with a single value. */
type.fill_construct_indices(old_value_typed_->data, new_value->data, full_mask);
}
}
else {
BLI_assert_unreachable();
}
value_allocator.release_value(value_, data_type);
}
else {
BLI_assert_unreachable();
}
value_allocator.release_value(value_, data_type);
value_ = new_value;
break;
}
@ -437,19 +431,21 @@ class VariableState : NonCopyable, NonMovable {
new_value = value_allocator.obtain_GVectorArray_not_owned(
*(GVectorArray *)caller_provided_storage_);
}
if (value_->type == ValueType::GVVectorArray) {
/* Fill new vector array with data from virtual vector array. */
new_value->data.extend(full_mask, this->value_as<VariableValue_GVVectorArray>()->data);
if (value_ != nullptr) {
if (value_->type == ValueType::GVVectorArray) {
/* Fill new vector array with data from virtual vector array. */
new_value->data.extend(full_mask, this->value_as<VariableValue_GVVectorArray>()->data);
}
else if (value_->type == ValueType::OneVector) {
/* Fill all indices with the same value. */
const GSpan vector = this->value_as<VariableValue_OneVector>()->data[0];
new_value->data.extend(full_mask, GVVectorArray_For_SingleGSpan{vector, array_size});
}
else {
BLI_assert_unreachable();
}
value_allocator.release_value(value_, data_type);
}
else if (value_->type == ValueType::OneVector) {
/* Fill all indices with the same value. */
const GSpan vector = this->value_as<VariableValue_OneVector>()->data[0];
new_value->data.extend(full_mask, GVVectorArray_For_SingleGSpan{vector, array_size});
}
else {
BLI_assert_unreachable();
}
value_allocator.release_value(value_, data_type);
value_ = new_value;
break;
}
@ -466,6 +462,7 @@ class VariableState : NonCopyable, NonMovable {
BLI_assert(mask.size() <= tot_initialized_);
this->ensure_is_mutable(full_mask, data_type, value_allocator);
BLI_assert(value_ != nullptr);
switch (value_->type) {
case ValueType::Span: {
@ -497,6 +494,7 @@ class VariableState : NonCopyable, NonMovable {
/* Sanity check to make sure that enough values are not initialized. */
BLI_assert(mask.size() <= full_mask.size() - tot_initialized_);
this->ensure_is_mutable(full_mask, data_type, value_allocator);
BLI_assert(value_ != nullptr);
switch (value_->type) {
case ValueType::Span: {
@ -524,6 +522,7 @@ class VariableState : NonCopyable, NonMovable {
void add_as_input__one(MFParamsBuilder &params, const MFDataType &data_type) const
{
BLI_assert(this->is_one());
BLI_assert(value_ != nullptr);
switch (value_->type) {
case ValueType::GVArray: {
@ -556,7 +555,7 @@ class VariableState : NonCopyable, NonMovable {
void ensure_is_mutable__one(const MFDataType &data_type, ValueAllocator &value_allocator)
{
BLI_assert(this->is_one());
if (ELEM(value_->type, ValueType::OneSingle, ValueType::OneVector)) {
if (value_ != nullptr && ELEM(value_->type, ValueType::OneSingle, ValueType::OneVector)) {
return;
}
@ -564,38 +563,42 @@ class VariableState : NonCopyable, NonMovable {
case MFDataType::Single: {
const CPPType &type = data_type.single_type();
VariableValue_OneSingle *new_value = value_allocator.obtain_OneSingle(type);
if (value_->type == ValueType::GVArray) {
this->value_as<VariableValue_GVArray>()->data.get_internal_single_to_uninitialized(
new_value->data);
new_value->is_initialized = true;
if (value_ != nullptr) {
if (value_->type == ValueType::GVArray) {
this->value_as<VariableValue_GVArray>()->data.get_internal_single_to_uninitialized(
new_value->data);
new_value->is_initialized = true;
}
else if (value_->type == ValueType::Span) {
BLI_assert(tot_initialized_ == 0);
/* Nothing to do, the single value is uninitialized already. */
}
else {
BLI_assert_unreachable();
}
value_allocator.release_value(value_, data_type);
}
else if (value_->type == ValueType::Span) {
BLI_assert(tot_initialized_ == 0);
/* Nothing to do, the single value is uninitialized already. */
}
else {
BLI_assert_unreachable();
}
value_allocator.release_value(value_, data_type);
value_ = new_value;
break;
}
case MFDataType::Vector: {
const CPPType &type = data_type.vector_base_type();
VariableValue_OneVector *new_value = value_allocator.obtain_OneVector(type);
if (value_->type == ValueType::GVVectorArray) {
const GVVectorArray &old_vector_array =
this->value_as<VariableValue_GVVectorArray>()->data;
new_value->data.extend(IndexRange(1), old_vector_array);
if (value_ != nullptr) {
if (value_->type == ValueType::GVVectorArray) {
const GVVectorArray &old_vector_array =
this->value_as<VariableValue_GVVectorArray>()->data;
new_value->data.extend(IndexRange(1), old_vector_array);
}
else if (value_->type == ValueType::GVectorArray) {
BLI_assert(tot_initialized_ == 0);
/* Nothing to do. */
}
else {
BLI_assert_unreachable();
}
value_allocator.release_value(value_, data_type);
}
else if (value_->type == ValueType::GVectorArray) {
BLI_assert(tot_initialized_ == 0);
/* Nothing to do. */
}
else {
BLI_assert_unreachable();
}
value_allocator.release_value(value_, data_type);
value_ = new_value;
break;
}
@ -608,6 +611,7 @@ class VariableState : NonCopyable, NonMovable {
{
BLI_assert(this->is_one());
this->ensure_is_mutable__one(data_type, value_allocator);
BLI_assert(value_ != nullptr);
switch (value_->type) {
case ValueType::OneSingle: {
@ -637,6 +641,7 @@ class VariableState : NonCopyable, NonMovable {
{
BLI_assert(this->is_one());
this->ensure_is_mutable__one(data_type, value_allocator);
BLI_assert(value_ != nullptr);
switch (value_->type) {
case ValueType::OneSingle: {
@ -676,6 +681,7 @@ class VariableState : NonCopyable, NonMovable {
const MFDataType &data_type,
ValueAllocator &value_allocator)
{
BLI_assert(value_ != nullptr);
int new_tot_initialized = tot_initialized_ - mask.size();
/* Sanity check to make sure that enough indices can be destructed. */
@ -743,6 +749,7 @@ class VariableState : NonCopyable, NonMovable {
void indices_split(IndexMask mask, IndicesSplitVectors &r_indices)
{
BLI_assert(mask.size() <= tot_initialized_);
BLI_assert(value_ != nullptr);
switch (value_->type) {
case ValueType::GVArray: {
@ -778,51 +785,47 @@ class VariableState : NonCopyable, NonMovable {
template<typename T> T *value_as()
{
BLI_assert(value_ != nullptr);
BLI_assert(value_->type == T::static_type);
return static_cast<T *>(value_);
}
template<typename T> const T *value_as() const
{
BLI_assert(value_ != nullptr);
BLI_assert(value_->type == T::static_type);
return static_cast<T *>(value_);
}
};
template<typename... Args> VariableState *ValueAllocator::obtain_variable_state(Args &&...args)
{
if (variable_state_free_list_.is_empty()) {
void *buffer = linear_allocator_.allocate(sizeof(VariableState), alignof(VariableState));
return new (buffer) VariableState(std::forward<Args>(args)...);
}
return new (variable_state_free_list_.pop()) VariableState(std::forward<Args>(args)...);
}
void ValueAllocator::release_variable_state(VariableState *state)
{
state->~VariableState();
variable_state_free_list_.push(state);
}
/** Keeps track of the states of all variables during evaluation. */
class VariableStates {
private:
ValueAllocator value_allocator_;
Map<const MFVariable *, VariableState *> variable_states_;
const MFProcedure &procedure_;
/** The state of every variable, indexed by #MFVariable::index_in_procedure(). */
Array<VariableState> variable_states_;
IndexMask full_mask_;
public:
VariableStates(LinearAllocator<> &linear_allocator, IndexMask full_mask)
: value_allocator_(linear_allocator), full_mask_(full_mask)
VariableStates(LinearAllocator<> &linear_allocator,
const MFProcedure &procedure,
IndexMask full_mask)
: value_allocator_(linear_allocator),
procedure_(procedure),
variable_states_(procedure.variables().size()),
full_mask_(full_mask)
{
}
~VariableStates()
{
for (auto &&item : variable_states_.items()) {
const MFVariable *variable = item.key;
VariableState *state = item.value;
state->destruct_self(value_allocator_, variable->data_type());
for (const int variable_i : procedure_.variables().index_range()) {
VariableState &state = variable_states_[variable_i];
if (state.value_ != nullptr) {
const MFVariable *variable = procedure_.variables()[variable_i];
state.destruct_value(value_allocator_, variable->data_type());
}
}
}
@ -848,9 +851,12 @@ class VariableStates {
bool input_is_initialized,
void *caller_provided_storage = nullptr) {
const int tot_initialized = input_is_initialized ? full_mask_.size() : 0;
variable_states_.add_new(variable,
value_allocator_.obtain_variable_state(
*value, tot_initialized, caller_provided_storage));
const int variable_i = variable->index_in_procedure();
VariableState &variable_state = variable_states_[variable_i];
BLI_assert(variable_state.value_ == nullptr);
variable_state.value_ = value;
variable_state.tot_initialized_ = tot_initialized;
variable_state.caller_provided_storage_ = caller_provided_storage;
};
switch (param_type.category()) {
@ -936,32 +942,15 @@ class VariableStates {
{
VariableState &variable_state = this->get_variable_state(variable);
if (variable_state.destruct(mask, full_mask_, variable.data_type(), value_allocator_)) {
variable_state.destruct_self(value_allocator_, variable.data_type());
variable_states_.remove_contained(&variable);
variable_state.destruct_value(value_allocator_, variable.data_type());
}
}
VariableState &get_variable_state(const MFVariable &variable)
{
return *variable_states_.lookup_or_add_cb(
&variable, [&]() { return this->create_new_state_for_variable(variable); });
}
VariableState *create_new_state_for_variable(const MFVariable &variable)
{
MFDataType data_type = variable.data_type();
switch (data_type.category()) {
case MFDataType::Single: {
const CPPType &type = data_type.single_type();
return value_allocator_.obtain_variable_state(*value_allocator_.obtain_OneSingle(type), 0);
}
case MFDataType::Vector: {
const CPPType &type = data_type.vector_base_type();
return value_allocator_.obtain_variable_state(*value_allocator_.obtain_OneVector(type), 0);
}
}
BLI_assert_unreachable();
return nullptr;
const int variable_i = variable.index_in_procedure();
VariableState &variable_state = variable_states_[variable_i];
return variable_state;
}
};
@ -977,15 +966,68 @@ static bool evaluate_as_one(const MultiFunction &fn,
return false;
}
for (VariableState *state : param_variable_states) {
if (state != nullptr && !state->is_one()) {
if (state != nullptr && state->value_ != nullptr && !state->is_one()) {
return false;
}
}
return true;
}
static void gather_parameter_variable_states(const MultiFunction &fn,
const MFCallInstruction &instruction,
VariableStates &variable_states,
MutableSpan<VariableState *> r_param_variable_states)
{
for (const int param_index : fn.param_indices()) {
const MFVariable *variable = instruction.params()[param_index];
if (variable == nullptr) {
r_param_variable_states[param_index] = nullptr;
}
else {
VariableState &variable_state = variable_states.get_variable_state(*variable);
r_param_variable_states[param_index] = &variable_state;
}
}
}
static void fill_params__one(const MultiFunction &fn,
const IndexMask mask,
MFParamsBuilder &params,
VariableStates &variable_states,
const Span<VariableState *> param_variable_states)
{
for (const int param_index : fn.param_indices()) {
const MFParamType param_type = fn.param_type(param_index);
VariableState *variable_state = param_variable_states[param_index];
if (variable_state == nullptr) {
params.add_ignored_single_output();
}
else {
variable_states.add_as_param__one(*variable_state, params, param_type, mask);
}
}
}
static void fill_params(const MultiFunction &fn,
const IndexMask mask,
MFParamsBuilder &params,
VariableStates &variable_states,
const Span<VariableState *> param_variable_states)
{
for (const int param_index : fn.param_indices()) {
const MFParamType param_type = fn.param_type(param_index);
VariableState *variable_state = param_variable_states[param_index];
if (variable_state == nullptr) {
params.add_ignored_single_output();
}
else {
variable_states.add_as_param(*variable_state, params, param_type, mask);
}
}
}
static void execute_call_instruction(const MFCallInstruction &instruction,
IndexMask mask,
const IndexMask mask,
VariableStates &variable_states,
const MFContext &context)
{
@ -993,33 +1035,13 @@ static void execute_call_instruction(const MFCallInstruction &instruction,
Vector<VariableState *> param_variable_states;
param_variable_states.resize(fn.param_amount());
for (const int param_index : fn.param_indices()) {
const MFVariable *variable = instruction.params()[param_index];
if (variable == nullptr) {
param_variable_states[param_index] = nullptr;
}
else {
VariableState &variable_state = variable_states.get_variable_state(*variable);
param_variable_states[param_index] = &variable_state;
}
}
gather_parameter_variable_states(fn, instruction, variable_states, param_variable_states);
/* If all inputs to the function are constant, it's enough to call the function only once instead
* of for every index. */
if (evaluate_as_one(fn, param_variable_states, mask, variable_states.full_mask())) {
MFParamsBuilder params(fn, 1);
for (const int param_index : fn.param_indices()) {
const MFParamType param_type = fn.param_type(param_index);
VariableState *variable_state = param_variable_states[param_index];
if (variable_state == nullptr) {
params.add_ignored_single_output();
}
else {
variable_states.add_as_param__one(*variable_state, params, param_type, mask);
}
}
fill_params__one(fn, mask, params, variable_states, param_variable_states);
try {
fn.call(IndexRange(1), params, context);
@ -1031,17 +1053,7 @@ static void execute_call_instruction(const MFCallInstruction &instruction,
}
else {
MFParamsBuilder params(fn, &mask);
for (const int param_index : fn.param_indices()) {
const MFParamType param_type = fn.param_type(param_index);
VariableState *variable_state = param_variable_states[param_index];
if (variable_state == nullptr) {
params.add_ignored_single_output();
}
else {
variable_states.add_as_param(*variable_state, params, param_type, mask);
}
}
fill_params(fn, mask, params, variable_states, param_variable_states);
try {
fn.call_auto(mask, params, context);
@ -1090,7 +1102,7 @@ struct NextInstructionInfo {
*/
class InstructionScheduler {
private:
Map<const MFInstruction *, Vector<InstructionIndices>> indices_by_instruction_;
Stack<NextInstructionInfo> next_instructions_;
public:
InstructionScheduler() = default;
@ -1103,7 +1115,7 @@ class InstructionScheduler {
InstructionIndices new_indices;
new_indices.is_owned = false;
new_indices.referenced_indices = mask;
indices_by_instruction_.lookup_or_add_default(&instruction).append(std::move(new_indices));
next_instructions_.push({&instruction, std::move(new_indices)});
}
void add_owned_indices(const MFInstruction &instruction, Vector<int64_t> indices)
@ -1116,43 +1128,28 @@ class InstructionScheduler {
InstructionIndices new_indices;
new_indices.is_owned = true;
new_indices.owned_indices = std::move(indices);
indices_by_instruction_.lookup_or_add_default(&instruction).append(std::move(new_indices));
next_instructions_.push({&instruction, std::move(new_indices)});
}
void add_previous_instruction_indices(const MFInstruction &instruction,
NextInstructionInfo &instr_info)
bool is_done() const
{
indices_by_instruction_.lookup_or_add_default(&instruction)
.append(std::move(instr_info.indices));
return next_instructions_.is_empty();
}
NextInstructionInfo pop_next()
const NextInstructionInfo &peek() const
{
if (indices_by_instruction_.is_empty()) {
return {};
}
/* TODO: Implement better mechanism to determine next instruction. */
const MFInstruction *instruction = *indices_by_instruction_.keys().begin();
NextInstructionInfo next_instruction_info;
next_instruction_info.instruction = instruction;
next_instruction_info.indices = this->pop_indices_array(instruction);
return next_instruction_info;
BLI_assert(!this->is_done());
return next_instructions_.peek();
}
private:
InstructionIndices pop_indices_array(const MFInstruction *instruction)
void update_instruction_pointer(const MFInstruction &instruction)
{
Vector<InstructionIndices> *indices = indices_by_instruction_.lookup_ptr(instruction);
if (indices == nullptr) {
return {};
}
InstructionIndices r_indices = (*indices).pop_last();
BLI_assert(!r_indices.mask().is_empty());
if (indices->is_empty()) {
indices_by_instruction_.remove_contained(instruction);
}
return r_indices;
next_instructions_.peek().instruction = &instruction;
}
NextInstructionInfo pop()
{
return next_instructions_.pop();
}
};
@ -1160,23 +1157,26 @@ void MFProcedureExecutor::call(IndexMask full_mask, MFParams params, MFContext c
{
BLI_assert(procedure_.validate());
AlignedBuffer<512, 64> local_buffer;
LinearAllocator<> linear_allocator;
linear_allocator.provide_buffer(local_buffer);
VariableStates variable_states{linear_allocator, full_mask};
VariableStates variable_states{linear_allocator, procedure_, full_mask};
variable_states.add_initial_variable_states(*this, procedure_, params);
InstructionScheduler scheduler;
scheduler.add_referenced_indices(*procedure_.entry(), full_mask);
/* Loop until all indices got to a return instruction. */
while (NextInstructionInfo instr_info = scheduler.pop_next()) {
while (!scheduler.is_done()) {
const NextInstructionInfo &instr_info = scheduler.peek();
const MFInstruction &instruction = *instr_info.instruction;
switch (instruction.type()) {
case MFInstructionType::Call: {
const MFCallInstruction &call_instruction = static_cast<const MFCallInstruction &>(
instruction);
execute_call_instruction(call_instruction, instr_info.mask(), variable_states, context);
scheduler.add_previous_instruction_indices(*call_instruction.next(), instr_info);
scheduler.update_instruction_pointer(*call_instruction.next());
break;
}
case MFInstructionType::Branch: {
@ -1187,6 +1187,7 @@ void MFProcedureExecutor::call(IndexMask full_mask, MFParams params, MFContext c
IndicesSplitVectors new_indices;
variable_state.indices_split(instr_info.mask(), new_indices);
scheduler.pop();
scheduler.add_owned_indices(*branch_instruction.branch_false(), new_indices[false]);
scheduler.add_owned_indices(*branch_instruction.branch_true(), new_indices[true]);
break;
@ -1196,17 +1197,18 @@ void MFProcedureExecutor::call(IndexMask full_mask, MFParams params, MFContext c
static_cast<const MFDestructInstruction &>(instruction);
const MFVariable *variable = destruct_instruction.variable();
variable_states.destruct(*variable, instr_info.mask());
scheduler.add_previous_instruction_indices(*destruct_instruction.next(), instr_info);
scheduler.update_instruction_pointer(*destruct_instruction.next());
break;
}
case MFInstructionType::Dummy: {
const MFDummyInstruction &dummy_instruction = static_cast<const MFDummyInstruction &>(
instruction);
scheduler.add_previous_instruction_indices(*dummy_instruction.next(), instr_info);
scheduler.update_instruction_pointer(*dummy_instruction.next());
break;
}
case MFInstructionType::Return: {
/* Don't insert the indices back into the scheduler. */
scheduler.pop();
break;
}
}