Merge branch 'master' into sculpt-dev

commit 59f0811ac9
Pablo Dobarro committed on 2021-06-16 21:01:43 +02:00
170 changed files with 2741 additions and 1053 deletions

View File

@ -388,6 +388,10 @@ endif()
if(WITH_TBB)
find_package(TBB)
if(NOT TBB_FOUND)
message(WARNING "TBB not found, disabling WITH_TBB")
set(WITH_TBB OFF)
endif()
endif()
if(WITH_POTRACE)

View File

@ -457,6 +457,10 @@ endif()
if(WITH_TBB)
find_package_wrapper(TBB)
if(NOT TBB_FOUND)
message(WARNING "TBB not found, disabling WITH_TBB")
set(WITH_TBB OFF)
endif()
endif()
if(WITH_XR_OPENXR)

View File

@ -289,11 +289,10 @@ static PyObject *render_func(PyObject * /*self*/, PyObject *args)
RNA_pointer_create(NULL, &RNA_Depsgraph, (ID *)PyLong_AsVoidPtr(pydepsgraph), &depsgraphptr);
BL::Depsgraph b_depsgraph(depsgraphptr);
/* Allow Blender to execute other Python scripts, and isolate TBB tasks so we
* don't get deadlocks with Blender threads accessing shared data like images. */
/* Allow Blender to execute other Python scripts. */
python_thread_state_save(&session->python_thread_state);
tbb::this_task_arena::isolate([&] { session->render(b_depsgraph); });
session->render(b_depsgraph);
python_thread_state_restore(&session->python_thread_state);
@ -330,8 +329,7 @@ static PyObject *bake_func(PyObject * /*self*/, PyObject *args)
python_thread_state_save(&session->python_thread_state);
tbb::this_task_arena::isolate(
[&] { session->bake(b_depsgraph, b_object, pass_type, pass_filter, width, height); });
session->bake(b_depsgraph, b_object, pass_type, pass_filter, width, height);
python_thread_state_restore(&session->python_thread_state);
@ -377,7 +375,7 @@ static PyObject *reset_func(PyObject * /*self*/, PyObject *args)
python_thread_state_save(&session->python_thread_state);
tbb::this_task_arena::isolate([&] { session->reset_session(b_data, b_depsgraph); });
session->reset_session(b_data, b_depsgraph);
python_thread_state_restore(&session->python_thread_state);
@ -399,7 +397,7 @@ static PyObject *sync_func(PyObject * /*self*/, PyObject *args)
python_thread_state_save(&session->python_thread_state);
tbb::this_task_arena::isolate([&] { session->synchronize(b_depsgraph); });
session->synchronize(b_depsgraph);
python_thread_state_restore(&session->python_thread_state);

View File

@ -736,13 +736,14 @@ static void process_uvs(CachedData &cache,
const IV2fGeomParam::Sample &sample,
double time)
{
if (scope != kFacevaryingScope) {
if (scope != kFacevaryingScope && scope != kVaryingScope && scope != kVertexScope) {
return;
}
const array<int> *uv_loops = cache.uv_loops.data_for_time_no_check(time).get_data_or_null();
if (!uv_loops) {
/* It's ok to not have loop indices, as long as the scope is not face-varying. */
if (!uv_loops && scope == kFacevaryingScope) {
return;
}
@ -766,9 +767,27 @@ static void process_uvs(CachedData &cache,
const uint32_t *indices = sample.getIndices()->get();
const V2f *values = sample.getVals()->get();
for (const int uv_loop_index : *uv_loops) {
const uint32_t index = indices[uv_loop_index];
*data_float2++ = make_float2(values[index][0], values[index][1]);
if (scope == kFacevaryingScope) {
for (const int uv_loop_index : *uv_loops) {
const uint32_t index = indices[uv_loop_index];
*data_float2++ = make_float2(values[index][0], values[index][1]);
}
}
else if (scope == kVaryingScope || scope == kVertexScope) {
if (triangles) {
for (size_t i = 0; i < triangles->size(); i++) {
const int3 t = (*triangles)[i];
*data_float2++ = make_float2(values[t.x][0], values[t.x][1]);
*data_float2++ = make_float2(values[t.y][0], values[t.y][1]);
*data_float2++ = make_float2(values[t.z][0], values[t.z][1]);
}
}
else if (corners) {
for (size_t i = 0; i < corners->size(); i++) {
const int c = (*corners)[i];
*data_float2++ = make_float2(values[c][0], values[c][1]);
}
}
}
attribute.data.add_data(data, time);

View File

@ -139,7 +139,6 @@ static void initRawInput()
#undef DEVICE_COUNT
}
typedef HRESULT(API *GHOST_WIN32_SetProcessDpiAwareness)(PROCESS_DPI_AWARENESS);
typedef BOOL(API *GHOST_WIN32_EnableNonClientDpiScaling)(HWND);
GHOST_SystemWin32::GHOST_SystemWin32()

View File

@ -55,9 +55,6 @@ typedef BOOL(API *GHOST_WIN32_WTOverlap)(HCTX, BOOL);
// typedefs for user32 functions to allow dynamic loading of Windows 10 DPI scaling functions
typedef UINT(API *GHOST_WIN32_GetDpiForWindow)(HWND);
#ifndef USER_DEFAULT_SCREEN_DPI
# define USER_DEFAULT_SCREEN_DPI 96
#endif // USER_DEFAULT_SCREEN_DPI
struct GHOST_PointerInfoWin32 {
GHOST_TInt32 pointerId;

View File

@ -1572,7 +1572,7 @@ class I18n:
if not os.path.isfile(dst):
print("WARNING: trying to write as python code into {}, which is not a file! Aborting.".format(dst))
return
prev, txt, nxt, has_trans = self._parser_check_file(dst)
prev, txt, nxt, _has_trans = self._parser_check_file(dst)
if prev is None and nxt is None:
print("WARNING: Looks like given python file {} has no auto-generated translations yet, will be added "
"at the end of the file, you can move that section later if needed...".format(dst))

View File

@ -71,7 +71,7 @@ def rtl_process_po(args, settings):
po.write(kind="PO", dest=args.dst)
def language_menu(args, settings):
def language_menu(_args, settings):
# 'DEFAULT' and en_US are always valid, fully-translated "languages"!
stats = {"DEFAULT": 1.0, "en_US": 1.0}

View File

@ -84,10 +84,10 @@ def protect_format_seq(msg):
# LRM = "\u200E"
# RLM = "\u200F"
LRE = "\u202A"
RLE = "\u202B"
# RLE = "\u202B"
PDF = "\u202C"
LRO = "\u202D"
RLO = "\u202E"
# RLO = "\u202E"
# uctrl = {LRE, RLE, PDF, LRO, RLO}
# Most likely incomplete, but seems to cover current needs.
format_codes = set("tslfd")

View File

@ -240,7 +240,7 @@ def RKS_GEN_custom_props(_ksi, _context, ks, data):
prop_path = '["%s"]' % bpy.utils.escape_identifier(cprop_name)
try:
rna_property = data.path_resolve(prop_path, False)
except ValueError as ex:
except ValueError:
# This happens when a custom property is set to None. In that case it cannot
# be converted to an FCurve-compatible value, so we can't keyframe it anyway.
continue

View File

@ -23,7 +23,7 @@ from bpy.app.handlers import persistent
@persistent
def load_handler(dummy):
def load_handler(_):
import bpy
# 2D Animation

View File

@ -21,7 +21,7 @@ from bpy.app.handlers import persistent
@persistent
def load_handler(dummy):
def load_handler(_):
import bpy
# Apply subdivision modifier on startup
bpy.ops.object.mode_set(mode='OBJECT')

View File

@ -21,7 +21,7 @@ from bpy.app.handlers import persistent
@persistent
def load_handler(dummy):
def load_handler(_):
from bpy import context
screen = context.screen
for area in screen.areas:

View File

@ -331,14 +331,15 @@ class NODE_OT_active_preview_toggle(Operator):
active_node = ntree.nodes.active
if active_node.active_preview:
self.disable_preview(context, ntree, active_node)
self._disable_preview(context, active_node)
else:
self.enable_preview(context, node_editor, ntree, active_node)
self._enable_preview(context, node_editor, ntree, active_node)
return {'FINISHED'}
def enable_preview(self, context, node_editor, ntree, active_node):
spreadsheets = self.find_unpinned_spreadsheets(context)
@classmethod
def _enable_preview(cls, context, node_editor, ntree, active_node):
spreadsheets = cls._find_unpinned_spreadsheets(context)
for spreadsheet in spreadsheets:
spreadsheet.set_geometry_node_context(node_editor, active_node)
@ -347,14 +348,16 @@ class NODE_OT_active_preview_toggle(Operator):
node.active_preview = False
active_node.active_preview = True
def disable_preview(self, context, ntree, active_node):
spreadsheets = self.find_unpinned_spreadsheets(context)
@classmethod
def _disable_preview(cls, context, active_node):
spreadsheets = cls._find_unpinned_spreadsheets(context)
for spreadsheet in spreadsheets:
spreadsheet.context_path.clear()
active_node.active_preview = False
def find_unpinned_spreadsheets(self, context):
@staticmethod
def _find_unpinned_spreadsheets(context):
spreadsheets = []
for window in context.window_manager.windows:
for area in window.screen.areas:

View File

@ -594,9 +594,9 @@ class DOPESHEET_MT_delete(Menu):
class DOPESHEET_MT_context_menu(Menu):
bl_label = "Dope Sheet Context Menu"
def draw(self, _context):
def draw(self, context):
layout = self.layout
st = _context.space_data
st = context.space_data
layout.operator_context = 'INVOKE_DEFAULT'

View File

@ -914,6 +914,12 @@ class IMAGE_PT_active_mask_point(MASK_PT_point, Panel):
bl_category = "Mask"
class IMAGE_PT_mask_display(MASK_PT_display, Panel):
bl_space_type = 'IMAGE_EDITOR'
bl_region_type = 'UI'
bl_category = "Mask"
# --- end mask ---
class IMAGE_PT_snapping(Panel):
@ -1616,6 +1622,7 @@ classes = (
IMAGE_PT_active_tool,
IMAGE_PT_mask,
IMAGE_PT_mask_layers,
IMAGE_PT_mask_display,
IMAGE_PT_active_mask_spline,
IMAGE_PT_active_mask_point,
IMAGE_PT_snapping,

View File

@ -614,9 +614,7 @@ class USERPREF_PT_system_os_settings(SystemPanel, CenterAlignMixIn, Panel):
import sys
return sys.platform[:3] == "win"
def draw_centered(self, context, layout):
prefs = context.preferences
def draw_centered(self, _context, layout):
layout.label(text="Make this installation your default Blender")
split = layout.split(factor=0.4)
split.alignment = 'RIGHT'

View File

@ -4025,7 +4025,7 @@ class VIEW3D_MT_edit_mesh_vertices(Menu):
class VIEW3D_MT_edit_mesh_edges_data(Menu):
bl_label = "Edge Data"
def draw(self, context):
def draw(self, _context):
layout = self.layout
layout.operator_context = 'INVOKE_REGION_WIN'

View File

@ -532,7 +532,7 @@ class WholeCharacterMixin:
prop_path = '["%s"]' % bpy.utils.escape_identifier(prop)
try:
rna_property = bone.path_resolve(prop_path, False)
except ValueError as ex:
except ValueError:
# This happens when a custom property is set to None. In that case it cannot
# be converted to an FCurve-compatible value, so we can't keyframe it anyway.
continue
@ -582,7 +582,7 @@ class BUILTIN_KSI_DeltaLocation(KeyingSetInfo):
iterator = keyingsets_utils.RKS_ITER_selected_objects
# generator - delta location channels only
def generate(self, context, ks, data):
def generate(self, _context, ks, data):
# get id-block and path info
id_block, base_path, grouping = keyingsets_utils.get_transform_generators_base_info(data)
@ -608,7 +608,7 @@ class BUILTIN_KSI_DeltaRotation(KeyingSetInfo):
iterator = keyingsets_utils.RKS_ITER_selected_objects
# generator - delta location channels only
def generate(self, context, ks, data):
def generate(self, _context, ks, data):
# get id-block and path info
id_block, base_path, grouping = keyingsets_utils.get_transform_generators_base_info(data)
@ -642,7 +642,7 @@ class BUILTIN_KSI_DeltaScale(KeyingSetInfo):
iterator = keyingsets_utils.RKS_ITER_selected_objects
# generator - delta location channels only
def generate(self, context, ks, data):
def generate(self, _context, ks, data):
# get id-block and path info
id_block, base_path, grouping = keyingsets_utils.get_transform_generators_base_info(data)

View File

@ -66,7 +66,7 @@ class GeometryNodeCategory(SortedNodeCategory):
# menu entry for node group tools
def group_tools_draw(self, layout, context):
def group_tools_draw(self, layout, _context):
layout.operator("node.group_make")
layout.operator("node.group_ungroup")
layout.separator()
@ -514,6 +514,7 @@ geometry_node_categories = [
NodeItem("GeometryNodeDeleteGeometry"),
NodeItem("GeometryNodeTransform"),
NodeItem("GeometryNodeJoinGeometry"),
NodeItem("GeometryNodeSeparateComponents"),
]),
GeometryNodeCategory("GEO_INPUT", "Input", items=[
NodeItem("GeometryNodeObjectInfo"),

View File

@ -156,7 +156,7 @@ struct ListBase *BKE_curve_editNurbs_get(struct Curve *cu);
void BKE_curve_bevelList_free(struct ListBase *bev);
void BKE_curve_bevelList_make(struct Object *ob, struct ListBase *nurbs, bool for_render);
ListBase BKE_curve_bevel_make(const struct Curve *ob);
ListBase BKE_curve_bevel_make(const struct Curve *curve);
void BKE_curve_forward_diff_bezier(
float q0, float q1, float q2, float q3, float *p, int it, int stride);

View File

@ -294,6 +294,22 @@ bool BKE_gpencil_has_geometry_modifiers(struct Object *ob);
bool BKE_gpencil_has_time_modifiers(struct Object *ob);
bool BKE_gpencil_has_transform_modifiers(struct Object *ob);
/* Stores the maximum calculation range in the whole modifier stack for line art so the cache can
* cover everything that will be visible. */
typedef struct GpencilLineartLimitInfo {
char min_level;
char max_level;
short edge_types;
} GpencilLineartLimitInfo;
GpencilLineartLimitInfo BKE_gpencil_get_lineart_modifier_limits(const struct Object *ob);
void BKE_gpencil_set_lineart_modifier_limits(struct GpencilModifierData *md,
const struct GpencilLineartLimitInfo *info,
const bool is_first_lineart);
bool BKE_gpencil_is_first_lineart_in_stack(const struct Object *ob,
const struct GpencilModifierData *md);
void BKE_gpencil_lattice_init(struct Object *ob);
void BKE_gpencil_lattice_clear(struct Object *ob);

View File

@ -1436,6 +1436,7 @@ int ntreeTexExecTree(struct bNodeTree *ntree,
#define GEO_NODE_CONVEX_HULL 1056
#define GEO_NODE_CURVE_TO_POINTS 1057
#define GEO_NODE_CURVE_REVERSE 1058
#define GEO_NODE_SEPARATE_COMPONENTS 1059
/** \} */

View File

@ -160,6 +160,8 @@ void BKE_shaderfx_free_ex(struct ShaderFxData *fx, const int flag);
void BKE_shaderfx_free(struct ShaderFxData *fx);
bool BKE_shaderfx_unique_name(struct ListBase *shaderfx, struct ShaderFxData *fx);
bool BKE_shaderfx_depends_ontime(struct ShaderFxData *fx);
bool BKE_shaderfx_is_nonlocal_in_liboverride(const struct Object *ob,
const struct ShaderFxData *shaderfx);
struct ShaderFxData *BKE_shaderfx_findby_type(struct Object *ob, ShaderFxType type);
struct ShaderFxData *BKE_shaderfx_findby_name(struct Object *ob, const char *name);
void BKE_shaderfx_copydata_generic(const struct ShaderFxData *fx_src, struct ShaderFxData *fx_dst);

View File

@ -337,13 +337,14 @@ class BezierSpline final : public Spline {
virtual blender::fn::GVArrayPtr interpolate_to_evaluated_points(
const blender::fn::GVArray &source_data) const override;
void evaluate_segment(const int index,
const int next_index,
blender::MutableSpan<blender::float3> positions) const;
bool segment_is_vector(const int start_index) const;
private:
void ensure_auto_handles() const;
void correct_end_tangents() const final;
bool segment_is_vector(const int start_index) const;
void evaluate_bezier_segment(const int index,
const int next_index,
blender::MutableSpan<blender::float3> positions) const;
};
/**
@ -535,6 +536,7 @@ struct CurveEval {
blender::Span<SplinePtr> splines() const;
blender::MutableSpan<SplinePtr> splines();
void resize(const int size);
void add_spline(SplinePtr spline);
void remove_splines(blender::IndexMask mask);

View File

@ -31,6 +31,7 @@
#include "BLI_linklist.h"
#include "BLI_math.h"
#include "BLI_task.h"
#include "BLI_threads.h"
#include "BLI_utildefines.h"
@ -160,6 +161,26 @@ void bvhcache_free(BVHCache *bvh_cache)
MEM_freeN(bvh_cache);
}
/* BVH tree balancing inside a mutex lock must be run in isolation. Balancing
* is multithreaded, and we do not want the current thread to start another task
* that may involve acquiring the same mutex lock that it is waiting for. */
static void bvhtree_balance_isolated(void *userdata)
{
BLI_bvhtree_balance((BVHTree *)userdata);
}
static void bvhtree_balance(BVHTree *tree, const bool isolate)
{
if (tree) {
if (isolate) {
BLI_task_isolate(bvhtree_balance_isolated, tree);
}
else {
BLI_bvhtree_balance(tree);
}
}
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Local Callbacks
@ -566,7 +587,6 @@ static BVHTree *bvhtree_from_editmesh_verts_create_tree(float epsilon,
BLI_bvhtree_insert(tree, i, eve->co, 1);
}
BLI_assert(BLI_bvhtree_get_len(tree) == verts_num_active);
BLI_bvhtree_balance(tree);
}
return tree;
@ -600,7 +620,6 @@ static BVHTree *bvhtree_from_mesh_verts_create_tree(float epsilon,
BLI_bvhtree_insert(tree, i, vert[i].co, 1);
}
BLI_assert(BLI_bvhtree_get_len(tree) == verts_num_active);
BLI_bvhtree_balance(tree);
}
}
@ -649,6 +668,7 @@ BVHTree *bvhtree_from_editmesh_verts_ex(BVHTreeFromEditMesh *data,
if (data->cached == false) {
tree = bvhtree_from_editmesh_verts_create_tree(
epsilon, tree_type, axis, em, verts_mask, verts_num_active);
bvhtree_balance(tree, true);
/* Save on cache for later use */
/* printf("BVHTree built and saved on cache\n"); */
@ -660,6 +680,7 @@ BVHTree *bvhtree_from_editmesh_verts_ex(BVHTreeFromEditMesh *data,
else {
tree = bvhtree_from_editmesh_verts_create_tree(
epsilon, tree_type, axis, em, verts_mask, verts_num_active);
bvhtree_balance(tree, false);
}
if (tree) {
@ -711,6 +732,7 @@ BVHTree *bvhtree_from_mesh_verts_ex(BVHTreeFromMesh *data,
if (in_cache == false) {
tree = bvhtree_from_mesh_verts_create_tree(
epsilon, tree_type, axis, vert, verts_num, verts_mask, verts_num_active);
bvhtree_balance(tree, bvh_cache_p != NULL);
if (bvh_cache_p) {
/* Save on cache for later use */
@ -771,7 +793,6 @@ static BVHTree *bvhtree_from_editmesh_edges_create_tree(float epsilon,
BLI_bvhtree_insert(tree, i, co[0], 2);
}
BLI_assert(BLI_bvhtree_get_len(tree) == edges_num_active);
BLI_bvhtree_balance(tree);
}
return tree;
@ -809,7 +830,6 @@ static BVHTree *bvhtree_from_mesh_edges_create_tree(const MVert *vert,
BLI_bvhtree_insert(tree, i, co[0], 2);
}
BLI_bvhtree_balance(tree);
}
}
@ -861,7 +881,7 @@ BVHTree *bvhtree_from_editmesh_edges_ex(BVHTreeFromEditMesh *data,
if (data->cached == false) {
tree = bvhtree_from_editmesh_edges_create_tree(
epsilon, tree_type, axis, em, edges_mask, edges_num_active);
bvhtree_balance(tree, true);
/* Save on cache for later use */
/* printf("BVHTree built and saved on cache\n"); */
bvhcache_insert(bvh_cache, tree, bvh_cache_type);
@ -872,6 +892,7 @@ BVHTree *bvhtree_from_editmesh_edges_ex(BVHTreeFromEditMesh *data,
else {
tree = bvhtree_from_editmesh_edges_create_tree(
epsilon, tree_type, axis, em, edges_mask, edges_num_active);
bvhtree_balance(tree, false);
}
if (tree) {
@ -928,12 +949,17 @@ BVHTree *bvhtree_from_mesh_edges_ex(BVHTreeFromMesh *data,
vert, edge, edges_num, edges_mask, edges_num_active, epsilon, tree_type, axis);
if (bvh_cache_p) {
bvhtree_balance(tree, true);
BVHCache *bvh_cache = *bvh_cache_p;
/* Save on cache for later use */
/* printf("BVHTree built and saved on cache\n"); */
bvhcache_insert(bvh_cache, tree, bvh_cache_type);
in_cache = true;
}
else {
bvhtree_balance(tree, false);
}
}
if (bvh_cache_p) {
@ -994,7 +1020,6 @@ static BVHTree *bvhtree_from_mesh_faces_create_tree(float epsilon,
}
}
BLI_assert(BLI_bvhtree_get_len(tree) == faces_num_active);
BLI_bvhtree_balance(tree);
}
}
@ -1057,6 +1082,7 @@ BVHTree *bvhtree_from_mesh_faces_ex(BVHTreeFromMesh *data,
if (in_cache == false) {
tree = bvhtree_from_mesh_faces_create_tree(
epsilon, tree_type, axis, vert, face, numFaces, faces_mask, faces_num_active);
bvhtree_balance(tree, bvh_cache_p != NULL);
if (bvh_cache_p) {
/* Save on cache for later use */
@ -1127,7 +1153,6 @@ static BVHTree *bvhtree_from_editmesh_looptri_create_tree(float epsilon,
}
}
BLI_assert(BLI_bvhtree_get_len(tree) == looptri_num_active);
BLI_bvhtree_balance(tree);
}
}
@ -1173,7 +1198,6 @@ static BVHTree *bvhtree_from_mesh_looptri_create_tree(float epsilon,
}
}
BLI_assert(BLI_bvhtree_get_len(tree) == looptri_num_active);
BLI_bvhtree_balance(tree);
}
}
@ -1229,6 +1253,7 @@ BVHTree *bvhtree_from_editmesh_looptri_ex(BVHTreeFromEditMesh *data,
bool in_cache = bvhcache_find(
bvh_cache_p, bvh_cache_type, &tree, &lock_started, mesh_eval_mutex);
BVHCache *bvh_cache = *bvh_cache_p;
bvhtree_balance(tree, true);
if (in_cache == false) {
tree = bvhtree_from_editmesh_looptri_create_tree(
@ -1243,6 +1268,7 @@ BVHTree *bvhtree_from_editmesh_looptri_ex(BVHTreeFromEditMesh *data,
else {
tree = bvhtree_from_editmesh_looptri_create_tree(
epsilon, tree_type, axis, em, looptri_mask, looptri_num_active);
bvhtree_balance(tree, false);
}
if (tree) {
@ -1303,6 +1329,8 @@ BVHTree *bvhtree_from_mesh_looptri_ex(BVHTreeFromMesh *data,
looptri_mask,
looptri_num_active);
bvhtree_balance(tree, bvh_cache_p != NULL);
if (bvh_cache_p) {
BVHCache *bvh_cache = *bvh_cache_p;
bvhcache_insert(bvh_cache, tree, bvh_cache_type);
@ -1742,7 +1770,7 @@ BVHTree *BKE_bvhtree_from_pointcloud_get(BVHTreeFromPointCloud *data,
BLI_bvhtree_insert(tree, i, pointcloud->co[i], 1);
}
BLI_assert(BLI_bvhtree_get_len(tree) == pointcloud->totpoint);
BLI_bvhtree_balance(tree);
bvhtree_balance(tree, false);
data->coords = pointcloud->co;
data->tree = tree;

View File

@ -42,6 +42,12 @@ blender::MutableSpan<SplinePtr> CurveEval::splines()
return splines_;
}
void CurveEval::resize(const int size)
{
splines_.resize(size);
attributes.reallocate(size);
}
/**
* \warning Call #reallocate on the spline's attributes after adding all splines.
*/

View File

@ -362,7 +362,7 @@ void BKE_editmesh_loop_tangent_calc(BMEditMesh *em,
/* Calculation */
if (em->tottri != 0) {
TaskPool *task_pool;
task_pool = BLI_task_pool_create(NULL, TASK_PRIORITY_LOW, TASK_ISOLATION_ON);
task_pool = BLI_task_pool_create(NULL, TASK_PRIORITY_LOW);
tangent_mask_curr = 0;
/* Calculate tangent layers */

View File

@ -650,9 +650,16 @@ static void join_instance_groups_pointcloud(Span<GeometryInstanceGroup> set_grou
static void join_instance_groups_volume(Span<GeometryInstanceGroup> set_groups,
GeometrySet &result)
{
/* Not yet supported. Joining volume grids with the same name requires resampling of at least
* one of the grids. The cell size of the resulting volume has to be determined somehow. */
UNUSED_VARS(set_groups, result);
/* Not yet supported; for now only return the first volume. Joining volume grids with the same
* name requires resampling of at least one of the grids. The cell size of the resulting volume
* has to be determined somehow. */
for (const GeometryInstanceGroup &set_group : set_groups) {
const GeometrySet &set = set_group.geometry_set;
if (set.has<VolumeComponent>()) {
result.add(*set.get_component_for_read<VolumeComponent>());
return;
}
}
}
static void join_instance_groups_curve(Span<GeometryInstanceGroup> set_groups, GeometrySet &result)

View File

@ -55,6 +55,7 @@
#include "DEG_depsgraph.h"
#include "DEG_depsgraph_query.h"
#include "MOD_gpencil_lineart.h"
#include "MOD_gpencil_modifiertypes.h"
#include "BLO_read_write.h"
@ -202,6 +203,62 @@ bool BKE_gpencil_has_transform_modifiers(Object *ob)
return false;
}
GpencilLineartLimitInfo BKE_gpencil_get_lineart_modifier_limits(const Object *ob)
{
GpencilLineartLimitInfo info = {0};
bool is_first = true;
LISTBASE_FOREACH (GpencilModifierData *, md, &ob->greasepencil_modifiers) {
if (md->type == eGpencilModifierType_Lineart) {
LineartGpencilModifierData *lmd = (LineartGpencilModifierData *)md;
if (is_first || (lmd->flags & LRT_GPENCIL_USE_CACHE)) {
info.min_level = MIN2(info.min_level, lmd->level_start);
info.max_level = MAX2(info.max_level,
(lmd->use_multiple_levels ? lmd->level_end : lmd->level_start));
info.edge_types |= lmd->edge_types;
}
}
}
return info;
}
void BKE_gpencil_set_lineart_modifier_limits(GpencilModifierData *md,
const GpencilLineartLimitInfo *info,
const bool is_first_lineart)
{
BLI_assert(md->type == eGpencilModifierType_Lineart);
LineartGpencilModifierData *lmd = (LineartGpencilModifierData *)md;
if (is_first_lineart || lmd->flags & LRT_GPENCIL_USE_CACHE) {
lmd->level_start_override = info->min_level;
lmd->level_end_override = info->max_level;
lmd->edge_types_override = info->edge_types;
}
else {
lmd->level_start_override = lmd->level_start;
lmd->level_end_override = lmd->level_end;
lmd->edge_types_override = lmd->edge_types;
}
}
bool BKE_gpencil_is_first_lineart_in_stack(const Object *ob, const GpencilModifierData *md)
{
if (md->type != eGpencilModifierType_Lineart) {
return false;
}
LISTBASE_FOREACH (GpencilModifierData *, gmd, &ob->greasepencil_modifiers) {
if (gmd->type == eGpencilModifierType_Lineart) {
if (gmd == md) {
return true;
}
else {
return false;
}
}
}
/* If we reach here it means md is not in ob's modifier stack. */
BLI_assert(false);
return false;
}
/* apply time modifiers */
static int gpencil_time_modifier(
Depsgraph *depsgraph, Scene *scene, Object *ob, bGPDlayer *gpl, int cfra, bool is_render)
@ -771,6 +828,8 @@ void BKE_gpencil_modifiers_calc(Depsgraph *depsgraph, Scene *scene, Object *ob)
BKE_gpencil_lattice_init(ob);
const bool time_remap = BKE_gpencil_has_time_modifiers(ob);
bool is_first_lineart = true;
GpencilLineartLimitInfo info = BKE_gpencil_get_lineart_modifier_limits(ob);
LISTBASE_FOREACH (GpencilModifierData *, md, &ob->greasepencil_modifiers) {
@ -781,6 +840,11 @@ void BKE_gpencil_modifiers_calc(Depsgraph *depsgraph, Scene *scene, Object *ob)
continue;
}
if (md->type == eGpencilModifierType_Lineart) {
BKE_gpencil_set_lineart_modifier_limits(md, &info, is_first_lineart);
is_first_lineart = false;
}
/* Apply geometry modifiers (add new geometry). */
if (mti && mti->generateStrokes) {
mti->generateStrokes(md, depsgraph, ob);
@ -806,6 +870,8 @@ void BKE_gpencil_modifiers_calc(Depsgraph *depsgraph, Scene *scene, Object *ob)
/* Clear any lattice data. */
BKE_gpencil_lattice_clear(ob);
MOD_lineart_clear_cache(&gpd->runtime.lineart_cache);
}
void BKE_gpencil_modifier_blend_write(BlendWriter *writer, ListBase *modbase)

View File

@ -68,6 +68,7 @@
#include "BLI_math_vector.h"
#include "BLI_mempool.h"
#include "BLI_system.h"
#include "BLI_task.h"
#include "BLI_threads.h"
#include "BLI_timecode.h" /* For stamp time-code format. */
#include "BLI_utildefines.h"
@ -882,6 +883,39 @@ Image *BKE_image_load_exists(Main *bmain, const char *filepath)
return BKE_image_load_exists_ex(bmain, filepath, NULL);
}
typedef struct ImageFillData {
short gen_type;
uint width;
uint height;
unsigned char *rect;
float *rect_float;
float fill_color[4];
} ImageFillData;
static void image_buf_fill_isolated(void *usersata_v)
{
ImageFillData *usersata = usersata_v;
const short gen_type = usersata->gen_type;
const uint width = usersata->width;
const uint height = usersata->height;
unsigned char *rect = usersata->rect;
float *rect_float = usersata->rect_float;
switch (gen_type) {
case IMA_GENTYPE_GRID:
BKE_image_buf_fill_checker(rect, rect_float, width, height);
break;
case IMA_GENTYPE_GRID_COLOR:
BKE_image_buf_fill_checker_color(rect, rect_float, width, height);
break;
default:
BKE_image_buf_fill_color(rect, rect_float, width, height, usersata->fill_color);
break;
}
}
static ImBuf *add_ibuf_size(unsigned int width,
unsigned int height,
const char *name,
@ -944,17 +978,16 @@ static ImBuf *add_ibuf_size(unsigned int width,
STRNCPY(ibuf->name, name);
switch (gen_type) {
case IMA_GENTYPE_GRID:
BKE_image_buf_fill_checker(rect, rect_float, width, height);
break;
case IMA_GENTYPE_GRID_COLOR:
BKE_image_buf_fill_checker_color(rect, rect_float, width, height);
break;
default:
BKE_image_buf_fill_color(rect, rect_float, width, height, fill_color);
break;
}
ImageFillData data;
data.gen_type = gen_type;
data.width = width;
data.height = height;
data.rect = rect;
data.rect_float = rect_float;
copy_v4_v4(data.fill_color, fill_color);
BLI_task_isolate(image_buf_fill_isolated, &data);
return ibuf;
}

View File

@ -2335,8 +2335,7 @@ bool BKE_lib_override_library_main_operations_create(Main *bmain, const bool for
}
struct LibOverrideOpCreateData create_pool_data = {.bmain = bmain, .changed = false};
TaskPool *task_pool = BLI_task_pool_create(
&create_pool_data, TASK_PRIORITY_HIGH, TASK_ISOLATION_ON);
TaskPool *task_pool = BLI_task_pool_create(&create_pool_data, TASK_PRIORITY_HIGH);
FOREACH_MAIN_ID_BEGIN (bmain, id) {
if (!ID_IS_LINKED(id) && ID_IS_OVERRIDE_LIBRARY_REAL(id) &&

View File

@ -1715,8 +1715,7 @@ void BKE_mesh_normals_loop_split(const MVert *mverts,
loop_split_generator(NULL, &common_data);
}
else {
TaskPool *task_pool = BLI_task_pool_create(
&common_data, TASK_PRIORITY_HIGH, TASK_ISOLATION_ON);
TaskPool *task_pool = BLI_task_pool_create(&common_data, TASK_PRIORITY_HIGH);
loop_split_generator(task_pool, &common_data);

View File

@ -656,7 +656,7 @@ void BKE_mesh_calc_loop_tangent_ex(const MVert *mvert,
/* Calculation */
if (looptri_len != 0) {
TaskPool *task_pool = BLI_task_pool_create(NULL, TASK_PRIORITY_LOW, TASK_ISOLATION_ON);
TaskPool *task_pool = BLI_task_pool_create(NULL, TASK_PRIORITY_LOW);
tangent_mask_curr = 0;
/* Calculate tangent layers */

View File

@ -87,7 +87,7 @@ static void reserve_hash_maps(const Mesh *mesh,
MutableSpan<EdgeMap> edge_maps)
{
const int totedge_guess = std::max(keep_existing_edges ? mesh->totedge : 0, mesh->totpoly * 2);
parallel_for_each(
threading::parallel_for_each(
edge_maps, [&](EdgeMap &edge_map) { edge_map.reserve(totedge_guess / edge_maps.size()); });
}
@ -96,7 +96,7 @@ static void add_existing_edges_to_hash_maps(Mesh *mesh,
uint32_t parallel_mask)
{
/* Assume existing edges are valid. */
parallel_for_each(edge_maps, [&](EdgeMap &edge_map) {
threading::parallel_for_each(edge_maps, [&](EdgeMap &edge_map) {
const int task_index = &edge_map - &edge_maps[0];
for (const MEdge &edge : Span(mesh->medge, mesh->totedge)) {
OrderedEdge ordered_edge{edge.v1, edge.v2};
@ -113,7 +113,7 @@ static void add_polygon_edges_to_hash_maps(Mesh *mesh,
uint32_t parallel_mask)
{
const Span<MLoop> loops{mesh->mloop, mesh->totloop};
parallel_for_each(edge_maps, [&](EdgeMap &edge_map) {
threading::parallel_for_each(edge_maps, [&](EdgeMap &edge_map) {
const int task_index = &edge_map - &edge_maps[0];
for (const MPoly &poly : Span(mesh->mpoly, mesh->totpoly)) {
Span<MLoop> poly_loops = loops.slice(poly.loopstart, poly.totloop);
@ -146,7 +146,7 @@ static void serialize_and_initialize_deduplicated_edges(MutableSpan<EdgeMap> edg
edge_index_offsets[i + 1] = edge_index_offsets[i] + edge_maps[i].size();
}
parallel_for_each(edge_maps, [&](EdgeMap &edge_map) {
threading::parallel_for_each(edge_maps, [&](EdgeMap &edge_map) {
const int task_index = &edge_map - &edge_maps[0];
int new_edge_index = edge_index_offsets[task_index];
@ -174,7 +174,7 @@ static void update_edge_indices_in_poly_loops(Mesh *mesh,
uint32_t parallel_mask)
{
const MutableSpan<MLoop> loops{mesh->mloop, mesh->totloop};
parallel_for(IndexRange(mesh->totpoly), 100, [&](IndexRange range) {
threading::parallel_for(IndexRange(mesh->totpoly), 100, [&](IndexRange range) {
for (const int poly_index : range) {
MPoly &poly = mesh->mpoly[poly_index];
MutableSpan<MLoop> poly_loops = loops.slice(poly.loopstart, poly.totloop);
@ -215,7 +215,7 @@ static int get_parallel_maps_count(const Mesh *mesh)
static void clear_hash_tables(MutableSpan<EdgeMap> edge_maps)
{
parallel_for_each(edge_maps, [](EdgeMap &edge_map) { edge_map.clear(); });
threading::parallel_for_each(edge_maps, [](EdgeMap &edge_map) { edge_map.clear(); });
}
} // namespace blender::bke::calc_edges

View File

@ -5084,6 +5084,7 @@ static void registerGeometryNodes()
register_node_type_geo_points_to_volume();
register_node_type_geo_sample_texture();
register_node_type_geo_select_by_material();
register_node_type_geo_separate_components();
register_node_type_geo_subdivide();
register_node_type_geo_subdivision_surface();
register_node_type_geo_switch();

View File

@ -663,7 +663,7 @@ void BKE_ocean_simulate(struct Ocean *o, float t, float scale, float chop_amount
osd.scale = scale;
osd.chop_amount = chop_amount;
pool = BLI_task_pool_create(&osd, TASK_PRIORITY_HIGH, TASK_ISOLATION_ON);
pool = BLI_task_pool_create(&osd, TASK_PRIORITY_HIGH);
BLI_rw_mutex_lock(&o->oceanmutex, THREAD_LOCK_WRITE);

View File

@ -3179,7 +3179,7 @@ void psys_cache_child_paths(ParticleSimulationData *sim,
return;
}
task_pool = BLI_task_pool_create(&ctx, TASK_PRIORITY_LOW, TASK_ISOLATION_ON);
task_pool = BLI_task_pool_create(&ctx, TASK_PRIORITY_LOW);
totchild = ctx.totchild;
totparent = ctx.totparent;

View File

@ -1330,7 +1330,7 @@ static void distribute_particles_on_dm(ParticleSimulationData *sim, int from)
return;
}
task_pool = BLI_task_pool_create(&ctx, TASK_PRIORITY_LOW, TASK_ISOLATION_ON);
task_pool = BLI_task_pool_create(&ctx, TASK_PRIORITY_LOW);
totpart = (from == PART_FROM_CHILD ? sim->psys->totchild : sim->psys->totpart);
psys_tasks_create(&ctx, 0, totpart, &tasks, &numtasks);

View File

@ -1145,6 +1145,7 @@ static void scene_blend_read_data(BlendDataReader *reader, ID *id)
BLO_read_data_address(reader, &ed->act_seq);
ed->cache = NULL;
ed->prefetch_job = NULL;
ed->runtime.sequence_lookup = NULL;
/* recursive link sequences, lb will be correctly initialized */
link_recurs_seq(reader, &ed->seqbase);

View File

@ -467,7 +467,7 @@ static void panel_list_copy(ListBase *newlb, const ListBase *lb)
Panel *panel = lb->first;
for (; new_panel; new_panel = new_panel->next, panel = panel->next) {
new_panel->activedata = NULL;
new_panel->runtime.custom_data_ptr = NULL;
memset(&new_panel->runtime, 0x0, sizeof(new_panel->runtime));
panel_list_copy(&new_panel->children, &panel->children);
}
}
@ -476,6 +476,8 @@ ARegion *BKE_area_region_copy(const SpaceType *st, const ARegion *region)
{
ARegion *newar = MEM_dupallocN(region);
memset(&newar->runtime, 0x0, sizeof(newar->runtime));
newar->prev = newar->next = NULL;
BLI_listbase_clear(&newar->handlers);
BLI_listbase_clear(&newar->uiblocks);
@ -1419,6 +1421,8 @@ static void direct_link_panel_list(BlendDataReader *reader, ListBase *lb)
static void direct_link_region(BlendDataReader *reader, ARegion *region, int spacetype)
{
memset(&region->runtime, 0x0, sizeof(region->runtime));
direct_link_panel_list(reader, &region->panels);
BLO_read_list(reader, &region->panels_category_active);
@ -1560,16 +1564,15 @@ static void direct_link_area(BlendDataReader *reader, ScrArea *area)
if (sl->spacetype == SPACE_VIEW3D) {
View3D *v3d = (View3D *)sl;
memset(&v3d->runtime, 0x0, sizeof(v3d->runtime));
if (v3d->gpd) {
BLO_read_data_address(reader, &v3d->gpd);
BKE_gpencil_blend_read_data(reader, v3d->gpd);
}
BLO_read_data_address(reader, &v3d->localvd);
/* Runtime data */
v3d->runtime.properties_storage = NULL;
v3d->runtime.flag = 0;
/* render can be quite heavy, set to solid on load */
if (v3d->shading.type == OB_RENDER) {
v3d->shading.type = OB_SOLID;
@ -1584,7 +1587,7 @@ static void direct_link_area(BlendDataReader *reader, ScrArea *area)
SpaceGraph *sipo = (SpaceGraph *)sl;
BLO_read_data_address(reader, &sipo->ads);
BLI_listbase_clear(&sipo->runtime.ghost_curves);
memset(&sipo->runtime, 0x0, sizeof(sipo->runtime));
}
else if (sl->spacetype == SPACE_NLA) {
SpaceNla *snla = (SpaceNla *)sl;
@ -1652,7 +1655,7 @@ static void direct_link_area(BlendDataReader *reader, ScrArea *area)
}
else if (sl->spacetype == SPACE_TEXT) {
SpaceText *st = (SpaceText *)sl;
memset(&st->runtime, 0, sizeof(st->runtime));
memset(&st->runtime, 0x0, sizeof(st->runtime));
}
else if (sl->spacetype == SPACE_SEQ) {
SpaceSeq *sseq = (SpaceSeq *)sl;
@ -1724,6 +1727,11 @@ static void direct_link_area(BlendDataReader *reader, ScrArea *area)
BLO_read_data_address(reader, &sfile->params);
BLO_read_data_address(reader, &sfile->asset_params);
}
else if (sl->spacetype == SPACE_ACTION) {
SpaceAction *saction = (SpaceAction *)sl;
memset(&saction->runtime, 0x0, sizeof(saction->runtime));
}
else if (sl->spacetype == SPACE_CLIP) {
SpaceClip *sclip = (SpaceClip *)sl;

View File

@ -164,6 +164,18 @@ const ShaderFxTypeInfo *BKE_shaderfx_get_info(ShaderFxType type)
return NULL;
}
/**
* Check whether given shaderfx is not local (i.e. from linked data) when the object is a library
* override.
*
* \param shaderfx: May be NULL, in which case we consider it as a non-local shaderfx case.
*/
bool BKE_shaderfx_is_nonlocal_in_liboverride(const Object *ob, const ShaderFxData *shaderfx)
{
return (ID_IS_OVERRIDE_LIBRARY(ob) &&
((shaderfx == NULL) || (shaderfx->flag & eShaderFxFlag_OverrideLibrary_Local) == 0));
}
/**
* Get an effect's panel type, which was defined in the #panelRegister callback.
*

View File

@ -209,10 +209,20 @@ static float3 rotate_direction_around_axis(const float3 &direction,
return axis_scaled + diff * std::cos(angle) + cross * std::sin(angle);
}
static void calculate_normals_z_up(Span<float3> tangents, MutableSpan<float3> normals)
static void calculate_normals_z_up(Span<float3> tangents, MutableSpan<float3> r_normals)
{
for (const int i : normals.index_range()) {
normals[i] = float3::cross(tangents[i], float3(0.0f, 0.0f, 1.0f)).normalized();
BLI_assert(r_normals.size() == tangents.size());
/* Same as in `vec_to_quat`. */
const float epsilon = 1e-4f;
for (const int i : r_normals.index_range()) {
const float3 &tangent = tangents[i];
if (fabsf(tangent.x) + fabsf(tangent.y) < epsilon) {
r_normals[i] = {1.0f, 0.0f, 0.0f};
}
else {
r_normals[i] = float3(tangent.y, -tangent.x, 0.0f).normalized();
}
}
}
@ -368,7 +378,7 @@ void Spline::sample_based_on_index_factors(const GVArray &src,
using T = decltype(dummy);
const GVArray_Typed<T> src_typed = src.typed<T>();
MutableSpan<T> dst_typed = dst.typed<T>();
blender::parallel_for(dst_typed.index_range(), 1024, [&](IndexRange range) {
blender::threading::parallel_for(dst_typed.index_range(), 1024, [&](IndexRange range) {
for (const int i : range) {
const LookupResult interp = this->lookup_data_from_index_factor(index_factors[i]);
dst_typed[i] = blender::attribute_math::mix2(interp.factor,

View File

@ -352,9 +352,9 @@ static void bezier_forward_difference_3d(const float3 &point_0,
}
}
void BezierSpline::evaluate_bezier_segment(const int index,
const int next_index,
MutableSpan<float3> positions) const
void BezierSpline::evaluate_segment(const int index,
const int next_index,
MutableSpan<float3> positions) const
{
if (this->segment_is_vector(index)) {
BLI_assert(positions.size() == 1);
@ -417,7 +417,7 @@ static void calculate_mappings_linear_resolution(Span<int> offsets,
}
const int grain_size = std::max(2048 / resolution, 1);
parallel_for(IndexRange(1, size - 2), grain_size, [&](IndexRange range) {
blender::threading::parallel_for(IndexRange(1, size - 2), grain_size, [&](IndexRange range) {
for (const int i_control_point : range) {
const int segment_len = offsets[i_control_point + 1] - offsets[i_control_point];
const float segment_len_inv = 1.0f / segment_len;
@ -497,14 +497,13 @@ Span<float3> BezierSpline::evaluated_positions() const
Span<int> offsets = this->control_point_offsets();
const int grain_size = std::max(512 / resolution_, 1);
parallel_for(IndexRange(size - 1), grain_size, [&](IndexRange range) {
blender::threading::parallel_for(IndexRange(size - 1), grain_size, [&](IndexRange range) {
for (const int i : range) {
this->evaluate_bezier_segment(
i, i + 1, positions.slice(offsets[i], offsets[i + 1] - offsets[i]));
this->evaluate_segment(i, i + 1, positions.slice(offsets[i], offsets[i + 1] - offsets[i]));
}
});
if (is_cyclic_) {
this->evaluate_bezier_segment(
this->evaluate_segment(
size - 1, 0, positions.slice(offsets[size - 1], offsets[size] - offsets[size - 1]));
}
else {

View File

@ -36,6 +36,7 @@
#include "BLI_math.h"
#include "BLI_path_util.h"
#include "BLI_string.h"
#include "BLI_task.hh"
#include "BLI_utildefines.h"
#include "BKE_anim_data.h"
@ -324,15 +325,19 @@ struct VolumeGrid {
openvdb::io::File file(filepath);
try {
file.setCopyMaxBytes(0);
file.open();
openvdb::GridBase::Ptr vdb_grid = file.readGrid(name());
entry->grid->setTree(vdb_grid->baseTreePtr());
}
catch (const openvdb::IoError &e) {
entry->error_msg = e.what();
}
/* Isolate file loading since that's potentially multithreaded and we are
* holding a mutex lock. */
blender::threading::isolate_task([&] {
try {
file.setCopyMaxBytes(0);
file.open();
openvdb::GridBase::Ptr vdb_grid = file.readGrid(name());
entry->grid->setTree(vdb_grid->baseTreePtr());
}
catch (const openvdb::IoError &e) {
entry->error_msg = e.what();
}
});
std::atomic_thread_fence(std::memory_order_release);
entry->is_loaded = true;

View File

@ -26,7 +26,7 @@
#include "BLI_map.hh"
#include "BLI_utility_mixins.hh"
namespace blender {
namespace blender::threading {
namespace enumerable_thread_specific_utils {
inline std::atomic<int> next_id = 0;
@ -70,4 +70,4 @@ template<typename T> class EnumerableThreadSpecific : NonCopyable, NonMovable {
#endif /* WITH_TBB */
};
} // namespace blender
} // namespace blender::threading

View File

@ -67,55 +67,17 @@ typedef enum TaskPriority {
TASK_PRIORITY_HIGH,
} TaskPriority;
/**
* Task isolation helps avoid unexpected task scheduling decisions that can lead to bugs if wrong
* assumptions were made. Typically that happens when doing "nested threading", i.e. one thread
* schedules a bunch of main-tasks and those spawn new sub-tasks.
*
* What can happen is that when a main-task waits for its sub-tasks to complete on other threads,
* another main-task is scheduled within the already running main-task. Generally, this is good,
* because it leads to better performance. However, sometimes code (often unintentionally) makes
* the assumption that at most one main-task runs on a thread at a time.
*
* The bugs often show themselves in two ways:
* - Deadlock, when a main-task holds a mutex while waiting for its sub-tasks to complete.
* - Data corruption, when a main-task makes wrong assumptions about a thread-local variable.
*
* Task isolation can avoid these bugs by making sure that a main-task does not start executing
* another main-task while waiting for its sub-tasks. More precisely, a function that runs in an
* isolated region is only allowed to run sub-tasks that were spawned in the same isolated region.
*
* Unfortunately, incorrect use of task isolation can lead to deadlocks itself. This can happen
* when threading primitives are used that separate spawning tasks from executing them. The problem
* occurs when a task is spawned in one isolated region while the tasks are waited for in another
* isolated region. In this setup, the thread that is waiting for the spawned tasks to complete
* cannot run the tasks itself. On a single thread, that causes a deadlock already. When there are
* multiple threads, another thread will typically run the task and avoid the deadlock. However, if
* this situation happens on all threads at the same time, all threads will deadlock. This happened
* in T88598.
*/
typedef enum TaskIsolation {
/* Do not use task isolation. Always use this when tasks are pushed recursively. */
TASK_ISOLATION_OFF,
/* Run each task in its own isolated region. */
TASK_ISOLATION_ON,
} TaskIsolation;
typedef struct TaskPool TaskPool;
typedef void (*TaskRunFunction)(TaskPool *__restrict pool, void *taskdata);
typedef void (*TaskFreeFunction)(TaskPool *__restrict pool, void *taskdata);
/* Regular task pool that immediately starts executing tasks as soon as they
* are pushed, either on the current or another thread. */
TaskPool *BLI_task_pool_create(void *userdata,
TaskPriority priority,
TaskIsolation task_isolation);
TaskPool *BLI_task_pool_create(void *userdata, TaskPriority priority);
/* Background: always run tasks in a background thread, never immediately
* execute them. For running background jobs. */
TaskPool *BLI_task_pool_create_background(void *userdata,
TaskPriority priority,
TaskIsolation task_isolation);
TaskPool *BLI_task_pool_create_background(void *userdata, TaskPriority priority);
/* Background Serial: run tasks one after the other in the background,
* without parallelization between the tasks. */
@ -125,9 +87,7 @@ TaskPool *BLI_task_pool_create_background_serial(void *userdata, TaskPriority pr
* as threads can't immediately start working. But it can be used if the data
* structures the threads operate on are not fully initialized until all tasks
* are created. */
TaskPool *BLI_task_pool_create_suspended(void *userdata,
TaskPriority priority,
TaskIsolation task_isolation);
TaskPool *BLI_task_pool_create_suspended(void *userdata, TaskPriority priority);
/* No threads: immediately executes tasks on the same thread. For debugging. */
TaskPool *BLI_task_pool_create_no_threads(void *userdata);
@ -365,6 +325,36 @@ struct TaskNode *BLI_task_graph_node_create(struct TaskGraph *task_graph,
bool BLI_task_graph_node_push_work(struct TaskNode *task_node);
void BLI_task_graph_edge_create(struct TaskNode *from_node, struct TaskNode *to_node);
/* Task Isolation
*
* Task isolation helps avoid unexpected task scheduling decisions that can lead to bugs if wrong
* assumptions were made. Typically that happens when doing "nested threading", i.e. one thread
* schedules a bunch of main-tasks and those spawn new sub-tasks.
*
* What can happen is that when a main-task waits for its sub-tasks to complete on other threads,
* another main-task is scheduled within the already running main-task. Generally, this is good,
* because it leads to better performance. However, sometimes code (often unintentionally) makes
* the assumption that at most one main-task runs on a thread at a time.
*
* The bugs often show themselves in two ways:
* - Deadlock, when a main-task holds a mutex while waiting for its sub-tasks to complete.
* - Data corruption, when a main-task makes wrong assumptions about a thread-local variable.
*
* Task isolation can avoid these bugs by making sure that a main-task does not start executing
* another main-task while waiting for its sub-tasks. More precisely, a function that runs in an
* isolated region is only allowed to run sub-tasks that were spawned in the same isolated region.
*
* Unfortunately, incorrect use of task isolation can lead to deadlocks itself. This can happen
* when threading primitives are used that separate spawning tasks from executing them. The problem
* occurs when a task is spawned in one isolated region while the tasks are waited for in another
* isolated region. In this setup, the thread that is waiting for the spawned tasks to complete
* cannot run the tasks itself. On a single thread, that causes a deadlock already. When there are
* multiple threads, another thread will typically run the task and avoid the deadlock. However, if
* this situation happens on all threads at the same time, all threads will deadlock. This happened
* in T88598.
*/
void BLI_task_isolate(void (*func)(void *userdata), void *userdata);
#ifdef __cplusplus
}
#endif
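
For orientation, here is a minimal sketch of how the BLI_task_isolate() entry point declared above might be called from code that holds a lock; the mutex, the buffer and both function names are hypothetical and not part of this commit.

#include "BLI_task.h"
#include "BLI_threads.h"

/* Hypothetical callback: does internally multithreaded work (it may spawn and
 * wait on sub-tasks), similar to bvhtree_balance_isolated() in this commit. */
static void heavy_work_isolated(void *userdata)
{
  float *buffer = (float *)userdata;
  buffer[0] = 0.0f; /* ...placeholder for the actual parallel work... */
}

/* Hypothetical caller: because cache_mutex is held, the work runs isolated so
 * the waiting thread cannot pick up an unrelated task that also wants cache_mutex. */
static void fill_cache_locked(ThreadMutex *cache_mutex, float *buffer)
{
  BLI_mutex_lock(cache_mutex);
  BLI_task_isolate(heavy_work_isolated, buffer);
  BLI_mutex_unlock(cache_mutex);
}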

View File

@ -31,6 +31,7 @@
# include <tbb/blocked_range.h>
# include <tbb/parallel_for.h>
# include <tbb/parallel_for_each.h>
# include <tbb/task_arena.h>
# ifdef WIN32
/* We cannot keep this defined, since other parts of the code deal with this on their own, leading
* to multiple define warnings unless we un-define this, however we can only undefine this if we
@ -44,7 +45,7 @@
#include "BLI_index_range.hh"
#include "BLI_utildefines.h"
namespace blender {
namespace blender::threading {
template<typename Range, typename Function>
void parallel_for_each(Range &range, const Function &function)
@ -75,4 +76,14 @@ void parallel_for(IndexRange range, int64_t grain_size, const Function &function
#endif
}
} // namespace blender
/** See #BLI_task_isolate for a description of what isolating a task means. */
template<typename Function> void isolate_task(const Function &function)
{
#ifdef WITH_TBB
tbb::this_task_arena::isolate(function);
#else
function();
#endif
}
} // namespace blender::threading
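
A usage sketch for the new blender::threading::isolate_task() helper (assumed caller code, not part of this commit), mirroring the OpenVDB grid-loading pattern changed elsewhere in this commit:

#include <mutex>
#include "BLI_task.hh"

static std::mutex cache_mutex; /* Hypothetical lock guarding a shared cache. */

static void load_cache_entry()
{
  std::lock_guard<std::mutex> lock(cache_mutex);
  /* The loading below may spawn TBB tasks; isolating prevents this thread from
   * stealing an unrelated task that also needs cache_mutex while it waits. */
  blender::threading::isolate_task([&]() {
    /* ...potentially multithreaded loading work... */
  });
}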

View File

@ -1983,7 +1983,7 @@ static void populate_comp_bbs(const Vector<Vector<int>> &components,
* absolute value of any coordinate. Do it first per component,
* then get the overall max. */
Array<double> max_abs(components.size(), 0.0);
parallel_for(components.index_range(), comp_grainsize, [&](IndexRange comp_range) {
threading::parallel_for(components.index_range(), comp_grainsize, [&](IndexRange comp_range) {
for (int c : comp_range) {
BoundingBox &bb = comp_bb[c];
double &maxa = max_abs[c];
@ -2691,7 +2691,7 @@ static IMesh raycast_tris_boolean(const IMesh &tm,
tbb::spin_mutex mtx;
# endif
const int grainsize = 256;
parallel_for(IndexRange(tm.face_size()), grainsize, [&](IndexRange range) {
threading::parallel_for(IndexRange(tm.face_size()), grainsize, [&](IndexRange range) {
Array<float> in_shape(nshapes, 0);
Array<int> winding(nshapes, 0);
for (int t : range) {
@ -3391,7 +3391,7 @@ static IMesh polymesh_from_trimesh_with_dissolve(const IMesh &tm_out,
}
/* For now: need plane normals for all triangles. */
const int grainsize = 1024;
parallel_for(tm_out.face_index_range(), grainsize, [&](IndexRange range) {
threading::parallel_for(tm_out.face_index_range(), grainsize, [&](IndexRange range) {
for (int i : range) {
Face *tri = tm_out.face(i);
tri->populate_plane(false);

View File

@ -91,7 +91,7 @@ struct TaskNode {
#ifdef WITH_TBB
tbb::flow::continue_msg run(const tbb::flow::continue_msg UNUSED(input))
{
tbb::this_task_arena::isolate([this] { run_func(task_data); });
run_func(task_data);
return tbb::flow::continue_msg();
}
#endif

View File

@ -237,7 +237,7 @@ static void task_parallel_iterator_do(const TaskParallelSettings *settings,
void *userdata_chunk_array = NULL;
const bool use_userdata_chunk = (userdata_chunk_size != 0) && (userdata_chunk != NULL);
TaskPool *task_pool = BLI_task_pool_create(state, TASK_PRIORITY_HIGH, TASK_ISOLATION_ON);
TaskPool *task_pool = BLI_task_pool_create(state, TASK_PRIORITY_HIGH);
if (use_userdata_chunk) {
userdata_chunk_array = MALLOCA(userdata_chunk_size * num_tasks);
@ -442,7 +442,7 @@ void BLI_task_parallel_mempool(BLI_mempool *mempool,
return;
}
task_pool = BLI_task_pool_create(&state, TASK_PRIORITY_HIGH, TASK_ISOLATION_ON);
task_pool = BLI_task_pool_create(&state, TASK_PRIORITY_HIGH);
num_threads = BLI_task_scheduler_num_threads();
/* The idea here is to prevent creating task for each of the loop iterations

View File

@ -22,7 +22,6 @@
#include <cstdlib>
#include <memory>
#include <thread>
#include <utility>
#include "MEM_guardedalloc.h"
@ -156,7 +155,6 @@ enum TaskPoolType {
struct TaskPool {
TaskPoolType type;
bool use_threads;
TaskIsolation task_isolation;
ThreadMutex user_mutex;
void *userdata;
@ -164,8 +162,6 @@ struct TaskPool {
#ifdef WITH_TBB
/* TBB task pool. */
TBBTaskGroup tbb_group;
/* This is used to detect a common way to accidentally create a deadlock with task isolation. */
std::thread::id task_pool_create_thread_id;
#endif
volatile bool is_suspended;
BLI_mempool *suspended_mempool;
@ -179,33 +175,9 @@ struct TaskPool {
/* Execute task. */
void Task::operator()() const
{
#ifdef WITH_TBB
if (pool->task_isolation == TASK_ISOLATION_ON) {
tbb::this_task_arena::isolate([this] { run(pool, taskdata); });
return;
}
#endif
run(pool, taskdata);
}
static void assert_on_valid_thread(TaskPool *pool)
{
/* TODO: Remove this `return` to enable the check. */
return;
#ifdef DEBUG
# ifdef WITH_TBB
if (pool->task_isolation == TASK_ISOLATION_ON) {
const std::thread::id current_id = std::this_thread::get_id();
/* This task pool is modified from different threads. To avoid deadlocks, `TASK_ISOLATION_OFF`
* has to be used. Task isolation can still be used in a more fine-grained way within the
* tasks, but should not be enabled for the entire task pool. */
BLI_assert(pool->task_pool_create_thread_id == current_id);
}
# endif
#endif
UNUSED_VARS_NDEBUG(pool);
}
/* TBB Task Pool.
*
* Task pool using the TBB scheduler for tasks. When building without TBB
@ -391,10 +363,7 @@ static void background_task_pool_free(TaskPool *pool)
/* Task Pool */
static TaskPool *task_pool_create_ex(void *userdata,
TaskPoolType type,
TaskPriority priority,
TaskIsolation task_isolation)
static TaskPool *task_pool_create_ex(void *userdata, TaskPoolType type, TaskPriority priority)
{
const bool use_threads = BLI_task_scheduler_num_threads() > 1 && type != TASK_POOL_NO_THREADS;
@ -410,11 +379,6 @@ static TaskPool *task_pool_create_ex(void *userdata,
pool->type = type;
pool->use_threads = use_threads;
pool->task_isolation = task_isolation;
#ifdef WITH_TBB
pool->task_pool_create_thread_id = std::this_thread::get_id();
#endif
pool->userdata = userdata;
BLI_mutex_init(&pool->user_mutex);
@ -437,9 +401,9 @@ static TaskPool *task_pool_create_ex(void *userdata,
/**
* Create a normal task pool. Tasks will be executed as soon as they are added.
*/
TaskPool *BLI_task_pool_create(void *userdata, TaskPriority priority, TaskIsolation task_isolation)
TaskPool *BLI_task_pool_create(void *userdata, TaskPriority priority)
{
return task_pool_create_ex(userdata, TASK_POOL_TBB, priority, task_isolation);
return task_pool_create_ex(userdata, TASK_POOL_TBB, priority);
}
/**
@ -454,11 +418,9 @@ TaskPool *BLI_task_pool_create(void *userdata, TaskPriority priority, TaskIsolat
* they could end never being executed, since the 'fallback' background thread is already
* busy with parent task in single-threaded context).
*/
TaskPool *BLI_task_pool_create_background(void *userdata,
TaskPriority priority,
TaskIsolation task_isolation)
TaskPool *BLI_task_pool_create_background(void *userdata, TaskPriority priority)
{
return task_pool_create_ex(userdata, TASK_POOL_BACKGROUND, priority, task_isolation);
return task_pool_create_ex(userdata, TASK_POOL_BACKGROUND, priority);
}
/**
@ -466,11 +428,9 @@ TaskPool *BLI_task_pool_create_background(void *userdata,
* for until BLI_task_pool_work_and_wait() is called. This helps reducing threading
* overhead when pushing huge amount of small initial tasks from the main thread.
*/
TaskPool *BLI_task_pool_create_suspended(void *userdata,
TaskPriority priority,
TaskIsolation task_isolation)
TaskPool *BLI_task_pool_create_suspended(void *userdata, TaskPriority priority)
{
return task_pool_create_ex(userdata, TASK_POOL_TBB_SUSPENDED, priority, task_isolation);
return task_pool_create_ex(userdata, TASK_POOL_TBB_SUSPENDED, priority);
}
/**
@ -479,8 +439,7 @@ TaskPool *BLI_task_pool_create_suspended(void *userdata,
*/
TaskPool *BLI_task_pool_create_no_threads(void *userdata)
{
return task_pool_create_ex(
userdata, TASK_POOL_NO_THREADS, TASK_PRIORITY_HIGH, TASK_ISOLATION_ON);
return task_pool_create_ex(userdata, TASK_POOL_NO_THREADS, TASK_PRIORITY_HIGH);
}
/**
@ -489,7 +448,7 @@ TaskPool *BLI_task_pool_create_no_threads(void *userdata)
*/
TaskPool *BLI_task_pool_create_background_serial(void *userdata, TaskPriority priority)
{
return task_pool_create_ex(userdata, TASK_POOL_BACKGROUND_SERIAL, priority, TASK_ISOLATION_ON);
return task_pool_create_ex(userdata, TASK_POOL_BACKGROUND_SERIAL, priority);
}
void BLI_task_pool_free(TaskPool *pool)
@ -517,8 +476,6 @@ void BLI_task_pool_push(TaskPool *pool,
bool free_taskdata,
TaskFreeFunction freedata)
{
assert_on_valid_thread(pool);
Task task(pool, run, taskdata, free_taskdata, freedata);
switch (pool->type) {
@ -536,8 +493,6 @@ void BLI_task_pool_push(TaskPool *pool,
void BLI_task_pool_work_and_wait(TaskPool *pool)
{
assert_on_valid_thread(pool);
switch (pool->type) {
case TASK_POOL_TBB:
case TASK_POOL_TBB_SUSPENDED:

View File

@ -90,13 +90,11 @@ struct RangeTask {
void operator()(const tbb::blocked_range<int> &r) const
{
tbb::this_task_arena::isolate([this, r] {
TaskParallelTLS tls;
tls.userdata_chunk = userdata_chunk;
for (int i = r.begin(); i != r.end(); ++i) {
func(userdata, i, &tls);
}
});
TaskParallelTLS tls;
tls.userdata_chunk = userdata_chunk;
for (int i = r.begin(); i != r.end(); ++i) {
func(userdata, i, &tls);
}
}
void join(const RangeTask &other)

View File

@ -28,6 +28,7 @@
#ifdef WITH_TBB
/* Need to include at least one header to get the version define. */
# include <tbb/blocked_range.h>
# include <tbb/task_arena.h>
# if TBB_INTERFACE_VERSION_MAJOR >= 10
# include <tbb/global_control.h>
# define WITH_TBB_GLOBAL_CONTROL
@ -76,3 +77,12 @@ int BLI_task_scheduler_num_threads()
{
return task_scheduler_num_threads;
}
void BLI_task_isolate(void (*func)(void *userdata), void *userdata)
{
#ifdef WITH_TBB
tbb::this_task_arena::isolate([&] { func(userdata); });
#else
func(userdata);
#endif
}

View File

@ -81,7 +81,7 @@ TEST(LockfreeLinkList, InsertMultipleConcurrent)
LockfreeLinkList list;
BLI_linklist_lockfree_init(&list);
/* Initialize task scheduler and pool. */
TaskPool *pool = BLI_task_pool_create_suspended(&list, TASK_PRIORITY_HIGH, TASK_ISOLATION_ON);
TaskPool *pool = BLI_task_pool_create_suspended(&list, TASK_PRIORITY_HIGH);
/* Push tasks to the pool. */
for (int i = 0; i < num_nodes; ++i) {
BLI_task_pool_push(pool, concurrent_insert, POINTER_FROM_INT(i), false, nullptr);

View File

@ -59,6 +59,7 @@ set(SRC
intern/versioning_290.c
intern/versioning_300.c
intern/versioning_cycles.c
intern/versioning_common.cc
intern/versioning_defaults.c
intern/versioning_dna.c
intern/versioning_legacy.c
@ -72,6 +73,7 @@ set(SRC
BLO_undofile.h
BLO_writefile.h
intern/readfile.h
intern/versioning_common.h
)
set(LIB

View File

@ -1785,7 +1785,7 @@ void do_versions_after_linking_280(Main *bmain, ReportList *UNUSED(reports))
static void do_versions_seq_unique_name_all_strips(Scene *sce, ListBase *seqbasep)
{
for (Sequence *seq = seqbasep->first; seq != NULL; seq = seq->next) {
SEQ_sequence_base_unique_name_recursive(&sce->ed->seqbase, seq);
SEQ_sequence_base_unique_name_recursive(sce, &sce->ed->seqbase, seq);
if (seq->seqbase.first != NULL) {
do_versions_seq_unique_name_all_strips(sce, &seq->seqbase);
}

View File

@ -81,6 +81,7 @@
#include "BLO_readfile.h"
#include "readfile.h"
#include "versioning_common.h"
/* Make preferences read-only, use versioning_userdef.c. */
#define U (*((const UserDef *)&U))
@ -118,24 +119,38 @@ static bool can_use_proxy(const Sequence *seq, int psize)
}
/* image_size is width or height depending on which RNA property is converted - X or Y. */
static void seq_convert_transform_animation(const Scene *scene,
static void seq_convert_transform_animation(const Sequence *seq,
const Scene *scene,
const char *path,
const int image_size)
const int image_size,
const int scene_size)
{
if (scene->adt == NULL || scene->adt->action == NULL) {
return;
}
FCurve *fcu = BKE_fcurve_find(&scene->adt->action->curves, path, 0);
if (fcu != NULL && !BKE_fcurve_is_empty(fcu)) {
BezTriple *bezt = fcu->bezt;
for (int i = 0; i < fcu->totvert; i++, bezt++) {
/* Same math as with old_image_center_*, but simplified. */
bezt->vec[0][1] = image_size / 2 + bezt->vec[0][1] - scene->r.xsch / 2;
bezt->vec[1][1] = image_size / 2 + bezt->vec[1][1] - scene->r.xsch / 2;
bezt->vec[2][1] = image_size / 2 + bezt->vec[2][1] - scene->r.xsch / 2;
/* Hardcoded legacy bit-flags which have been removed. */
const uint32_t use_transform_flag = (1 << 16);
const uint32_t use_crop_flag = (1 << 17);
/* Convert offset animation, but only if crop is not used. */
if ((seq->flag & use_transform_flag) != 0 && (seq->flag & use_crop_flag) == 0) {
FCurve *fcu = BKE_fcurve_find(&scene->adt->action->curves, path, 0);
if (fcu != NULL && !BKE_fcurve_is_empty(fcu)) {
BezTriple *bezt = fcu->bezt;
for (int i = 0; i < fcu->totvert; i++, bezt++) {
/* Same math as with old_image_center_*, but simplified. */
bezt->vec[0][1] = (image_size - scene_size) / 2 + bezt->vec[0][1];
bezt->vec[1][1] = (image_size - scene_size) / 2 + bezt->vec[1][1];
bezt->vec[2][1] = (image_size - scene_size) / 2 + bezt->vec[2][1];
}
}
}
else { /* Else, remove offset animation. */
FCurve *fcu = BKE_fcurve_find(&scene->adt->action->curves, path, 0);
BLI_remlink(&scene->adt->action->curves, fcu);
BKE_fcurve_free(fcu);
}
}
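/* Worked example (illustration only, not part of the code above): converting an
 * `offset_x` key-frame for a 512 px wide image strip in a 1920 px wide scene shifts
 * every key value by (512 - 1920) / 2 = -704, mirroring the static offset
 * `t->xofs = old_image_center_x - scene->r.xsch / 2` computed below. */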
static void seq_convert_transform_crop(const Scene *scene,
@ -232,18 +247,15 @@ static void seq_convert_transform_crop(const Scene *scene,
t->xofs = old_image_center_x - scene->r.xsch / 2;
t->yofs = old_image_center_y - scene->r.ysch / 2;
/* Convert offset animation, but only if crop is not used. */
if ((seq->flag & use_transform_flag) != 0 && (seq->flag & use_crop_flag) == 0) {
char name_esc[(sizeof(seq->name) - 2) * 2], *path;
BLI_str_escape(name_esc, seq->name + 2, sizeof(name_esc));
char name_esc[(sizeof(seq->name) - 2) * 2], *path;
BLI_str_escape(name_esc, seq->name + 2, sizeof(name_esc));
path = BLI_sprintfN("sequence_editor.sequences_all[\"%s\"].transform.offset_x", name_esc);
seq_convert_transform_animation(scene, path, image_size_x);
MEM_freeN(path);
path = BLI_sprintfN("sequence_editor.sequences_all[\"%s\"].transform.offset_y", name_esc);
seq_convert_transform_animation(scene, path, image_size_y);
MEM_freeN(path);
}
path = BLI_sprintfN("sequence_editor.sequences_all[\"%s\"].transform.offset_x", name_esc);
seq_convert_transform_animation(seq, scene, path, image_size_x, scene->r.xsch);
MEM_freeN(path);
path = BLI_sprintfN("sequence_editor.sequences_all[\"%s\"].transform.offset_y", name_esc);
seq_convert_transform_animation(seq, scene, path, image_size_y, scene->r.ysch);
MEM_freeN(path);
seq->flag &= ~use_transform_flag;
seq->flag &= ~use_crop_flag;
@ -862,26 +874,6 @@ static void version_node_join_geometry_for_multi_input_socket(bNodeTree *ntree)
}
}
static ARegion *do_versions_add_region_if_not_found(ListBase *regionbase,
int region_type,
const char *name,
int link_after_region_type)
{
ARegion *link_after_region = NULL;
LISTBASE_FOREACH (ARegion *, region, regionbase) {
if (region->regiontype == region_type) {
return NULL;
}
if (region->regiontype == link_after_region_type) {
link_after_region = region;
}
}
ARegion *new_region = MEM_callocN(sizeof(ARegion), name);
new_region->regiontype = region_type;
BLI_insertlinkafter(regionbase, link_after_region, new_region);
return new_region;
}
/* NOLINTNEXTLINE: readability-function-size */
void blo_do_versions_290(FileData *fd, Library *UNUSED(lib), Main *bmain)
{

View File

@ -0,0 +1,50 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/** \file
* \ingroup blenloader
*/
/* allow readfile to use deprecated functionality */
#define DNA_DEPRECATED_ALLOW
#include "DNA_screen_types.h"
#include "BLI_listbase.h"
#include "MEM_guardedalloc.h"
#include "versioning_common.h"
ARegion *do_versions_add_region_if_not_found(ListBase *regionbase,
int region_type,
const char *name,
int link_after_region_type)
{
ARegion *link_after_region = NULL;
LISTBASE_FOREACH (ARegion *, region, regionbase) {
if (region->regiontype == region_type) {
return NULL;
}
if (region->regiontype == link_after_region_type) {
link_after_region = region;
}
}
ARegion *new_region = static_cast<ARegion *>(MEM_callocN(sizeof(ARegion), name));
new_region->regiontype = region_type;
BLI_insertlinkafter(regionbase, link_after_region, new_region);
return new_region;
}

View File

@ -0,0 +1,37 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/** \file
* \ingroup blenloader
*/
#pragma once
struct ARegion;
struct ListBase;
#ifdef __cplusplus
extern "C" {
#endif
struct ARegion *do_versions_add_region_if_not_found(struct ListBase *regionbase,
int region_type,
const char *name,
int link_after_region_type);
#ifdef __cplusplus
}
#endif

View File

@ -53,11 +53,18 @@
/**
* \param face_normal: This will be optimized out as a constant.
*/
BLI_INLINE int mesh_calc_tessellation_for_face_impl(BMLoop *(*looptris)[3],
BMFace *efa,
MemArena **pf_arena_p,
const bool face_normal)
BLI_INLINE void mesh_calc_tessellation_for_face_impl(BMLoop *(*looptris)[3],
BMFace *efa,
MemArena **pf_arena_p,
const bool face_normal)
{
#ifdef DEBUG
/* The face normal is used for projecting faces into 2D space for tessellation.
* Invalid normals may result in invalid tessellation.
* Either `face_normal` should be true or normals should be updated first. */
BLI_assert(face_normal || BM_face_is_normal_valid(efa));
#endif
switch (efa->len) {
case 3: {
/* `0 1 2` -> `0 1 2` */
@ -69,7 +76,7 @@ BLI_INLINE int mesh_calc_tessellation_for_face_impl(BMLoop *(*looptris)[3],
if (face_normal) {
normal_tri_v3(efa->no, l_ptr[0]->v->co, l_ptr[1]->v->co, l_ptr[2]->v->co);
}
return 1;
break;
}
case 4: {
/* `0 1 2 3` -> (`0 1 2`, `0 2 3`) */
@ -92,7 +99,7 @@ BLI_INLINE int mesh_calc_tessellation_for_face_impl(BMLoop *(*looptris)[3],
l_ptr_a[2] = l_ptr_b[2];
l_ptr_b[0] = l_ptr_a[1];
}
return 2;
break;
}
default: {
if (face_normal) {
@ -139,23 +146,23 @@ BLI_INLINE int mesh_calc_tessellation_for_face_impl(BMLoop *(*looptris)[3],
}
BLI_memarena_clear(pf_arena);
return tris_len;
break;
}
}
}
static int mesh_calc_tessellation_for_face(BMLoop *(*looptris)[3],
BMFace *efa,
MemArena **pf_arena_p)
static void mesh_calc_tessellation_for_face(BMLoop *(*looptris)[3],
BMFace *efa,
MemArena **pf_arena_p)
{
return mesh_calc_tessellation_for_face_impl(looptris, efa, pf_arena_p, false);
mesh_calc_tessellation_for_face_impl(looptris, efa, pf_arena_p, false);
}
static int mesh_calc_tessellation_for_face_with_normal(BMLoop *(*looptris)[3],
BMFace *efa,
MemArena **pf_arena_p)
static void mesh_calc_tessellation_for_face_with_normal(BMLoop *(*looptris)[3],
BMFace *efa,
MemArena **pf_arena_p)
{
return mesh_calc_tessellation_for_face_impl(looptris, efa, pf_arena_p, true);
mesh_calc_tessellation_for_face_impl(looptris, efa, pf_arena_p, true);
}
/**
@ -182,13 +189,15 @@ static void bm_mesh_calc_tessellation__single_threaded(BMesh *bm,
BM_ITER_MESH (efa, &iter, bm, BM_FACES_OF_MESH) {
BLI_assert(efa->len >= 3);
BM_face_calc_normal(efa, efa->no);
i += mesh_calc_tessellation_for_face_with_normal(looptris + i, efa, &pf_arena);
mesh_calc_tessellation_for_face_with_normal(looptris + i, efa, &pf_arena);
i += efa->len - 2;
}
}
else {
BM_ITER_MESH (efa, &iter, bm, BM_FACES_OF_MESH) {
BLI_assert(efa->len >= 3);
i += mesh_calc_tessellation_for_face(looptris + i, efa, &pf_arena);
mesh_calc_tessellation_for_face(looptris + i, efa, &pf_arena);
i += efa->len - 2;
}
}

View File

@ -411,8 +411,7 @@ static void threading_model_task_schedule(WorkPackage *package)
static void threading_model_task_start()
{
BLI_thread_local_create(g_thread_device);
g_work_scheduler.task.pool = BLI_task_pool_create(
nullptr, TASK_PRIORITY_HIGH, TASK_ISOLATION_ON);
g_work_scheduler.task.pool = BLI_task_pool_create(nullptr, TASK_PRIORITY_HIGH);
}
static void threading_model_task_finish()

View File

@ -25,8 +25,8 @@
namespace blender::compositor {
/**
* @brief AntiAliasingNode
* @ingroup Node
* \brief AntiAliasingNode
* \ingroup Node
*/
class AntiAliasingNode : public Node {
public:

View File

@ -168,7 +168,7 @@ void ImageNode::convertToOperations(NodeConverter &converter,
if (index == 0 && operation) {
converter.addPreview(operation->getOutputSocket());
}
if (STREQ(rpass->name, RE_PASSNAME_COMBINED)) {
if (STREQ(rpass->name, RE_PASSNAME_COMBINED) && !(bnodeSocket->flag & SOCK_UNAVAIL)) {
for (NodeOutput *alphaSocket : getOutputSockets()) {
bNodeSocket *bnodeAlphaSocket = alphaSocket->getbNodeSocket();
if (!STREQ(bnodeAlphaSocket->name, "Alpha")) {

View File

@ -24,6 +24,7 @@ SetColorOperation::SetColorOperation()
{
this->addOutputSocket(DataType::Color);
flags.is_set_operation = true;
flags.is_fullframe_operation = true;
}
void SetColorOperation::executePixelSampled(float output[4],
@ -41,4 +42,14 @@ void SetColorOperation::determineResolution(unsigned int resolution[2],
resolution[1] = preferredResolution[1];
}
void SetColorOperation::update_memory_buffer(MemoryBuffer *output,
const rcti &output_area,
Span<MemoryBuffer *> UNUSED(inputs),
ExecutionSystem &UNUSED(exec_system))
{
BLI_assert(output->is_a_single_elem());
float *out_elem = output->get_elem(output_area.xmin, output_area.ymin);
copy_v4_v4(out_elem, m_color);
}
} // namespace blender::compositor

View File

@ -80,6 +80,11 @@ class SetColorOperation : public NodeOperation {
void determineResolution(unsigned int resolution[2],
unsigned int preferredResolution[2]) override;
void update_memory_buffer(MemoryBuffer *output,
const rcti &output_area,
Span<MemoryBuffer *> inputs,
ExecutionSystem &exec_system) override;
};
} // namespace blender::compositor

View File

@ -24,6 +24,7 @@ SetValueOperation::SetValueOperation()
{
this->addOutputSocket(DataType::Value);
flags.is_set_operation = true;
flags.is_fullframe_operation = true;
}
void SetValueOperation::executePixelSampled(float output[4],
@ -41,4 +42,14 @@ void SetValueOperation::determineResolution(unsigned int resolution[2],
resolution[1] = preferredResolution[1];
}
void SetValueOperation::update_memory_buffer(MemoryBuffer *output,
const rcti &output_area,
Span<MemoryBuffer *> UNUSED(inputs),
ExecutionSystem &UNUSED(exec_system))
{
BLI_assert(output->is_a_single_elem());
float *out_elem = output->get_elem(output_area.xmin, output_area.ymin);
*out_elem = m_value;
}
} // namespace blender::compositor

View File

@ -51,6 +51,10 @@ class SetValueOperation : public NodeOperation {
void executePixelSampled(float output[4], float x, float y, PixelSampler sampler) override;
void determineResolution(unsigned int resolution[2],
unsigned int preferredResolution[2]) override;
void update_memory_buffer(MemoryBuffer *output,
const rcti &output_area,
Span<MemoryBuffer *> inputs,
ExecutionSystem &exec_system) override;
};
} // namespace blender::compositor

View File

@ -353,8 +353,7 @@ static TaskPool *deg_evaluate_task_pool_create(DepsgraphEvalState *state)
return BLI_task_pool_create_no_threads(state);
}
/* TODO: Disable task isolation. */
return BLI_task_pool_create_suspended(state, TASK_PRIORITY_HIGH, TASK_ISOLATION_ON);
return BLI_task_pool_create_suspended(state, TASK_PRIORITY_HIGH);
}
/**

View File

@ -397,7 +397,7 @@ static void gpencil_vfx_shadow(ShadowShaderFxData *fx, Object *ob, gpIterVfxData
unit_m4(uv_mat);
zero_v2(wave_ofs);
/* We reset the uv_mat so we need to account for the rotation in the */
/* Reset the `uv_mat` to account for rotation in the Y-axis (Shadow-V parameter). */
copy_v2_fl2(tmp, 0.0f, blur_size[1]);
rotate_v2_v2fl(blur_dir, tmp, -fx->rotation);
mul_v2_v2(blur_dir, vp_size_inv);

View File

@ -83,8 +83,9 @@ typedef enum eMRDataType {
MR_DATA_LOOPTRI = 1 << 3,
/** Force loop normals calculation. */
MR_DATA_TAN_LOOP_NOR = 1 << 4,
MR_DATA_MAT_OFFSETS = 1 << 5,
} eMRDataType;
ENUM_OPERATORS(eMRDataType, MR_DATA_TAN_LOOP_NOR)
ENUM_OPERATORS(eMRDataType, MR_DATA_MAT_OFFSETS)
#ifdef __cplusplus
extern "C" {
@ -160,47 +161,20 @@ typedef struct MeshBufferCache {
* - Loose geometry.
*/
typedef struct MeshBufferExtractionCache {
int edge_loose_len;
int vert_loose_len;
int *lverts;
int *ledges;
struct {
int edge_len;
int vert_len;
int *verts;
int *edges;
} loose_geom;
struct {
int *tri;
int visible_tri_len;
} mat_offsets;
} MeshBufferExtractionCache;
typedef enum DRWBatchFlag {
MBC_SURFACE = (1 << 0),
MBC_SURFACE_WEIGHTS = (1 << 1),
MBC_EDIT_TRIANGLES = (1 << 2),
MBC_EDIT_VERTICES = (1 << 3),
MBC_EDIT_EDGES = (1 << 4),
MBC_EDIT_VNOR = (1 << 5),
MBC_EDIT_LNOR = (1 << 6),
MBC_EDIT_FACEDOTS = (1 << 7),
MBC_EDIT_MESH_ANALYSIS = (1 << 8),
MBC_EDITUV_FACES_STRETCH_AREA = (1 << 9),
MBC_EDITUV_FACES_STRETCH_ANGLE = (1 << 10),
MBC_EDITUV_FACES = (1 << 11),
MBC_EDITUV_EDGES = (1 << 12),
MBC_EDITUV_VERTS = (1 << 13),
MBC_EDITUV_FACEDOTS = (1 << 14),
MBC_EDIT_SELECTION_VERTS = (1 << 15),
MBC_EDIT_SELECTION_EDGES = (1 << 16),
MBC_EDIT_SELECTION_FACES = (1 << 17),
MBC_EDIT_SELECTION_FACEDOTS = (1 << 18),
MBC_ALL_VERTS = (1 << 19),
MBC_ALL_EDGES = (1 << 20),
MBC_LOOSE_EDGES = (1 << 21),
MBC_EDGE_DETECTION = (1 << 22),
MBC_WIRE_EDGES = (1 << 23),
MBC_WIRE_LOOPS = (1 << 24),
MBC_WIRE_LOOPS_UVS = (1 << 25),
MBC_SKIN_ROOTS = (1 << 26),
MBC_SCULPT_OVERLAYS = (1 << 27),
} DRWBatchFlag;
#define MBC_EDITUV \
(MBC_EDITUV_FACES_STRETCH_AREA | MBC_EDITUV_FACES_STRETCH_ANGLE | MBC_EDITUV_FACES | \
MBC_EDITUV_EDGES | MBC_EDITUV_VERTS | MBC_EDITUV_FACEDOTS | MBC_WIRE_LOOPS_UVS)
#define FOREACH_MESH_BUFFER_CACHE(batch_cache, mbc) \
for (MeshBufferCache *mbc = &batch_cache->final; \
mbc == &batch_cache->final || mbc == &batch_cache->cage || mbc == &batch_cache->uv_cage; \
@ -253,8 +227,8 @@ typedef struct MeshBatchCache {
GPUBatch **surface_per_mat;
DRWBatchFlag batch_requested;
DRWBatchFlag batch_ready;
uint32_t batch_requested; /* DRWBatchFlag */
uint32_t batch_ready; /* DRWBatchFlag */
/* settings to determine if cache is invalid */
int edge_len;
@ -288,6 +262,47 @@ typedef struct MeshBatchCache {
#define MBC_VBO_LEN (sizeof(((MeshBufferCache){0}).vbo) / sizeof(void *))
#define MBC_IBO_LEN (sizeof(((MeshBufferCache){0}).ibo) / sizeof(void *))
#define MBC_BATCH_INDEX(batch_name) \
((offsetof(MeshBatchCache, batch_name) - offsetof(MeshBatchCache, batch.surface)) / \
sizeof(void *))
typedef enum DRWBatchFlag {
MBC_SURFACE = (1u << MBC_BATCH_INDEX(batch.surface)),
MBC_SURFACE_WEIGHTS = (1u << MBC_BATCH_INDEX(batch.surface_weights)),
MBC_EDIT_TRIANGLES = (1u << MBC_BATCH_INDEX(batch.edit_triangles)),
MBC_EDIT_VERTICES = (1u << MBC_BATCH_INDEX(batch.edit_vertices)),
MBC_EDIT_EDGES = (1u << MBC_BATCH_INDEX(batch.edit_edges)),
MBC_EDIT_VNOR = (1u << MBC_BATCH_INDEX(batch.edit_vnor)),
MBC_EDIT_LNOR = (1u << MBC_BATCH_INDEX(batch.edit_lnor)),
MBC_EDIT_FACEDOTS = (1u << MBC_BATCH_INDEX(batch.edit_fdots)),
MBC_EDIT_MESH_ANALYSIS = (1u << MBC_BATCH_INDEX(batch.edit_mesh_analysis)),
MBC_SKIN_ROOTS = (1u << MBC_BATCH_INDEX(batch.edit_skin_roots)),
MBC_EDITUV_FACES_STRETCH_AREA = (1u << MBC_BATCH_INDEX(batch.edituv_faces_stretch_area)),
MBC_EDITUV_FACES_STRETCH_ANGLE = (1u << MBC_BATCH_INDEX(batch.edituv_faces_stretch_angle)),
MBC_EDITUV_FACES = (1u << MBC_BATCH_INDEX(batch.edituv_faces)),
MBC_EDITUV_EDGES = (1u << MBC_BATCH_INDEX(batch.edituv_edges)),
MBC_EDITUV_VERTS = (1u << MBC_BATCH_INDEX(batch.edituv_verts)),
MBC_EDITUV_FACEDOTS = (1u << MBC_BATCH_INDEX(batch.edituv_fdots)),
MBC_EDIT_SELECTION_VERTS = (1u << MBC_BATCH_INDEX(batch.edit_selection_verts)),
MBC_EDIT_SELECTION_EDGES = (1u << MBC_BATCH_INDEX(batch.edit_selection_edges)),
MBC_EDIT_SELECTION_FACES = (1u << MBC_BATCH_INDEX(batch.edit_selection_faces)),
MBC_EDIT_SELECTION_FACEDOTS = (1u << MBC_BATCH_INDEX(batch.edit_selection_fdots)),
MBC_ALL_VERTS = (1u << MBC_BATCH_INDEX(batch.all_verts)),
MBC_ALL_EDGES = (1u << MBC_BATCH_INDEX(batch.all_edges)),
MBC_LOOSE_EDGES = (1u << MBC_BATCH_INDEX(batch.loose_edges)),
MBC_EDGE_DETECTION = (1u << MBC_BATCH_INDEX(batch.edge_detection)),
MBC_WIRE_EDGES = (1u << MBC_BATCH_INDEX(batch.wire_edges)),
MBC_WIRE_LOOPS = (1u << MBC_BATCH_INDEX(batch.wire_loops)),
MBC_WIRE_LOOPS_UVS = (1u << MBC_BATCH_INDEX(batch.wire_loops_uvs)),
MBC_SCULPT_OVERLAYS = (1u << MBC_BATCH_INDEX(batch.sculpt_overlays)),
} DRWBatchFlag;
BLI_STATIC_ASSERT(MBC_BATCH_INDEX(surface_per_mat) < 32, "Number of batches exceeded the limit of bit fields");
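/* Reading aid (assumption, not part of this commit): MBC_BATCH_INDEX() turns a GPUBatch
 * member of MeshBatchCache into its position inside the contiguous block of batch
 * pointers starting at `batch.surface`, so each flag above is simply the bit for that
 * position. By construction:
 *   MBC_BATCH_INDEX(batch.surface) == 0  ->  MBC_SURFACE == (1u << 0)
 *   a member stored N pointers later     ->  flag == (1u << N)
 * The static assert above guarantees the whole block, `surface_per_mat` included,
 * still fits in the 32 bits used by `batch_requested` / `batch_ready`. */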
#define MBC_EDITUV \
(MBC_EDITUV_FACES_STRETCH_AREA | MBC_EDITUV_FACES_STRETCH_ANGLE | MBC_EDITUV_FACES | \
MBC_EDITUV_EDGES | MBC_EDITUV_VERTS | MBC_EDITUV_FACEDOTS | MBC_WIRE_LOOPS_UVS)
void mesh_buffer_cache_create_requested(struct TaskGraph *task_graph,
MeshBatchCache *cache,
MeshBufferCache *mbc,

View File

@ -498,11 +498,15 @@ static struct TaskNode *extract_task_node_create(struct TaskGraph *task_graph,
* \{ */
struct MeshRenderDataUpdateTaskData {
MeshRenderData *mr = nullptr;
MeshBufferExtractionCache *cache = nullptr;
eMRIterType iter_type;
eMRDataType data_flag;
MeshRenderDataUpdateTaskData(MeshRenderData *mr, eMRIterType iter_type, eMRDataType data_flag)
: mr(mr), iter_type(iter_type), data_flag(data_flag)
MeshRenderDataUpdateTaskData(MeshRenderData *mr,
MeshBufferExtractionCache *cache,
eMRIterType iter_type,
eMRDataType data_flag)
: mr(mr), cache(cache), iter_type(iter_type), data_flag(data_flag)
{
}
@ -533,15 +537,17 @@ static void mesh_extract_render_data_node_exec(void *__restrict task_data)
mesh_render_data_update_normals(mr, data_flag);
mesh_render_data_update_looptris(mr, iter_type, data_flag);
mesh_render_data_update_mat_offsets(mr, update_task_data->cache, data_flag);
}
static struct TaskNode *mesh_extract_render_data_node_create(struct TaskGraph *task_graph,
MeshRenderData *mr,
MeshBufferExtractionCache *cache,
const eMRIterType iter_type,
const eMRDataType data_flag)
{
MeshRenderDataUpdateTaskData *task_data = new MeshRenderDataUpdateTaskData(
mr, iter_type, data_flag);
mr, cache, iter_type, data_flag);
struct TaskNode *task_node = BLI_task_graph_node_create(
task_graph,
@ -702,7 +708,7 @@ static void mesh_buffer_cache_create_requested(struct TaskGraph *task_graph,
#endif
struct TaskNode *task_node_mesh_render_data = mesh_extract_render_data_node_create(
task_graph, mr, iter_type, data_flag);
task_graph, mr, extraction_cache, iter_type, data_flag);
/* Simple heuristic. */
const bool use_thread = (mr->loop_len + mr->loop_loose_len) > MIM_RANGE_LEN;

View File

@ -38,6 +38,70 @@
extern "C" {
#endif
/* ---------------------------------------------------------------------- */
/** \name Dependencies between buffer and batch
* \{ */
#ifndef NDEBUG
# define _MDEF_type(name) static DRWBatchFlag MDEP_assert_##name = 0, MDEP_##name
#else
# define _MDEF_type(name) static const DRWBatchFlag MDEP_##name
#endif
/* clang-format off */
#define _MDEPS_CREATE1(b) (1u << MBC_BATCH_INDEX(b))
#define _MDEPS_CREATE2(b1, b2) _MDEPS_CREATE1(b1) | _MDEPS_CREATE1(b2)
#define _MDEPS_CREATE3(b1, b2, b3) _MDEPS_CREATE2(b1, b2) | _MDEPS_CREATE1(b3)
#define _MDEPS_CREATE4(b1, b2, b3, b4) _MDEPS_CREATE3(b1, b2, b3) | _MDEPS_CREATE1(b4)
#define _MDEPS_CREATE5(b1, b2, b3, b4, b5) _MDEPS_CREATE4(b1, b2, b3, b4) | _MDEPS_CREATE1(b5)
#define _MDEPS_CREATE6(b1, b2, b3, b4, b5, b6) _MDEPS_CREATE5(b1, b2, b3, b4, b5) | _MDEPS_CREATE1(b6)
#define _MDEPS_CREATE7(b1, b2, b3, b4, b5, b6, b7) _MDEPS_CREATE6(b1, b2, b3, b4, b5, b6) | _MDEPS_CREATE1(b7)
#define _MDEPS_CREATE8(b1, b2, b3, b4, b5, b6, b7, b8) _MDEPS_CREATE7(b1, b2, b3, b4, b5, b6, b7) | _MDEPS_CREATE1(b8)
#define _MDEPS_CREATE9(b1, b2, b3, b4, b5, b6, b7, b8, b9) _MDEPS_CREATE8(b1, b2, b3, b4, b5, b6, b7, b8) | _MDEPS_CREATE1(b9)
#define _MDEPS_CREATE10(b1, b2, b3, b4, b5, b6, b7, b8, b9, b10) _MDEPS_CREATE9(b1, b2, b3, b4, b5, b6, b7, b8, b9) | _MDEPS_CREATE1(b10)
#define _MDEPS_CREATE19(b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15, b16, b17, b18, b19) _MDEPS_CREATE10(b1, b2, b3, b4, b5, b6, b7, b8, b9, b10) | _MDEPS_CREATE9(b11, b12, b13, b14, b15, b16, b17, b18, b19)
#define MDEPS_CREATE(name, ...) _MDEF_type(name) = VA_NARGS_CALL_OVERLOAD(_MDEPS_CREATE, __VA_ARGS__)
#define _MDEPS_CREATE_MAP1(a) MDEP_##a
#define _MDEPS_CREATE_MAP2(a, b) MDEP_##a | MDEP_##b
#define _MDEPS_CREATE_MAP3(a, b, c) _MDEPS_CREATE_MAP2(a, b) | MDEP_##c
#define _MDEPS_CREATE_MAP4(a, b, c, d) _MDEPS_CREATE_MAP3(a, b, c) | MDEP_##d
#define _MDEPS_CREATE_MAP5(a, b, c, d, e) _MDEPS_CREATE_MAP4(a, b, c, d) | MDEP_##e
#define _MDEPS_CREATE_MAP6(a, b, c, d, e, f) _MDEPS_CREATE_MAP5(a, b, c, d, e) | MDEP_##f
#define _MDEPS_CREATE_MAP7(a, b, c, d, e, f, g) _MDEPS_CREATE_MAP6(a, b, c, d, e, f) | MDEP_##g
#define _MDEPS_CREATE_MAP8(a, b, c, d, e, f, g, h) _MDEPS_CREATE_MAP7(a, b, c, d, e, f, g) | MDEP_##h
#define _MDEPS_CREATE_MAP9(a, b, c, d, e, f, g, h, i) _MDEPS_CREATE_MAP8(a, b, c, d, e, f, g, h) | MDEP_##i
#define _MDEPS_CREATE_MAP10(a, b, c, d, e, f, g, h, i, j) _MDEPS_CREATE_MAP9(a, b, c, d, e, f, g, h, i) | MDEP_##j
#define MDEPS_CREATE_MAP(...) VA_NARGS_CALL_OVERLOAD(_MDEPS_CREATE_MAP, __VA_ARGS__)
#ifndef NDEBUG
# define _MDEPS_ASSERT2(b, name) \
MDEP_assert_##name |= _MDEPS_CREATE1(b); \
BLI_assert(MDEP_##name & _MDEPS_CREATE1(b))
# define _MDEPS_ASSERT3(b, n1, n2) _MDEPS_ASSERT2(b, n1); _MDEPS_ASSERT2(b, n2)
# define _MDEPS_ASSERT4(b, n1, n2, n3) _MDEPS_ASSERT3(b, n1, n2); _MDEPS_ASSERT2(b, n3)
# define _MDEPS_ASSERT5(b, n1, n2, n3, n4) _MDEPS_ASSERT4(b, n1, n2, n3); _MDEPS_ASSERT2(b, n4)
# define _MDEPS_ASSERT6(b, n1, n2, n3, n4, n5) _MDEPS_ASSERT5(b, n1, n2, n3, n4); _MDEPS_ASSERT2(b, n5)
# define _MDEPS_ASSERT7(b, n1, n2, n3, n4, n5, n6) _MDEPS_ASSERT6(b, n1, n2, n3, n4, n5); _MDEPS_ASSERT2(b, n6)
# define _MDEPS_ASSERT8(b, n1, n2, n3, n4, n5, n6, n7) _MDEPS_ASSERT7(b, n1, n2, n3, n4, n5, n6); _MDEPS_ASSERT2(b, n7)
# define MDEPS_ASSERT(...) VA_NARGS_CALL_OVERLOAD(_MDEPS_ASSERT, __VA_ARGS__)
# define MDEPS_ASSERT_MAP(name) BLI_assert(MDEP_assert_##name == MDEP_##name)
#else
# define MDEPS_ASSERT(...)
# define MDEPS_ASSERT_MAP(name) UNUSED_VARS(MDEP_##name)
#endif
/* clang-format on */
/** \} */
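/* Reading aid (assumption, not part of this commit): for a buffer with a single user,
 *   MDEPS_CREATE(vbo_weights, batch.surface_weights);
 * expands in release builds to
 *   static const DRWBatchFlag MDEP_vbo_weights = (1u << MBC_BATCH_INDEX(batch.surface_weights));
 * i.e. the declared set of batches that use the `weights` VBO. In debug builds,
 * MDEPS_ASSERT(batch.surface_weights, vbo_weights) at the request site both checks that
 * the batch is part of that declared set and records it, so MDEPS_ASSERT_MAP(vbo_weights)
 * can verify afterwards that every declared dependency was actually requested. */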
/* ---------------------------------------------------------------------- */
/** \name Mesh Render Data
* \{ */
typedef enum eMRExtractType {
MR_EXTRACT_BMESH,
MR_EXTRACT_MAPPED,
@ -94,6 +158,10 @@ typedef struct MeshRenderData {
float (*loop_normals)[3];
float (*poly_normals)[3];
int *lverts, *ledges;
struct {
int *tri;
int visible_tri_len;
} mat_offsets;
} MeshRenderData;
BLI_INLINE BMFace *bm_original_face_get(const MeshRenderData *mr, int idx)
@ -150,6 +218,8 @@ BLI_INLINE const float *bm_face_no_get(const MeshRenderData *mr, const BMFace *e
return efa->no;
}
/** \} */
/* ---------------------------------------------------------------------- */
/** \name Mesh Elements Extract Struct
* \{ */
@ -238,6 +308,9 @@ MeshRenderData *mesh_render_data_create(Mesh *me,
const eMRIterType iter_type);
void mesh_render_data_free(MeshRenderData *mr);
void mesh_render_data_update_normals(MeshRenderData *mr, const eMRDataType data_flag);
void mesh_render_data_update_mat_offsets(MeshRenderData *mr,
MeshBufferExtractionCache *cache,
const eMRDataType data_flag);
void mesh_render_data_update_looptris(MeshRenderData *mr,
const eMRIterType iter_type,
const eMRDataType data_flag);

View File

@ -39,15 +39,26 @@
#include "draw_cache_extract_mesh_private.h"
/* ---------------------------------------------------------------------- */
/** \name Mesh/BMesh Interface (indirect, partially cached access to complex data).
/** \name Update Loose Geometry
* \{ */
static void mesh_render_data_lverts_bm(const MeshRenderData *mr,
MeshBufferExtractionCache *cache,
BMesh *bm);
static void mesh_render_data_ledges_bm(const MeshRenderData *mr,
MeshBufferExtractionCache *cache,
BMesh *bm);
static void mesh_render_data_loose_geom_mesh(const MeshRenderData *mr,
MeshBufferExtractionCache *cache);
static void mesh_render_data_loose_geom_build(const MeshRenderData *mr,
MeshBufferExtractionCache *cache);
static void mesh_render_data_loose_geom_load(MeshRenderData *mr, MeshBufferExtractionCache *cache)
{
mr->ledges = cache->ledges;
mr->lverts = cache->lverts;
mr->vert_loose_len = cache->vert_loose_len;
mr->edge_loose_len = cache->edge_loose_len;
mr->ledges = cache->loose_geom.edges;
mr->lverts = cache->loose_geom.verts;
mr->vert_loose_len = cache->loose_geom.vert_len;
mr->edge_loose_len = cache->loose_geom.edge_len;
mr->loop_loose_len = mr->vert_loose_len + (mr->edge_loose_len * 2);
}
@ -57,74 +68,212 @@ static void mesh_render_data_loose_geom_ensure(const MeshRenderData *mr,
{
/* Early exit: loose geometry has already been computed. Only check loose verts, as loose
* edges and verts are calculated at the same time. */
if (cache->lverts) {
if (cache->loose_geom.verts) {
return;
}
mesh_render_data_loose_geom_build(mr, cache);
}
cache->vert_loose_len = 0;
cache->edge_loose_len = 0;
static void mesh_render_data_loose_geom_build(const MeshRenderData *mr,
MeshBufferExtractionCache *cache)
{
cache->loose_geom.vert_len = 0;
cache->loose_geom.edge_len = 0;
if (mr->extract_type != MR_EXTRACT_BMESH) {
/* Mesh */
BLI_bitmap *lvert_map = BLI_BITMAP_NEW(mr->vert_len, __func__);
cache->ledges = MEM_mallocN(mr->edge_len * sizeof(*cache->ledges), __func__);
const MEdge *med = mr->medge;
for (int med_index = 0; med_index < mr->edge_len; med_index++, med++) {
if (med->flag & ME_LOOSEEDGE) {
cache->ledges[cache->edge_loose_len++] = med_index;
}
/* Tag verts as not loose. */
BLI_BITMAP_ENABLE(lvert_map, med->v1);
BLI_BITMAP_ENABLE(lvert_map, med->v2);
}
if (cache->edge_loose_len < mr->edge_len) {
cache->ledges = MEM_reallocN(cache->ledges, cache->edge_loose_len * sizeof(*cache->ledges));
}
cache->lverts = MEM_mallocN(mr->vert_len * sizeof(*mr->lverts), __func__);
for (int v = 0; v < mr->vert_len; v++) {
if (!BLI_BITMAP_TEST(lvert_map, v)) {
cache->lverts[cache->vert_loose_len++] = v;
}
}
if (cache->vert_loose_len < mr->vert_len) {
cache->lverts = MEM_reallocN(cache->lverts, cache->vert_loose_len * sizeof(*cache->lverts));
}
MEM_freeN(lvert_map);
mesh_render_data_loose_geom_mesh(mr, cache);
}
else {
/* #BMesh */
BMesh *bm = mr->bm;
int elem_id;
BMIter iter;
BMVert *eve;
BMEdge *ede;
mesh_render_data_lverts_bm(mr, cache, bm);
mesh_render_data_ledges_bm(mr, cache, bm);
}
}
cache->lverts = MEM_mallocN(mr->vert_len * sizeof(*cache->lverts), __func__);
BM_ITER_MESH_INDEX (eve, &iter, bm, BM_VERTS_OF_MESH, elem_id) {
if (eve->e == NULL) {
cache->lverts[cache->vert_loose_len++] = elem_id;
}
}
if (cache->vert_loose_len < mr->vert_len) {
cache->lverts = MEM_reallocN(cache->lverts, cache->vert_loose_len * sizeof(*cache->lverts));
}
static void mesh_render_data_loose_geom_mesh(const MeshRenderData *mr,
MeshBufferExtractionCache *cache)
{
BLI_bitmap *lvert_map = BLI_BITMAP_NEW(mr->vert_len, __func__);
cache->ledges = MEM_mallocN(mr->edge_len * sizeof(*cache->ledges), __func__);
BM_ITER_MESH_INDEX (ede, &iter, bm, BM_EDGES_OF_MESH, elem_id) {
if (ede->l == NULL) {
cache->ledges[cache->edge_loose_len++] = elem_id;
}
cache->loose_geom.edges = MEM_mallocN(mr->edge_len * sizeof(*cache->loose_geom.edges), __func__);
const MEdge *med = mr->medge;
for (int med_index = 0; med_index < mr->edge_len; med_index++, med++) {
if (med->flag & ME_LOOSEEDGE) {
cache->loose_geom.edges[cache->loose_geom.edge_len++] = med_index;
}
if (cache->edge_loose_len < mr->edge_len) {
cache->ledges = MEM_reallocN(cache->ledges, cache->edge_loose_len * sizeof(*cache->ledges));
/* Tag verts as not loose. */
BLI_BITMAP_ENABLE(lvert_map, med->v1);
BLI_BITMAP_ENABLE(lvert_map, med->v2);
}
if (cache->loose_geom.edge_len < mr->edge_len) {
cache->loose_geom.edges = MEM_reallocN(
cache->loose_geom.edges, cache->loose_geom.edge_len * sizeof(*cache->loose_geom.edges));
}
cache->loose_geom.verts = MEM_mallocN(mr->vert_len * sizeof(*cache->loose_geom.verts), __func__);
for (int v = 0; v < mr->vert_len; v++) {
if (!BLI_BITMAP_TEST(lvert_map, v)) {
cache->loose_geom.verts[cache->loose_geom.vert_len++] = v;
}
}
if (cache->loose_geom.vert_len < mr->vert_len) {
cache->loose_geom.verts = MEM_reallocN(
cache->loose_geom.verts, cache->loose_geom.vert_len * sizeof(*cache->loose_geom.verts));
}
MEM_freeN(lvert_map);
}
static void mesh_render_data_lverts_bm(const MeshRenderData *mr,
MeshBufferExtractionCache *cache,
BMesh *bm)
{
int elem_id;
BMIter iter;
BMVert *eve;
cache->loose_geom.verts = MEM_mallocN(mr->vert_len * sizeof(*cache->loose_geom.verts), __func__);
BM_ITER_MESH_INDEX (eve, &iter, bm, BM_VERTS_OF_MESH, elem_id) {
if (eve->e == NULL) {
cache->loose_geom.verts[cache->loose_geom.vert_len++] = elem_id;
}
}
if (cache->loose_geom.vert_len < mr->vert_len) {
cache->loose_geom.verts = MEM_reallocN(
cache->loose_geom.verts, cache->loose_geom.vert_len * sizeof(*cache->loose_geom.verts));
}
}
static void mesh_render_data_ledges_bm(const MeshRenderData *mr,
MeshBufferExtractionCache *cache,
BMesh *bm)
{
int elem_id;
BMIter iter;
BMEdge *ede;
cache->loose_geom.edges = MEM_mallocN(mr->edge_len * sizeof(*cache->loose_geom.edges), __func__);
BM_ITER_MESH_INDEX (ede, &iter, bm, BM_EDGES_OF_MESH, elem_id) {
if (ede->l == NULL) {
cache->loose_geom.edges[cache->loose_geom.edge_len++] = elem_id;
}
}
if (cache->loose_geom.edge_len < mr->edge_len) {
cache->loose_geom.edges = MEM_reallocN(
cache->loose_geom.edges, cache->loose_geom.edge_len * sizeof(*cache->loose_geom.edges));
}
}
/** \} */
/* ---------------------------------------------------------------------- */
/** \name Material Offsets
*
* Material offsets contain the offset of each material after sorting triangles by material.
*
* \{ */
static void mesh_render_data_mat_offset_load(MeshRenderData *mr,
const MeshBufferExtractionCache *cache);
static void mesh_render_data_mat_offset_ensure(MeshRenderData *mr,
MeshBufferExtractionCache *cache);
static void mesh_render_data_mat_offset_build(MeshRenderData *mr,
MeshBufferExtractionCache *cache);
static void mesh_render_data_mat_offset_build_bm(MeshRenderData *mr,
MeshBufferExtractionCache *cache);
static void mesh_render_data_mat_offset_build_mesh(MeshRenderData *mr,
MeshBufferExtractionCache *cache);
static void mesh_render_data_mat_offset_apply_offset(MeshRenderData *mr,
MeshBufferExtractionCache *cache);
void mesh_render_data_update_mat_offsets(MeshRenderData *mr,
MeshBufferExtractionCache *cache,
const eMRDataType data_flag)
{
if (data_flag & MR_DATA_MAT_OFFSETS) {
mesh_render_data_mat_offset_ensure(mr, cache);
mesh_render_data_mat_offset_load(mr, cache);
}
}
static void mesh_render_data_mat_offset_load(MeshRenderData *mr,
const MeshBufferExtractionCache *cache)
{
mr->mat_offsets.tri = cache->mat_offsets.tri;
mr->mat_offsets.visible_tri_len = cache->mat_offsets.visible_tri_len;
}
static void mesh_render_data_mat_offset_ensure(MeshRenderData *mr,
MeshBufferExtractionCache *cache)
{
if (cache->mat_offsets.tri) {
return;
}
mesh_render_data_mat_offset_build(mr, cache);
}
static void mesh_render_data_mat_offset_build(MeshRenderData *mr, MeshBufferExtractionCache *cache)
{
size_t mat_tri_idx_size = sizeof(int) * mr->mat_len;
cache->mat_offsets.tri = MEM_callocN(mat_tri_idx_size, __func__);
/* Count how many triangles for each material. */
if (mr->extract_type == MR_EXTRACT_BMESH) {
mesh_render_data_mat_offset_build_bm(mr, cache);
}
else {
mesh_render_data_mat_offset_build_mesh(mr, cache);
}
mesh_render_data_mat_offset_apply_offset(mr, cache);
}
static void mesh_render_data_mat_offset_build_bm(MeshRenderData *mr,
MeshBufferExtractionCache *cache)
{
int *mat_tri_len = cache->mat_offsets.tri;
BMIter iter;
BMFace *efa;
BM_ITER_MESH (efa, &iter, mr->bm, BM_FACES_OF_MESH) {
if (!BM_elem_flag_test(efa, BM_ELEM_HIDDEN)) {
int mat = min_ii(efa->mat_nr, mr->mat_len - 1);
mat_tri_len[mat] += efa->len - 2;
}
}
}
static void mesh_render_data_mat_offset_build_mesh(MeshRenderData *mr,
MeshBufferExtractionCache *cache)
{
int *mat_tri_len = cache->mat_offsets.tri;
const MPoly *mp = mr->mpoly;
for (int mp_index = 0; mp_index < mr->poly_len; mp_index++, mp++) {
if (!(mr->use_hide && (mp->flag & ME_HIDE))) {
int mat = min_ii(mp->mat_nr, mr->mat_len - 1);
mat_tri_len[mat] += mp->totloop - 2;
}
}
}
static void mesh_render_data_mat_offset_apply_offset(MeshRenderData *mr,
MeshBufferExtractionCache *cache)
{
int *mat_tri_len = cache->mat_offsets.tri;
int ofs = mat_tri_len[0];
mat_tri_len[0] = 0;
for (int i = 1; i < mr->mat_len; i++) {
int tmp = mat_tri_len[i];
mat_tri_len[i] = ofs;
ofs += tmp;
}
cache->mat_offsets.visible_tri_len = ofs;
}
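/* Worked example (illustration only): with 3 materials and per-material triangle counts
 * {5, 2, 3}, the prefix sum above rewrites `mat_offsets.tri` to the start offsets
 * {0, 5, 7} and sets `visible_tri_len` to 10, giving the start index of each material's
 * triangles in the sorted triangle index buffer. */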
/** \} */
/* ---------------------------------------------------------------------- */
/** \name Mesh/BMesh Interface (indirect, partially cached access to complex data).
* \{ */
/**
* Part of the creation of the #MeshRenderData that happens in a thread.
*/
@ -159,16 +308,8 @@ void mesh_render_data_update_normals(MeshRenderData *mr, const eMRDataType data_
if (mr->extract_type != MR_EXTRACT_BMESH) {
/* Mesh */
if (data_flag & (MR_DATA_POLY_NOR | MR_DATA_LOOP_NOR | MR_DATA_TAN_LOOP_NOR)) {
mr->poly_normals = MEM_mallocN(sizeof(*mr->poly_normals) * mr->poly_len, __func__);
BKE_mesh_calc_normals_poly((MVert *)mr->mvert,
NULL,
mr->vert_len,
mr->mloop,
mr->mpoly,
mr->loop_len,
mr->poly_len,
mr->poly_normals,
true);
BKE_mesh_ensure_normals_for_display(mr->me);
mr->poly_normals = CustomData_get_layer(&mr->me->pdata, CD_NORMAL);
}
if (((data_flag & MR_DATA_LOOP_NOR) && is_auto_smooth) || (data_flag & MR_DATA_TAN_LOOP_NOR)) {
mr->loop_normals = MEM_mallocN(sizeof(*mr->loop_normals) * mr->loop_len, __func__);
@ -358,7 +499,6 @@ MeshRenderData *mesh_render_data_create(Mesh *me,
void mesh_render_data_free(MeshRenderData *mr)
{
MEM_SAFE_FREE(mr->mlooptri);
MEM_SAFE_FREE(mr->poly_normals);
MEM_SAFE_FREE(mr->loop_normals);
/* Loose geometry are owned by MeshBufferExtractionCache. */
@ -368,4 +508,4 @@ void mesh_render_data_free(MeshRenderData *mr)
MEM_freeN(mr);
}
/** \} */
/** \} */

View File

@ -67,12 +67,109 @@
#include "ED_uvedit.h"
#include "draw_cache_extract.h"
#include "draw_cache_extract_mesh_private.h"
#include "draw_cache_inline.h"
#include "draw_cache_impl.h" /* own include */
MDEPS_CREATE(vbo_lnor, batch.surface, batch.wire_loops, batch.edit_lnor, surface_per_mat);
MDEPS_CREATE(vbo_pos_nor,
batch.surface,
batch.surface_weights,
batch.all_verts,
batch.all_edges,
batch.loose_edges,
batch.edge_detection,
batch.wire_loops,
batch.wire_edges,
batch.edit_vnor,
batch.edit_lnor,
batch.edit_vertices,
batch.edit_edges,
batch.edit_triangles,
batch.edit_selection_verts,
batch.edit_selection_edges,
batch.edit_selection_faces,
batch.edit_mesh_analysis,
batch.sculpt_overlays,
surface_per_mat);
MDEPS_CREATE(vbo_uv,
batch.surface,
batch.wire_loops_uvs,
batch.edituv_faces,
batch.edituv_faces_stretch_area,
batch.edituv_faces_stretch_angle,
batch.edituv_edges,
batch.edituv_verts,
surface_per_mat);
MDEPS_CREATE(vbo_vcol, batch.surface, surface_per_mat);
MDEPS_CREATE(vbo_sculpt_data, batch.sculpt_overlays);
MDEPS_CREATE(vbo_weights, batch.surface_weights);
MDEPS_CREATE(vbo_edge_fac, batch.wire_edges);
MDEPS_CREATE(vbo_mesh_analysis, batch.edit_mesh_analysis);
MDEPS_CREATE(vbo_tan, surface_per_mat);
MDEPS_CREATE(vbo_orco, surface_per_mat);
MDEPS_CREATE(vbo_edit_data, batch.edit_triangles, batch.edit_edges, batch.edit_vertices);
MDEPS_CREATE(vbo_fdots_pos, batch.edit_fdots, batch.edit_selection_fdots);
MDEPS_CREATE(vbo_fdots_nor, batch.edit_fdots);
MDEPS_CREATE(vbo_skin_roots, batch.edit_skin_roots);
MDEPS_CREATE(vbo_vert_idx, batch.edit_selection_verts);
MDEPS_CREATE(vbo_edge_idx, batch.edit_selection_edges);
MDEPS_CREATE(vbo_poly_idx, batch.edit_selection_faces);
MDEPS_CREATE(vbo_fdot_idx, batch.edit_selection_fdots);
MDEPS_CREATE(vbo_edituv_data,
batch.edituv_faces,
batch.edituv_faces_stretch_area,
batch.edituv_faces_stretch_angle,
batch.edituv_edges,
batch.edituv_verts);
MDEPS_CREATE(vbo_edituv_stretch_area, batch.edituv_faces_stretch_area);
MDEPS_CREATE(vbo_edituv_stretch_angle, batch.edituv_faces_stretch_angle);
MDEPS_CREATE(vbo_fdots_uv, batch.edituv_fdots);
MDEPS_CREATE(vbo_fdots_edituv_data, batch.edituv_fdots);
MDEPS_CREATE(ibo_tris,
batch.surface,
batch.sculpt_overlays,
batch.surface_weights,
batch.edit_mesh_analysis,
batch.edit_triangles,
batch.edit_lnor,
batch.edit_selection_faces);
MDEPS_CREATE(
ibo_lines, batch.all_edges, batch.wire_edges, batch.edit_edges, batch.edit_selection_edges);
MDEPS_CREATE(ibo_lines_loose, batch.loose_edges);
MDEPS_CREATE(ibo_lines_adjacency, batch.edge_detection);
MDEPS_CREATE(ibo_lines_paint_mask, batch.wire_loops);
MDEPS_CREATE(ibo_tris_per_mat, surface_per_mat);
MDEPS_CREATE(ibo_points, batch.edit_vnor, batch.edit_selection_verts, batch.edit_vertices);
MDEPS_CREATE(ibo_fdots, batch.edit_fdots, batch.edit_selection_fdots);
MDEPS_CREATE(ibo_edituv_tris,
batch.edituv_faces,
batch.edituv_faces_stretch_area,
batch.edituv_faces_stretch_angle);
MDEPS_CREATE(ibo_edituv_lines, batch.edituv_edges, batch.wire_loops_uvs);
MDEPS_CREATE(ibo_edituv_points, batch.edituv_verts);
MDEPS_CREATE(ibo_edituv_fdots, batch.edituv_fdots);
static void mesh_batch_cache_discard_surface_batches(MeshBatchCache *cache);
static void mesh_batch_cache_clear(Mesh *me);
static void mesh_batch_cache_discard_batch(MeshBatchCache *cache, const DRWBatchFlag batch_map)
{
for (int i = 0; i < MBC_BATCH_LEN; i++) {
DRWBatchFlag batch_requested = (1u << i);
if (batch_map & batch_requested) {
GPU_BATCH_DISCARD_SAFE(((GPUBatch **)&cache->batch)[i]);
cache->batch_ready &= ~batch_requested;
}
}
if (batch_map & (1u << MBC_BATCH_INDEX(surface_per_mat))) {
mesh_batch_cache_discard_surface_batches(cache);
}
}
/* Return true if all layers in _b_ are inside _a_. */
BLI_INLINE bool mesh_cd_layers_type_overlap(DRW_MeshCDMask a, DRW_MeshCDMask b)
{
@ -562,14 +659,8 @@ static void mesh_batch_cache_discard_shaded_tri(MeshBatchCache *cache)
GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.vcol);
GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.orco);
}
/* Discard batches using vbo.uv. */
GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_faces);
GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_faces_stretch_area);
GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_faces_stretch_angle);
GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_edges);
GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_verts);
mesh_batch_cache_discard_surface_batches(cache);
DRWBatchFlag batch_map = MDEPS_CREATE_MAP(vbo_uv, vbo_tan, vbo_vcol, vbo_orco);
mesh_batch_cache_discard_batch(cache, batch_map);
mesh_cd_layers_type_clear(&cache->cd_used);
}
@ -587,13 +678,17 @@ static void mesh_batch_cache_discard_uvedit(MeshBatchCache *cache)
GPU_INDEXBUF_DISCARD_SAFE(mbufcache->ibo.edituv_points);
GPU_INDEXBUF_DISCARD_SAFE(mbufcache->ibo.edituv_fdots);
}
GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_faces_stretch_area);
GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_faces_stretch_angle);
GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_faces);
GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_edges);
GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_verts);
GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_fdots);
GPU_BATCH_DISCARD_SAFE(cache->batch.wire_loops_uvs);
DRWBatchFlag batch_map = MDEPS_CREATE_MAP(vbo_edituv_stretch_angle,
vbo_edituv_stretch_area,
vbo_uv,
vbo_edituv_data,
vbo_fdots_uv,
vbo_fdots_edituv_data,
ibo_edituv_tris,
ibo_edituv_lines,
ibo_edituv_points,
ibo_edituv_fdots);
mesh_batch_cache_discard_batch(cache, batch_map);
cache->tot_area = 0.0f;
cache->tot_uv_area = 0.0f;
@ -603,9 +698,6 @@ static void mesh_batch_cache_discard_uvedit(MeshBatchCache *cache)
/* We discarded the vbo.uv so we need to reset the cd_used flag. */
cache->cd_used.uv = 0;
cache->cd_used.edit_uv = 0;
/* Discard other batches that uses vbo.uv */
mesh_batch_cache_discard_surface_batches(cache);
}
static void mesh_batch_cache_discard_uvedit_select(MeshBatchCache *cache)
@ -618,14 +710,13 @@ static void mesh_batch_cache_discard_uvedit_select(MeshBatchCache *cache)
GPU_INDEXBUF_DISCARD_SAFE(mbufcache->ibo.edituv_points);
GPU_INDEXBUF_DISCARD_SAFE(mbufcache->ibo.edituv_fdots);
}
GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_faces_stretch_area);
GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_faces_stretch_angle);
GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_faces);
GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_edges);
GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_verts);
GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_fdots);
GPU_BATCH_DISCARD_SAFE(cache->batch.wire_loops_uvs);
cache->batch_ready &= ~MBC_EDITUV;
DRWBatchFlag batch_map = MDEPS_CREATE_MAP(vbo_edituv_data,
vbo_fdots_edituv_data,
ibo_edituv_tris,
ibo_edituv_lines,
ibo_edituv_points,
ibo_edituv_fdots);
mesh_batch_cache_discard_batch(cache, batch_map);
}
void DRW_mesh_batch_cache_dirty_tag(Mesh *me, eMeshBatchDirtyMode mode)
@ -634,25 +725,16 @@ void DRW_mesh_batch_cache_dirty_tag(Mesh *me, eMeshBatchDirtyMode mode)
if (cache == NULL) {
return;
}
DRWBatchFlag batch_map;
switch (mode) {
case BKE_MESH_BATCH_DIRTY_SELECT:
FOREACH_MESH_BUFFER_CACHE (cache, mbufcache) {
GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.edit_data);
GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.fdots_nor);
}
GPU_BATCH_DISCARD_SAFE(cache->batch.edit_triangles);
GPU_BATCH_DISCARD_SAFE(cache->batch.edit_vertices);
GPU_BATCH_DISCARD_SAFE(cache->batch.edit_edges);
GPU_BATCH_DISCARD_SAFE(cache->batch.edit_fdots);
GPU_BATCH_DISCARD_SAFE(cache->batch.edit_selection_verts);
GPU_BATCH_DISCARD_SAFE(cache->batch.edit_selection_edges);
GPU_BATCH_DISCARD_SAFE(cache->batch.edit_selection_faces);
GPU_BATCH_DISCARD_SAFE(cache->batch.edit_selection_fdots);
GPU_BATCH_DISCARD_SAFE(cache->batch.edit_mesh_analysis);
cache->batch_ready &= ~(MBC_EDIT_TRIANGLES | MBC_EDIT_VERTICES | MBC_EDIT_EDGES |
MBC_EDIT_FACEDOTS | MBC_EDIT_SELECTION_FACEDOTS |
MBC_EDIT_SELECTION_FACES | MBC_EDIT_SELECTION_EDGES |
MBC_EDIT_SELECTION_VERTS | MBC_EDIT_MESH_ANALYSIS);
batch_map = MDEPS_CREATE_MAP(vbo_edit_data, vbo_fdots_nor);
mesh_batch_cache_discard_batch(cache, batch_map);
/* Because visible UVs depend on edit-mode selection, discard topology. */
mesh_batch_cache_discard_uvedit_select(cache);
break;
@ -664,20 +746,8 @@ void DRW_mesh_batch_cache_dirty_tag(Mesh *me, eMeshBatchDirtyMode mode)
GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.pos_nor);
GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.lnor);
}
GPU_BATCH_DISCARD_SAFE(cache->batch.surface);
/* Discard batches using vbo.pos_nor. */
GPU_BATCH_DISCARD_SAFE(cache->batch.wire_loops);
GPU_BATCH_DISCARD_SAFE(cache->batch.wire_edges);
GPU_BATCH_DISCARD_SAFE(cache->batch.all_verts);
GPU_BATCH_DISCARD_SAFE(cache->batch.all_edges);
GPU_BATCH_DISCARD_SAFE(cache->batch.loose_edges);
GPU_BATCH_DISCARD_SAFE(cache->batch.edge_detection);
GPU_BATCH_DISCARD_SAFE(cache->batch.surface_weights);
GPU_BATCH_DISCARD_SAFE(cache->batch.edit_mesh_analysis);
/* Discard batches using vbo.lnor. */
GPU_BATCH_DISCARD_SAFE(cache->batch.edit_lnor);
mesh_batch_cache_discard_surface_batches(cache);
cache->batch_ready &= ~(MBC_SURFACE | MBC_WIRE_EDGES | MBC_WIRE_LOOPS);
batch_map = MDEPS_CREATE_MAP(ibo_lines_paint_mask, vbo_pos_nor, vbo_lnor);
mesh_batch_cache_discard_batch(cache, batch_map);
break;
case BKE_MESH_BATCH_DIRTY_ALL:
cache->is_dirty = true;
@ -694,13 +764,8 @@ void DRW_mesh_batch_cache_dirty_tag(Mesh *me, eMeshBatchDirtyMode mode)
GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.edituv_data);
GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.fdots_edituv_data);
}
GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_faces_stretch_area);
GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_faces_stretch_angle);
GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_faces);
GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_edges);
GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_verts);
GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_fdots);
cache->batch_ready &= ~MBC_EDITUV;
batch_map = MDEPS_CREATE_MAP(vbo_edituv_data, vbo_fdots_edituv_data);
mesh_batch_cache_discard_batch(cache, batch_map);
break;
default:
BLI_assert(0);
@ -721,10 +786,12 @@ static void mesh_buffer_cache_clear(MeshBufferCache *mbufcache)
static void mesh_buffer_extraction_cache_clear(MeshBufferExtractionCache *extraction_cache)
{
MEM_SAFE_FREE(extraction_cache->lverts);
MEM_SAFE_FREE(extraction_cache->ledges);
extraction_cache->edge_loose_len = 0;
extraction_cache->vert_loose_len = 0;
MEM_SAFE_FREE(extraction_cache->loose_geom.verts);
MEM_SAFE_FREE(extraction_cache->loose_geom.edges);
extraction_cache->loose_geom.edge_len = 0;
extraction_cache->loose_geom.vert_len = 0;
MEM_SAFE_FREE(extraction_cache->mat_offsets.tri);
}
static void mesh_batch_cache_clear(Mesh *me)
@ -1370,6 +1437,7 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
MeshBufferCache *mbufcache = &cache->final;
/* Initialize batches and request VBO's & IBO's. */
MDEPS_ASSERT(batch.surface, ibo_tris, vbo_lnor, vbo_pos_nor, vbo_uv, vbo_vcol);
if (DRW_batch_requested(cache->batch.surface, GPU_PRIM_TRIS)) {
DRW_ibo_request(cache->batch.surface, &mbufcache->ibo.tris);
/* Order matters. First ones override latest VBO's attributes. */
@ -1382,43 +1450,52 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
DRW_vbo_request(cache->batch.surface, &mbufcache->vbo.vcol);
}
}
MDEPS_ASSERT(batch.all_verts, vbo_pos_nor);
if (DRW_batch_requested(cache->batch.all_verts, GPU_PRIM_POINTS)) {
DRW_vbo_request(cache->batch.all_verts, &mbufcache->vbo.pos_nor);
}
MDEPS_ASSERT(batch.sculpt_overlays, ibo_tris, vbo_pos_nor, vbo_sculpt_data);
if (DRW_batch_requested(cache->batch.sculpt_overlays, GPU_PRIM_TRIS)) {
DRW_ibo_request(cache->batch.sculpt_overlays, &mbufcache->ibo.tris);
DRW_vbo_request(cache->batch.sculpt_overlays, &mbufcache->vbo.pos_nor);
DRW_vbo_request(cache->batch.sculpt_overlays, &mbufcache->vbo.sculpt_data);
}
MDEPS_ASSERT(batch.all_edges, ibo_lines, vbo_pos_nor);
if (DRW_batch_requested(cache->batch.all_edges, GPU_PRIM_LINES)) {
DRW_ibo_request(cache->batch.all_edges, &mbufcache->ibo.lines);
DRW_vbo_request(cache->batch.all_edges, &mbufcache->vbo.pos_nor);
}
MDEPS_ASSERT(batch.loose_edges, ibo_lines_loose, vbo_pos_nor);
if (DRW_batch_requested(cache->batch.loose_edges, GPU_PRIM_LINES)) {
DRW_ibo_request(NULL, &mbufcache->ibo.lines);
DRW_ibo_request(cache->batch.loose_edges, &mbufcache->ibo.lines_loose);
DRW_vbo_request(cache->batch.loose_edges, &mbufcache->vbo.pos_nor);
}
MDEPS_ASSERT(batch.edge_detection, ibo_lines_adjacency, vbo_pos_nor);
if (DRW_batch_requested(cache->batch.edge_detection, GPU_PRIM_LINES_ADJ)) {
DRW_ibo_request(cache->batch.edge_detection, &mbufcache->ibo.lines_adjacency);
DRW_vbo_request(cache->batch.edge_detection, &mbufcache->vbo.pos_nor);
}
MDEPS_ASSERT(batch.surface_weights, ibo_tris, vbo_pos_nor, vbo_weights);
if (DRW_batch_requested(cache->batch.surface_weights, GPU_PRIM_TRIS)) {
DRW_ibo_request(cache->batch.surface_weights, &mbufcache->ibo.tris);
DRW_vbo_request(cache->batch.surface_weights, &mbufcache->vbo.pos_nor);
DRW_vbo_request(cache->batch.surface_weights, &mbufcache->vbo.weights);
}
MDEPS_ASSERT(batch.wire_loops, ibo_lines_paint_mask, vbo_lnor, vbo_pos_nor);
if (DRW_batch_requested(cache->batch.wire_loops, GPU_PRIM_LINES)) {
DRW_ibo_request(cache->batch.wire_loops, &mbufcache->ibo.lines_paint_mask);
/* Order matters. First ones override latest VBO's attributes. */
DRW_vbo_request(cache->batch.wire_loops, &mbufcache->vbo.lnor);
DRW_vbo_request(cache->batch.wire_loops, &mbufcache->vbo.pos_nor);
}
MDEPS_ASSERT(batch.wire_edges, ibo_lines, vbo_pos_nor, vbo_edge_fac);
if (DRW_batch_requested(cache->batch.wire_edges, GPU_PRIM_LINES)) {
DRW_ibo_request(cache->batch.wire_edges, &mbufcache->ibo.lines);
DRW_vbo_request(cache->batch.wire_edges, &mbufcache->vbo.pos_nor);
DRW_vbo_request(cache->batch.wire_edges, &mbufcache->vbo.edge_fac);
}
MDEPS_ASSERT(batch.wire_loops_uvs, ibo_edituv_lines, vbo_uv);
if (DRW_batch_requested(cache->batch.wire_loops_uvs, GPU_PRIM_LINES)) {
DRW_ibo_request(cache->batch.wire_loops_uvs, &mbufcache->ibo.edituv_lines);
/* For paint overlay. Active layer should have been queried. */
@ -1426,6 +1503,7 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
DRW_vbo_request(cache->batch.wire_loops_uvs, &mbufcache->vbo.uv);
}
}
MDEPS_ASSERT(batch.edit_mesh_analysis, ibo_tris, vbo_pos_nor, vbo_mesh_analysis);
if (DRW_batch_requested(cache->batch.edit_mesh_analysis, GPU_PRIM_TRIS)) {
DRW_ibo_request(cache->batch.edit_mesh_analysis, &mbufcache->ibo.tris);
DRW_vbo_request(cache->batch.edit_mesh_analysis, &mbufcache->vbo.pos_nor);
@ -1433,6 +1511,14 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
}
/* Per Material */
MDEPS_ASSERT(surface_per_mat,
ibo_tris_per_mat,
vbo_lnor,
vbo_pos_nor,
vbo_uv,
vbo_tan,
vbo_vcol,
vbo_orco);
for (int i = 0; i < cache->mat_len; i++) {
if (DRW_batch_requested(cache->surface_per_mat[i], GPU_PRIM_TRIS)) {
DRW_ibo_request(cache->surface_per_mat[i], &mbufcache->tris_per_mat[i]);
@ -1457,55 +1543,66 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
mbufcache = (do_cage) ? &cache->cage : &cache->final;
/* Edit Mesh */
MDEPS_ASSERT(batch.edit_triangles, ibo_tris, vbo_pos_nor, vbo_edit_data);
if (DRW_batch_requested(cache->batch.edit_triangles, GPU_PRIM_TRIS)) {
DRW_ibo_request(cache->batch.edit_triangles, &mbufcache->ibo.tris);
DRW_vbo_request(cache->batch.edit_triangles, &mbufcache->vbo.pos_nor);
DRW_vbo_request(cache->batch.edit_triangles, &mbufcache->vbo.edit_data);
}
MDEPS_ASSERT(batch.edit_vertices, ibo_points, vbo_pos_nor, vbo_edit_data);
if (DRW_batch_requested(cache->batch.edit_vertices, GPU_PRIM_POINTS)) {
DRW_ibo_request(cache->batch.edit_vertices, &mbufcache->ibo.points);
DRW_vbo_request(cache->batch.edit_vertices, &mbufcache->vbo.pos_nor);
DRW_vbo_request(cache->batch.edit_vertices, &mbufcache->vbo.edit_data);
}
MDEPS_ASSERT(batch.edit_edges, ibo_lines, vbo_pos_nor, vbo_edit_data);
if (DRW_batch_requested(cache->batch.edit_edges, GPU_PRIM_LINES)) {
DRW_ibo_request(cache->batch.edit_edges, &mbufcache->ibo.lines);
DRW_vbo_request(cache->batch.edit_edges, &mbufcache->vbo.pos_nor);
DRW_vbo_request(cache->batch.edit_edges, &mbufcache->vbo.edit_data);
}
MDEPS_ASSERT(batch.edit_vnor, ibo_points, vbo_pos_nor);
if (DRW_batch_requested(cache->batch.edit_vnor, GPU_PRIM_POINTS)) {
DRW_ibo_request(cache->batch.edit_vnor, &mbufcache->ibo.points);
DRW_vbo_request(cache->batch.edit_vnor, &mbufcache->vbo.pos_nor);
}
MDEPS_ASSERT(batch.edit_lnor, ibo_tris, vbo_pos_nor, vbo_lnor);
if (DRW_batch_requested(cache->batch.edit_lnor, GPU_PRIM_POINTS)) {
DRW_ibo_request(cache->batch.edit_lnor, &mbufcache->ibo.tris);
DRW_vbo_request(cache->batch.edit_lnor, &mbufcache->vbo.pos_nor);
DRW_vbo_request(cache->batch.edit_lnor, &mbufcache->vbo.lnor);
}
MDEPS_ASSERT(batch.edit_fdots, ibo_fdots, vbo_fdots_pos, vbo_fdots_nor);
if (DRW_batch_requested(cache->batch.edit_fdots, GPU_PRIM_POINTS)) {
DRW_ibo_request(cache->batch.edit_fdots, &mbufcache->ibo.fdots);
DRW_vbo_request(cache->batch.edit_fdots, &mbufcache->vbo.fdots_pos);
DRW_vbo_request(cache->batch.edit_fdots, &mbufcache->vbo.fdots_nor);
}
MDEPS_ASSERT(batch.edit_skin_roots, vbo_skin_roots);
if (DRW_batch_requested(cache->batch.edit_skin_roots, GPU_PRIM_POINTS)) {
DRW_vbo_request(cache->batch.edit_skin_roots, &mbufcache->vbo.skin_roots);
}
/* Selection */
MDEPS_ASSERT(batch.edit_selection_verts, ibo_points, vbo_pos_nor, vbo_vert_idx);
if (DRW_batch_requested(cache->batch.edit_selection_verts, GPU_PRIM_POINTS)) {
DRW_ibo_request(cache->batch.edit_selection_verts, &mbufcache->ibo.points);
DRW_vbo_request(cache->batch.edit_selection_verts, &mbufcache->vbo.pos_nor);
DRW_vbo_request(cache->batch.edit_selection_verts, &mbufcache->vbo.vert_idx);
}
MDEPS_ASSERT(batch.edit_selection_edges, ibo_lines, vbo_pos_nor, vbo_edge_idx);
if (DRW_batch_requested(cache->batch.edit_selection_edges, GPU_PRIM_LINES)) {
DRW_ibo_request(cache->batch.edit_selection_edges, &mbufcache->ibo.lines);
DRW_vbo_request(cache->batch.edit_selection_edges, &mbufcache->vbo.pos_nor);
DRW_vbo_request(cache->batch.edit_selection_edges, &mbufcache->vbo.edge_idx);
}
MDEPS_ASSERT(batch.edit_selection_faces, ibo_tris, vbo_pos_nor, vbo_poly_idx);
if (DRW_batch_requested(cache->batch.edit_selection_faces, GPU_PRIM_TRIS)) {
DRW_ibo_request(cache->batch.edit_selection_faces, &mbufcache->ibo.tris);
DRW_vbo_request(cache->batch.edit_selection_faces, &mbufcache->vbo.pos_nor);
DRW_vbo_request(cache->batch.edit_selection_faces, &mbufcache->vbo.poly_idx);
}
MDEPS_ASSERT(batch.edit_selection_fdots, ibo_fdots, vbo_fdots_pos, vbo_fdot_idx);
if (DRW_batch_requested(cache->batch.edit_selection_fdots, GPU_PRIM_POINTS)) {
DRW_ibo_request(cache->batch.edit_selection_fdots, &mbufcache->ibo.fdots);
DRW_vbo_request(cache->batch.edit_selection_fdots, &mbufcache->vbo.fdots_pos);
@ -1520,39 +1617,90 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
mbufcache = (do_uvcage) ? &cache->uv_cage : &cache->final;
/* Edit UV */
MDEPS_ASSERT(batch.edituv_faces, ibo_edituv_tris, vbo_uv, vbo_edituv_data);
if (DRW_batch_requested(cache->batch.edituv_faces, GPU_PRIM_TRIS)) {
DRW_ibo_request(cache->batch.edituv_faces, &mbufcache->ibo.edituv_tris);
DRW_vbo_request(cache->batch.edituv_faces, &mbufcache->vbo.uv);
DRW_vbo_request(cache->batch.edituv_faces, &mbufcache->vbo.edituv_data);
}
MDEPS_ASSERT(batch.edituv_faces_stretch_area,
ibo_edituv_tris,
vbo_uv,
vbo_edituv_data,
vbo_edituv_stretch_area);
if (DRW_batch_requested(cache->batch.edituv_faces_stretch_area, GPU_PRIM_TRIS)) {
DRW_ibo_request(cache->batch.edituv_faces_stretch_area, &mbufcache->ibo.edituv_tris);
DRW_vbo_request(cache->batch.edituv_faces_stretch_area, &mbufcache->vbo.uv);
DRW_vbo_request(cache->batch.edituv_faces_stretch_area, &mbufcache->vbo.edituv_data);
DRW_vbo_request(cache->batch.edituv_faces_stretch_area, &mbufcache->vbo.edituv_stretch_area);
}
MDEPS_ASSERT(batch.edituv_faces_stretch_angle,
ibo_edituv_tris,
vbo_uv,
vbo_edituv_data,
vbo_edituv_stretch_angle);
if (DRW_batch_requested(cache->batch.edituv_faces_stretch_angle, GPU_PRIM_TRIS)) {
DRW_ibo_request(cache->batch.edituv_faces_stretch_angle, &mbufcache->ibo.edituv_tris);
DRW_vbo_request(cache->batch.edituv_faces_stretch_angle, &mbufcache->vbo.uv);
DRW_vbo_request(cache->batch.edituv_faces_stretch_angle, &mbufcache->vbo.edituv_data);
DRW_vbo_request(cache->batch.edituv_faces_stretch_angle, &mbufcache->vbo.edituv_stretch_angle);
}
MDEPS_ASSERT(batch.edituv_edges, ibo_edituv_lines, vbo_uv, vbo_edituv_data);
if (DRW_batch_requested(cache->batch.edituv_edges, GPU_PRIM_LINES)) {
DRW_ibo_request(cache->batch.edituv_edges, &mbufcache->ibo.edituv_lines);
DRW_vbo_request(cache->batch.edituv_edges, &mbufcache->vbo.uv);
DRW_vbo_request(cache->batch.edituv_edges, &mbufcache->vbo.edituv_data);
}
MDEPS_ASSERT(batch.edituv_verts, ibo_edituv_points, vbo_uv, vbo_edituv_data);
if (DRW_batch_requested(cache->batch.edituv_verts, GPU_PRIM_POINTS)) {
DRW_ibo_request(cache->batch.edituv_verts, &mbufcache->ibo.edituv_points);
DRW_vbo_request(cache->batch.edituv_verts, &mbufcache->vbo.uv);
DRW_vbo_request(cache->batch.edituv_verts, &mbufcache->vbo.edituv_data);
}
MDEPS_ASSERT(batch.edituv_fdots, ibo_edituv_fdots, vbo_fdots_uv, vbo_fdots_edituv_data);
if (DRW_batch_requested(cache->batch.edituv_fdots, GPU_PRIM_POINTS)) {
DRW_ibo_request(cache->batch.edituv_fdots, &mbufcache->ibo.edituv_fdots);
DRW_vbo_request(cache->batch.edituv_fdots, &mbufcache->vbo.fdots_uv);
DRW_vbo_request(cache->batch.edituv_fdots, &mbufcache->vbo.fdots_edituv_data);
}
MDEPS_ASSERT_MAP(vbo_lnor);
MDEPS_ASSERT_MAP(vbo_pos_nor);
MDEPS_ASSERT_MAP(vbo_uv);
MDEPS_ASSERT_MAP(vbo_vcol);
MDEPS_ASSERT_MAP(vbo_sculpt_data);
MDEPS_ASSERT_MAP(vbo_weights);
MDEPS_ASSERT_MAP(vbo_edge_fac);
MDEPS_ASSERT_MAP(vbo_mesh_analysis);
MDEPS_ASSERT_MAP(vbo_tan);
MDEPS_ASSERT_MAP(vbo_orco);
MDEPS_ASSERT_MAP(vbo_edit_data);
MDEPS_ASSERT_MAP(vbo_fdots_pos);
MDEPS_ASSERT_MAP(vbo_fdots_nor);
MDEPS_ASSERT_MAP(vbo_skin_roots);
MDEPS_ASSERT_MAP(vbo_vert_idx);
MDEPS_ASSERT_MAP(vbo_edge_idx);
MDEPS_ASSERT_MAP(vbo_poly_idx);
MDEPS_ASSERT_MAP(vbo_fdot_idx);
MDEPS_ASSERT_MAP(vbo_edituv_data);
MDEPS_ASSERT_MAP(vbo_edituv_stretch_area);
MDEPS_ASSERT_MAP(vbo_edituv_stretch_angle);
MDEPS_ASSERT_MAP(vbo_fdots_uv);
MDEPS_ASSERT_MAP(vbo_fdots_edituv_data);
MDEPS_ASSERT_MAP(ibo_tris);
MDEPS_ASSERT_MAP(ibo_lines);
MDEPS_ASSERT_MAP(ibo_lines_loose);
MDEPS_ASSERT_MAP(ibo_lines_adjacency);
MDEPS_ASSERT_MAP(ibo_lines_paint_mask);
MDEPS_ASSERT_MAP(ibo_tris_per_mat);
MDEPS_ASSERT_MAP(ibo_points);
MDEPS_ASSERT_MAP(ibo_fdots);
MDEPS_ASSERT_MAP(ibo_edituv_tris);
MDEPS_ASSERT_MAP(ibo_edituv_lines);
MDEPS_ASSERT_MAP(ibo_edituv_points);
MDEPS_ASSERT_MAP(ibo_edituv_fdots);
/* Meh, loose Scene const correctness here. */
const bool use_subsurf_fdots = scene ? BKE_modifiers_uses_subsurf_facedots(scene, ob) : false;

View File

@@ -33,7 +33,7 @@ namespace blender::draw {
struct MeshExtract_Tri_Data {
GPUIndexBufBuilder elb;
int *tri_mat_start;
const int *tri_mat_start;
int *tri_mat_end;
};
@@ -43,45 +43,9 @@ static void extract_tris_init(const MeshRenderData *mr,
void *tls_data)
{
MeshExtract_Tri_Data *data = static_cast<MeshExtract_Tri_Data *>(tls_data);
size_t mat_tri_idx_size = sizeof(int) * mr->mat_len;
data->tri_mat_start = static_cast<int *>(MEM_callocN(mat_tri_idx_size, __func__));
data->tri_mat_end = static_cast<int *>(MEM_callocN(mat_tri_idx_size, __func__));
int *mat_tri_len = data->tri_mat_start;
/* Count how many triangles there are for each material. */
if (mr->extract_type == MR_EXTRACT_BMESH) {
BMIter iter;
BMFace *efa;
BM_ITER_MESH (efa, &iter, mr->bm, BM_FACES_OF_MESH) {
if (!BM_elem_flag_test(efa, BM_ELEM_HIDDEN)) {
int mat = min_ii(efa->mat_nr, mr->mat_len - 1);
mat_tri_len[mat] += efa->len - 2;
}
}
}
else {
const MPoly *mp = mr->mpoly;
for (int mp_index = 0; mp_index < mr->poly_len; mp_index++, mp++) {
if (!(mr->use_hide && (mp->flag & ME_HIDE))) {
int mat = min_ii(mp->mat_nr, mr->mat_len - 1);
mat_tri_len[mat] += mp->totloop - 2;
}
}
}
/* Accumulate triangle lengths per material to have correct offsets. */
int ofs = mat_tri_len[0];
mat_tri_len[0] = 0;
for (int i = 1; i < mr->mat_len; i++) {
int tmp = mat_tri_len[i];
mat_tri_len[i] = ofs;
ofs += tmp;
}
memcpy(data->tri_mat_end, mat_tri_len, mat_tri_idx_size);
int visible_tri_tot = ofs;
GPU_indexbuf_init(&data->elb, GPU_PRIM_TRIS, visible_tri_tot, mr->loop_len);
data->tri_mat_start = mr->mat_offsets.tri;
data->tri_mat_end = static_cast<int *>(MEM_dupallocN(data->tri_mat_start));
GPU_indexbuf_init(&data->elb, GPU_PRIM_TRIS, mr->mat_offsets.visible_tri_len, mr->loop_len);
}
static void extract_tris_iter_looptri_bm(const MeshRenderData *mr,
@@ -146,7 +110,6 @@ static void extract_tris_finish(const MeshRenderData *mr,
GPU_indexbuf_create_subrange_in_place(mbc_final->tris_per_mat[i], ibo, start, len);
}
}
MEM_freeN(data->tri_mat_start);
MEM_freeN(data->tri_mat_end);
}
@@ -157,7 +120,7 @@ constexpr MeshExtract create_extractor_tris()
extractor.iter_looptri_bm = extract_tris_iter_looptri_bm;
extractor.iter_looptri_mesh = extract_tris_iter_looptri_mesh;
extractor.finish = extract_tris_finish;
extractor.data_type = MR_DATA_NONE;
extractor.data_type = MR_DATA_MAT_OFFSETS;
extractor.data_size = sizeof(MeshExtract_Tri_Data);
extractor.use_threading = false;
extractor.mesh_buffer_offset = offsetof(MeshBufferCache, ibo.tris);

View File

@@ -27,9 +27,10 @@ extern "C" {
#endif
struct Main;
struct wmWindowManager;
/* info_stats.c */
void ED_info_stats_clear(struct ViewLayer *view_layer);
void ED_info_stats_clear(struct wmWindowManager *wm, struct ViewLayer *view_layer);
const char *ED_info_statusbar_string(struct Main *bmain,
struct Scene *scene,
struct ViewLayer *view_layer);
@@ -41,6 +42,7 @@ const char *ED_info_statistics_string(struct Main *bmain,
void ED_info_draw_stats(struct Main *bmain,
struct Scene *scene,
struct ViewLayer *view_layer,
struct View3D *v3d_local,
int x,
int *y,
int height);

View File

@@ -49,6 +49,11 @@ typedef enum {
} NodeBorder;
#define NODE_GRID_STEPS 5
#define NODE_EDGE_PAN_INSIDE_PAD 2
#define NODE_EDGE_PAN_OUTSIDE_PAD 0 /* Disable clamping for node panning, use whole screen. */
#define NODE_EDGE_PAN_SPEED_RAMP 1
#define NODE_EDGE_PAN_MAX_SPEED 40 /* In UI units per second, slower than default. */
#define NODE_EDGE_PAN_DELAY 1.0f
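These constants presumably feed the generic edge-pan operator properties introduced elsewhere in this commit; the call site is not visible in this part of the diff, so the following is only a hedged sketch of how that wiring might look (the wrapper name is hypothetical).

static void node_edge_pan_properties_sketch(wmOperatorType *ot)
{
  /* Hypothetical wrapper (not part of this diff): forwards the node editor's
   * edge-pan defaults to the generic operator-properties helper added below. */
  UI_view2d_edge_pan_operator_properties_ex(ot,
                                            NODE_EDGE_PAN_INSIDE_PAD,
                                            NODE_EDGE_PAN_OUTSIDE_PAD,
                                            NODE_EDGE_PAN_SPEED_RAMP,
                                            NODE_EDGE_PAN_MAX_SPEED,
                                            NODE_EDGE_PAN_DELAY);
}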
/* space_node.c */

View File

@@ -215,7 +215,7 @@ void ED_screen_restore_temp_type(struct bContext *C, ScrArea *area);
ScrArea *ED_screen_full_newspace(struct bContext *C, ScrArea *area, int type);
void ED_screen_full_prevspace(struct bContext *C, ScrArea *area);
void ED_screen_full_restore(struct bContext *C, ScrArea *area);
ScrArea *ED_screen_state_maximized_create(struct bContext *C);
bScreen *ED_screen_state_maximized_create(struct bContext *C);
struct ScrArea *ED_screen_state_toggle(struct bContext *C,
struct wmWindow *win,
struct ScrArea *area,
@@ -354,6 +354,7 @@ bool ED_operator_uvedit(struct bContext *C);
bool ED_operator_uvedit_space_image(struct bContext *C);
bool ED_operator_uvmap(struct bContext *C);
bool ED_operator_posemode_exclusive(struct bContext *C);
bool ED_operator_object_active_local_editable_posemode_exclusive(struct bContext *C);
bool ED_operator_posemode_context(struct bContext *C);
bool ED_operator_posemode(struct bContext *C);
bool ED_operator_posemode_local(struct bContext *C);

View File

@@ -42,7 +42,8 @@ bool ED_space_sequencer_maskedit_poll(struct bContext *C);
bool ED_space_sequencer_check_show_imbuf(struct SpaceSeq *sseq);
bool ED_space_sequencer_check_show_strip(struct SpaceSeq *sseq);
bool ED_space_sequencer_has_visible_animation_on_strip(const struct Scene *scene);
bool ED_space_sequencer_has_playback_animation(const struct SpaceSeq *sseq,
const struct Scene *scene);
void ED_operatormacros_sequencer(void);

View File

@@ -290,18 +290,14 @@ enum {
/** Active right part of number button */
UI_BUT_ACTIVE_RIGHT = 1 << 22,
/* (also used by search buttons to enforce shortcut display for their items). */
/** Button has shortcut text. */
UI_BUT_HAS_SHORTCUT = 1 << 23,
/** Reverse order of consecutive off/on icons */
UI_BUT_ICON_REVERSE = 1 << 24,
UI_BUT_ICON_REVERSE = 1 << 23,
/** Value is animated, but the current value differs from the animated one. */
UI_BUT_ANIMATED_CHANGED = 1 << 25,
UI_BUT_ANIMATED_CHANGED = 1 << 24,
/* Draw the checkbox buttons inverted. */
UI_BUT_CHECKBOX_INVERT = 1 << 26,
UI_BUT_CHECKBOX_INVERT = 1 << 25,
};
/* scale fixed button widths by this to account for DPI */

View File

@@ -105,8 +105,12 @@ struct ScrArea;
struct bContext;
struct bScreen;
struct rctf;
struct rcti;
struct wmEvent;
struct wmGizmoGroupType;
struct wmKeyConfig;
struct wmOperator;
struct wmOperatorType;
typedef struct View2DScrollers View2DScrollers;
@@ -287,6 +291,77 @@ void UI_view2d_smooth_view(struct bContext *C,
/* Caller passes in own idname. */
void VIEW2D_GGT_navigate_impl(struct wmGizmoGroupType *gzgt, const char *idname);
/* Edge pan */
/**
* Custom-data for view panning operators.
*/
typedef struct View2DEdgePanData {
/** Screen where view pan was initiated. */
struct bScreen *screen;
/** Area where view pan was initiated. */
struct ScrArea *area;
/** Region where view pan was initiated. */
struct ARegion *region;
/** View2d we're operating in. */
struct View2D *v2d;
/** Inside distance in UI units from the edge of the region within which to start panning. */
float inside_pad;
/** Outside distance in UI units from the edge of the region at which to stop panning. */
float outside_pad;
/**
* Width of the zone in UI units where speed increases with distance from the edge.
* At the end of this zone max speed is reached.
*/
float speed_ramp;
/** Maximum speed in UI units per second. */
float max_speed;
/** Delay in seconds before maximum speed is reached. */
float delay;
/** Amount to move view relative to zoom. */
float facx, facy;
/* Timers. */
double edge_pan_last_time;
double edge_pan_start_time_x, edge_pan_start_time_y;
} View2DEdgePanData;
bool UI_view2d_edge_pan_poll(struct bContext *C);
void UI_view2d_edge_pan_init(struct bContext *C,
struct View2DEdgePanData *vpd,
float inside_pad,
float outside_pad,
float speed_ramp,
float max_speed,
float delay);
void UI_view2d_edge_pan_reset(struct View2DEdgePanData *vpd);
/* Apply transform to view (i.e. adjust 'cur' rect). */
void UI_view2d_edge_pan_apply(struct bContext *C, struct View2DEdgePanData *vpd, int x, int y);
/* Apply transform to view using mouse events. */
void UI_view2d_edge_pan_apply_event(struct bContext *C,
struct View2DEdgePanData *vpd,
const struct wmEvent *event);
void UI_view2d_edge_pan_operator_properties(struct wmOperatorType *ot);
void UI_view2d_edge_pan_operator_properties_ex(struct wmOperatorType *ot,
float inside_pad,
float outside_pad,
float speed_ramp,
float max_speed,
float delay);
/* Initialize panning data with operator settings. */
void UI_view2d_edge_pan_operator_init(struct bContext *C,
struct View2DEdgePanData *vpd,
struct wmOperator *op);
#ifdef __cplusplus
}
#endif
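The declarations above are the full public surface of the new reusable edge-pan helper. Below is a minimal sketch of how a modal operator might drive it, mirroring the pattern VIEW2D_OT_edge_pan adopts later in this commit; the my_tool_* / MY_OT_tool names are placeholders, not code from the diff.

static int my_tool_invoke(bContext *C, wmOperator *op, const wmEvent *UNUSED(event))
{
  /* Allocate the shared edge-pan custom-data and fill it from the operator's
   * inside_padding/outside_padding/speed_ramp/max_speed/delay properties. */
  op->customdata = MEM_callocN(sizeof(View2DEdgePanData), __func__);
  UI_view2d_edge_pan_operator_init(C, op->customdata, op);
  WM_event_add_modal_handler(C, op);
  return OPERATOR_RUNNING_MODAL | OPERATOR_PASS_THROUGH;
}

static int my_tool_modal(bContext *C, wmOperator *op, const wmEvent *event)
{
  View2DEdgePanData *vpd = op->customdata;
  if (event->val == KM_RELEASE || event->type == EVT_ESCKEY) {
    MEM_SAFE_FREE(op->customdata);
    return OPERATOR_FINISHED | OPERATOR_PASS_THROUGH;
  }
  /* Pans the view when the cursor nears the region edge; non-mousemove events
   * are ignored inside the helper. */
  UI_view2d_edge_pan_apply_event(C, vpd, event);
  return OPERATOR_PASS_THROUGH;
}

static void MY_OT_tool(wmOperatorType *ot)
{
  ot->invoke = my_tool_invoke;
  ot->modal = my_tool_modal;
  ot->poll = UI_view2d_edge_pan_poll;
  ot->flag = OPTYPE_INTERNAL;
  /* Registers the five RNA float properties read by UI_view2d_edge_pan_operator_init. */
  UI_view2d_edge_pan_operator_properties(ot);
}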

View File

@@ -75,6 +75,7 @@ set(SRC
resources.c
view2d.c
view2d_draw.c
view2d_edge_pan.c
view2d_gizmo_navigate.c
view2d_ops.c

View File

@@ -1160,7 +1160,6 @@ void ui_but_add_shortcut(uiBut *but, const char *shortcut_str, const bool do_str
MEM_freeN(butstr_orig);
but->str = but->strdata;
but->flag |= UI_BUT_HAS_SEP_CHAR;
but->drawflag |= UI_BUT_HAS_SHORTCUT;
ui_but_update(but);
}

View File

@@ -1280,7 +1280,6 @@ void ui_popup_context_menu_for_panel(bContext *C, ARegion *region, Panel *panel)
uiBlock *block = uiLayoutGetBlock(layout);
uiBut *but = block->buttons.last;
but->flag |= UI_BUT_HAS_SEP_CHAR;
but->drawflag |= UI_BUT_HAS_SHORTCUT;
}
}
UI_popup_menu_end(C, pup);

View File

@@ -95,7 +95,7 @@ typedef struct uiSearchboxData {
/** draw thumbnail previews, rather than list */
bool preview;
/** Use the #UI_SEP_CHAR char for splitting shortcuts (good for operators, bad for data). */
bool use_sep;
bool use_shortcut_sep;
int prv_rows, prv_cols;
/**
* Show the active icon and text after the last instance of this string.
@@ -314,7 +314,7 @@ bool ui_searchbox_apply(uiBut *but, ARegion *region)
data->items.name_prefix_offsets[data->active] :
0);
const char *name_sep = data->use_sep ? strrchr(name, UI_SEP_CHAR) : NULL;
const char *name_sep = data->use_shortcut_sep ? strrchr(name, UI_SEP_CHAR) : NULL;
BLI_strncpy(but->editstr, name, name_sep ? (name_sep - name) + 1 : data->items.maxstrlen);
@@ -535,7 +535,7 @@ void ui_searchbox_update(bContext *C, ARegion *region, uiBut *but, const bool re
/* Never include the prefix in the button. */
(data->items.name_prefix_offsets ? data->items.name_prefix_offsets[a] :
0);
const char *name_sep = data->use_sep ? strrchr(name, UI_SEP_CHAR) : NULL;
const char *name_sep = data->use_shortcut_sep ? strrchr(name, UI_SEP_CHAR) : NULL;
if (STREQLEN(but->editstr, name, name_sep ? (name_sep - name) : data->items.maxstrlen)) {
data->active = a;
break;
@@ -627,7 +627,7 @@ static void ui_searchbox_region_draw_cb(const bContext *C, ARegion *region)
char *name_sep_test = NULL;
uiMenuItemSeparatorType separator_type = UI_MENU_ITEM_SEPARATOR_NONE;
if (data->use_sep) {
if (data->use_shortcut_sep) {
separator_type = UI_MENU_ITEM_SEPARATOR_SHORTCUT;
}
/* Only set for displaying additional hint (e.g. library name of a linked data-block). */
@@ -719,7 +719,10 @@ static void ui_searchbox_region_free_cb(ARegion *region)
region->regiondata = NULL;
}
ARegion *ui_searchbox_create_generic(bContext *C, ARegion *butregion, uiButSearch *search_but)
static ARegion *ui_searchbox_create_generic_ex(bContext *C,
ARegion *butregion,
uiButSearch *search_but,
const bool use_shortcut_sep)
{
wmWindow *win = CTX_wm_window(C);
const uiStyle *style = UI_style_get();
@@ -759,12 +762,8 @@ ARegion *ui_searchbox_create_generic(bContext *C, ARegion *butregion, uiButSearc
data->prv_cols = but->a2;
}
/* Only show key shortcuts when needed (checking RNA prop pointer is useless here, a lot of
* buttons are about data without having that pointer defined, let's rather try with optype!).
* One can also enforce that behavior by setting
* UI_BUT_HAS_SHORTCUT drawflag of search button. */
if (but->optype != NULL || (but->drawflag & UI_BUT_HAS_SHORTCUT) != 0) {
data->use_sep = true;
if (but->optype != NULL || use_shortcut_sep) {
data->use_shortcut_sep = true;
}
data->sep_string = search_but->item_sep_string;
@@ -888,6 +887,11 @@ ARegion *ui_searchbox_create_generic(bContext *C, ARegion *butregion, uiButSearc
return region;
}
ARegion *ui_searchbox_create_generic(bContext *C, ARegion *butregion, uiButSearch *search_but)
{
return ui_searchbox_create_generic_ex(C, butregion, search_but, false);
}
/**
* Similar to Python's `str.title` except...
*
@@ -973,8 +977,8 @@ static void ui_searchbox_region_draw_cb__operator(const bContext *UNUSED(C), ARe
data->items.names[a],
0,
state,
data->use_sep ? UI_MENU_ITEM_SEPARATOR_SHORTCUT :
UI_MENU_ITEM_SEPARATOR_NONE,
data->use_shortcut_sep ? UI_MENU_ITEM_SEPARATOR_SHORTCUT :
UI_MENU_ITEM_SEPARATOR_NONE,
NULL);
}
}
@@ -996,8 +1000,7 @@ static void ui_searchbox_region_draw_cb__operator(const bContext *UNUSED(C), ARe
ARegion *ui_searchbox_create_operator(bContext *C, ARegion *butregion, uiButSearch *search_but)
{
UI_but_drawflag_enable(&search_but->but, UI_BUT_HAS_SHORTCUT);
ARegion *region = ui_searchbox_create_generic(C, butregion, search_but);
ARegion *region = ui_searchbox_create_generic_ex(C, butregion, search_but, true);
region->type->draw = ui_searchbox_region_draw_cb__operator;
@@ -1016,8 +1019,7 @@ static void ui_searchbox_region_draw_cb__menu(const bContext *UNUSED(C), ARegion
ARegion *ui_searchbox_create_menu(bContext *C, ARegion *butregion, uiButSearch *search_but)
{
UI_but_drawflag_enable(&search_but->but, UI_BUT_HAS_SHORTCUT);
ARegion *region = ui_searchbox_create_generic(C, butregion, search_but);
ARegion *region = ui_searchbox_create_generic_ex(C, butregion, search_but, true);
if (false) {
region->type->draw = ui_searchbox_region_draw_cb__menu;

View File

@@ -2131,14 +2131,15 @@ static void widget_draw_text(const uiFontStyle *fstyle,
transopts = ui_translate_buttons();
#endif
bool use_drawstr_right_as_hint = false;
/* cut string in 2 parts - only for menu entries */
if ((but->drawflag & UI_BUT_HAS_SHORTCUT) && (but->editstr == NULL)) {
if (but->flag & UI_BUT_HAS_SEP_CHAR) {
drawstr_right = strrchr(drawstr, UI_SEP_CHAR);
if (drawstr_right) {
drawstr_left_len = (drawstr_right - drawstr);
drawstr_right++;
}
if (but->flag & UI_BUT_HAS_SEP_CHAR && (but->editstr == NULL)) {
drawstr_right = strrchr(drawstr, UI_SEP_CHAR);
if (drawstr_right) {
use_drawstr_right_as_hint = true;
drawstr_left_len = (drawstr_right - drawstr);
drawstr_right++;
}
}
@@ -2243,7 +2244,7 @@ static void widget_draw_text(const uiFontStyle *fstyle,
if (drawstr_right) {
uchar col[4];
copy_v4_v4_uchar(col, wcol->text);
if (but->drawflag & UI_BUT_HAS_SHORTCUT) {
if (use_drawstr_right_as_hint) {
col[3] *= 0.5f;
}

View File

@@ -0,0 +1,345 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2021 Blender Foundation
* All rights reserved.
*/
/** \file
* \ingroup spnode
*/
#include "BKE_context.h"
#include "BLI_math.h"
#include "BLI_rect.h"
#include "ED_screen.h"
#include "MEM_guardedalloc.h"
#include "PIL_time.h"
#include "RNA_access.h"
#include "RNA_define.h"
#include "UI_interface.h"
#include "UI_view2d.h"
#include "WM_api.h"
#include "WM_types.h"
/* -------------------------------------------------------------------- */
/** \name Edge Pan Operator Utilities
* \{ */
bool UI_view2d_edge_pan_poll(bContext *C)
{
ARegion *region = CTX_wm_region(C);
/* Check if there's a region in context to work with. */
if (region == NULL) {
return false;
}
View2D *v2d = &region->v2d;
/* Check that 2d-view can pan. */
if ((v2d->keepofs & V2D_LOCKOFS_X) && (v2d->keepofs & V2D_LOCKOFS_Y)) {
return false;
}
/* View can pan. */
return true;
}
void UI_view2d_edge_pan_init(bContext *C,
View2DEdgePanData *vpd,
float inside_pad,
float outside_pad,
float speed_ramp,
float max_speed,
float delay)
{
if (!UI_view2d_edge_pan_poll(C)) {
return;
}
/* Set pointers to owners. */
vpd->screen = CTX_wm_screen(C);
vpd->area = CTX_wm_area(C);
vpd->region = CTX_wm_region(C);
vpd->v2d = &vpd->region->v2d;
BLI_assert(speed_ramp > 0.0f);
vpd->inside_pad = inside_pad;
vpd->outside_pad = outside_pad;
vpd->speed_ramp = speed_ramp;
vpd->max_speed = max_speed;
vpd->delay = delay;
/* Calculate translation factor, based on size of view. */
const float winx = (float)(BLI_rcti_size_x(&vpd->region->winrct) + 1);
const float winy = (float)(BLI_rcti_size_y(&vpd->region->winrct) + 1);
vpd->facx = (BLI_rctf_size_x(&vpd->v2d->cur)) / winx;
vpd->facy = (BLI_rctf_size_y(&vpd->v2d->cur)) / winy;
UI_view2d_edge_pan_reset(vpd);
}
void UI_view2d_edge_pan_reset(View2DEdgePanData *vpd)
{
vpd->edge_pan_start_time_x = 0.0;
vpd->edge_pan_start_time_y = 0.0;
vpd->edge_pan_last_time = PIL_check_seconds_timer();
}
/**
* Reset the edge pan timers if the mouse isn't in the scroll zone and
* start the timers when the mouse enters a scroll zone.
*/
static void edge_pan_manage_delay_timers(View2DEdgePanData *vpd,
int pan_dir_x,
int pan_dir_y,
const double current_time)
{
if (pan_dir_x == 0) {
vpd->edge_pan_start_time_x = 0.0;
}
else if (vpd->edge_pan_start_time_x == 0.0) {
vpd->edge_pan_start_time_x = current_time;
}
if (pan_dir_y == 0) {
vpd->edge_pan_start_time_y = 0.0;
}
else if (vpd->edge_pan_start_time_y == 0.0) {
vpd->edge_pan_start_time_y = current_time;
}
}
/**
* Used to calculate a "fade in" factor for edge panning to make the interaction feel smooth
* and more purposeful.
*
* \note Assumes a domain_min of 0.0f.
*/
static float smootherstep(const float domain_max, float x)
{
x = clamp_f(x / domain_max, 0.0, 1.0);
return x * x * x * (x * (x * 6.0 - 15.0) + 10.0);
}
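A brief aside on the helper above: with t = clamp(x / domain_max, 0, 1), the value returned is the standard smootherstep polynomial

  S(t) = 6t^5 - 15t^4 + 10t^3

which rises from 0 at t = 0 to 1 at t = 1 with zero first and second derivatives at both endpoints, producing the gradual fade-in described in the comment.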
static float edge_pan_speed(View2DEdgePanData *vpd,
int event_loc,
bool x_dir,
const double current_time)
{
ARegion *region = vpd->region;
/* Find the distance from the start of the drag zone. */
const int pad = vpd->inside_pad * U.widget_unit;
const int min = (x_dir ? region->winrct.xmin : region->winrct.ymin) + pad;
const int max = (x_dir ? region->winrct.xmax : region->winrct.ymax) - pad;
int distance = 0;
if (event_loc > max) {
distance = event_loc - max;
}
else if (event_loc < min) {
distance = min - event_loc;
}
else {
BLI_assert(!"Calculating speed outside of pan zones");
return 0.0f;
}
float distance_factor = distance / (vpd->speed_ramp * U.widget_unit);
CLAMP(distance_factor, 0.0f, 1.0f);
/* Apply a fade in to the speed based on a start time delay. */
const double start_time = x_dir ? vpd->edge_pan_start_time_x : vpd->edge_pan_start_time_y;
const float delay_factor = smootherstep(vpd->delay, (float)(current_time - start_time));
return distance_factor * delay_factor * vpd->max_speed * U.widget_unit * (float)U.dpi_fac;
}
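Summarizing the function above (a restatement of the code, not additional behavior): with d the cursor's distance past the inside pad on the relevant axis and t0 the start time recorded by edge_pan_manage_delay_timers, the returned speed is

  speed = clamp(d / (speed_ramp * U.widget_unit), 0, 1)
          * smootherstep(delay, current_time - t0)
          * max_speed * U.widget_unit * U.dpi_fac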
static void edge_pan_apply_delta(bContext *C, View2DEdgePanData *vpd, float dx, float dy)
{
View2D *v2d = vpd->v2d;
if (!v2d) {
return;
}
/* Calculate amount to move view by. */
dx *= vpd->facx;
dy *= vpd->facy;
/* Only move view on an axis if change is allowed. */
if ((v2d->keepofs & V2D_LOCKOFS_X) == 0) {
v2d->cur.xmin += dx;
v2d->cur.xmax += dx;
}
if ((v2d->keepofs & V2D_LOCKOFS_Y) == 0) {
v2d->cur.ymin += dy;
v2d->cur.ymax += dy;
}
/* Inform v2d about changes after this operation. */
UI_view2d_curRect_changed(C, v2d);
/* Don't rebuild full tree in outliner, since we're just changing our view. */
ED_region_tag_redraw_no_rebuild(vpd->region);
/* Request updates to be done. */
WM_event_add_mousemove(CTX_wm_window(C));
UI_view2d_sync(vpd->screen, vpd->area, v2d, V2D_LOCK_COPY);
}
void UI_view2d_edge_pan_apply(bContext *C, View2DEdgePanData *vpd, int x, int y)
{
ARegion *region = vpd->region;
rcti inside_rect, outside_rect;
inside_rect = region->winrct;
outside_rect = region->winrct;
BLI_rcti_pad(&inside_rect, -vpd->inside_pad * U.widget_unit, -vpd->inside_pad * U.widget_unit);
BLI_rcti_pad(&outside_rect, vpd->outside_pad * U.widget_unit, vpd->outside_pad * U.widget_unit);
int pan_dir_x = 0;
int pan_dir_y = 0;
if ((vpd->outside_pad == 0) || BLI_rcti_isect_pt(&outside_rect, x, y)) {
/* Find whether the mouse is beyond X and Y edges. */
if (x > inside_rect.xmax) {
pan_dir_x = 1;
}
else if (x < inside_rect.xmin) {
pan_dir_x = -1;
}
if (y > inside_rect.ymax) {
pan_dir_y = 1;
}
else if (y < inside_rect.ymin) {
pan_dir_y = -1;
}
}
const double current_time = PIL_check_seconds_timer();
edge_pan_manage_delay_timers(vpd, pan_dir_x, pan_dir_y, current_time);
/* Calculate the delta since the last time the operator was called. */
const float dtime = (float)(current_time - vpd->edge_pan_last_time);
float dx = 0.0f, dy = 0.0f;
if (pan_dir_x != 0) {
const float speed = edge_pan_speed(vpd, x, true, current_time);
dx = dtime * speed * (float)pan_dir_x;
}
if (pan_dir_y != 0) {
const float speed = edge_pan_speed(vpd, y, false, current_time);
dy = dtime * speed * (float)pan_dir_y;
}
vpd->edge_pan_last_time = current_time;
/* Pan, clamping inside the region's total bounds. */
edge_pan_apply_delta(C, vpd, dx, dy);
}
void UI_view2d_edge_pan_apply_event(bContext *C, View2DEdgePanData *vpd, const wmEvent *event)
{
/* Only mousemove events matter here, ignore others. */
if (event->type != MOUSEMOVE) {
return;
}
UI_view2d_edge_pan_apply(C, vpd, event->x, event->y);
}
void UI_view2d_edge_pan_operator_properties(wmOperatorType *ot)
{
/* Default values for edge panning operators. */
UI_view2d_edge_pan_operator_properties_ex(ot,
/*inside_pad*/ 1.0f,
/*outside_pad*/ 0.0f,
/*speed_ramp*/ 1.0f,
/*max_speed*/ 500.0f,
/*delay*/ 1.0f);
}
void UI_view2d_edge_pan_operator_properties_ex(struct wmOperatorType *ot,
float inside_pad,
float outside_pad,
float speed_ramp,
float max_speed,
float delay)
{
RNA_def_float(
ot->srna,
"inside_padding",
inside_pad,
0.0f,
100.0f,
"Inside Padding",
"Inside distance in UI units from the edge of the region within which to start panning",
0.0f,
100.0f);
RNA_def_float(
ot->srna,
"outside_padding",
outside_pad,
0.0f,
100.0f,
"Outside Padding",
"Outside distance in UI units from the edge of the region at which to stop panning",
0.0f,
100.0f);
RNA_def_float(ot->srna,
"speed_ramp",
speed_ramp,
0.0f,
100.0f,
"Speed Ramp",
"Width of the zone in UI units where speed increases with distance from the edge",
0.0f,
100.0f);
RNA_def_float(ot->srna,
"max_speed",
max_speed,
0.0f,
10000.0f,
"Max Speed",
"Maximum speed in UI units per second",
0.0f,
10000.0f);
RNA_def_float(ot->srna,
"delay",
delay,
0.0f,
10.0f,
"Delay",
"Delay in seconds before maximum speed is reached",
0.0f,
10.0f);
}
void UI_view2d_edge_pan_operator_init(bContext *C, View2DEdgePanData *vpd, wmOperator *op)
{
UI_view2d_edge_pan_init(C,
vpd,
RNA_float_get(op->ptr, "inside_padding"),
RNA_float_get(op->ptr, "outside_padding"),
RNA_float_get(op->ptr, "speed_ramp"),
RNA_float_get(op->ptr, "max_speed"),
RNA_float_get(op->ptr, "delay"));
}
/** \} */

View File

@@ -341,162 +341,37 @@ static void VIEW2D_OT_pan(wmOperatorType *ot)
* passes through.
* \{ */
/** Distance from the edge of the region within which to start panning. */
#define EDGE_PAN_REGION_PAD (U.widget_unit)
/** Speed factor in pixels per second per pixel of distance from edge pan zone beginning. */
#define EDGE_PAN_SPEED_PER_PIXEL (25.0f * (float)U.dpi_fac)
/** Delay before drag panning in seconds. */
#define EDGE_PAN_DELAY 1.0f
/* set up modal operator and relevant settings */
static int view_edge_pan_invoke(bContext *C, wmOperator *op, const wmEvent *UNUSED(event))
{
/* Set up customdata. */
view_pan_init(C, op);
v2dViewPanData *vpd = op->customdata;
vpd->edge_pan_start_time_x = 0.0;
vpd->edge_pan_start_time_y = 0.0;
vpd->edge_pan_last_time = PIL_check_seconds_timer();
op->customdata = MEM_callocN(sizeof(View2DEdgePanData), "View2DEdgePanData");
View2DEdgePanData *vpd = op->customdata;
UI_view2d_edge_pan_operator_init(C, vpd, op);
WM_event_add_modal_handler(C, op);
return (OPERATOR_RUNNING_MODAL | OPERATOR_PASS_THROUGH);
}
/**
* Reset the edge pan timers if the mouse isn't in the scroll zone and
* start the timers when the mouse enters a scroll zone.
*/
static void edge_pan_manage_delay_timers(v2dViewPanData *vpd,
int pan_dir_x,
int pan_dir_y,
const double current_time)
{
if (pan_dir_x == 0) {
vpd->edge_pan_start_time_x = 0.0;
}
else if (vpd->edge_pan_start_time_x == 0.0) {
vpd->edge_pan_start_time_x = current_time;
}
if (pan_dir_y == 0) {
vpd->edge_pan_start_time_y = 0.0;
}
else if (vpd->edge_pan_start_time_y == 0.0) {
vpd->edge_pan_start_time_y = current_time;
}
}
/**
* Used to calculate a "fade in" factor for edge panning to make the interaction feel smooth
* and more purposeful.
*
* \note Assumes a domain_min of 0.0f.
*/
static float smootherstep(const float domain_max, float x)
{
x = clamp_f(x / domain_max, 0.0, 1.0);
return x * x * x * (x * (x * 6.0 - 15.0) + 10.0);
}
static float edge_pan_speed(v2dViewPanData *vpd,
int event_loc,
bool x_dir,
const double current_time)
{
ARegion *region = vpd->region;
/* Find the distance from the start of the drag zone. */
const int min = (x_dir ? region->winrct.xmin : region->winrct.ymin) + EDGE_PAN_REGION_PAD;
const int max = (x_dir ? region->winrct.xmax : region->winrct.ymax) - EDGE_PAN_REGION_PAD;
int distance = 0;
if (event_loc > max) {
distance = event_loc - max;
}
else if (event_loc < min) {
distance = min - event_loc;
}
else {
BLI_assert(!"Calculating speed outside of pan zones");
return 0.0f;
}
/* Apply a fade in to the speed based on a start time delay. */
const double start_time = x_dir ? vpd->edge_pan_start_time_x : vpd->edge_pan_start_time_y;
const float delay_factor = smootherstep(EDGE_PAN_DELAY, (float)(current_time - start_time));
return distance * EDGE_PAN_SPEED_PER_PIXEL * delay_factor;
}
static int view_edge_pan_modal(bContext *C, wmOperator *op, const wmEvent *event)
{
v2dViewPanData *vpd = op->customdata;
ARegion *region = vpd->region;
View2DEdgePanData *vpd = op->customdata;
if (event->val == KM_RELEASE || event->type == EVT_ESCKEY) {
view_pan_exit(op);
MEM_SAFE_FREE(op->customdata);
return (OPERATOR_FINISHED | OPERATOR_PASS_THROUGH);
}
/* Only mousemove events matter here, ignore others. */
if (event->type != MOUSEMOVE) {
return OPERATOR_PASS_THROUGH;
}
UI_view2d_edge_pan_apply_event(C, vpd, event);
/* This operator is supposed to run together with some drag action.
* On successful handling, always pass events on to other handlers. */
const int success_retval = OPERATOR_PASS_THROUGH;
const int outside_padding = RNA_int_get(op->ptr, "outside_padding") * UI_UNIT_X;
rcti padding_rect;
if (outside_padding != 0) {
padding_rect = region->winrct;
BLI_rcti_pad(&padding_rect, outside_padding, outside_padding);
}
int pan_dir_x = 0;
int pan_dir_y = 0;
if ((outside_padding == 0) || BLI_rcti_isect_pt(&padding_rect, event->x, event->y)) {
/* Find whether the mouse is beyond X and Y edges. */
if (event->x > region->winrct.xmax - EDGE_PAN_REGION_PAD) {
pan_dir_x = 1;
}
else if (event->x < region->winrct.xmin + EDGE_PAN_REGION_PAD) {
pan_dir_x = -1;
}
if (event->y > region->winrct.ymax - EDGE_PAN_REGION_PAD) {
pan_dir_y = 1;
}
else if (event->y < region->winrct.ymin + EDGE_PAN_REGION_PAD) {
pan_dir_y = -1;
}
}
const double current_time = PIL_check_seconds_timer();
edge_pan_manage_delay_timers(vpd, pan_dir_x, pan_dir_y, current_time);
/* Calculate the delta since the last time the operator was called. */
const float dtime = (float)(current_time - vpd->edge_pan_last_time);
float dx = 0.0f, dy = 0.0f;
if (pan_dir_x != 0) {
const float speed = edge_pan_speed(vpd, event->x, true, current_time);
dx = dtime * speed * (float)pan_dir_x;
}
if (pan_dir_y != 0) {
const float speed = edge_pan_speed(vpd, event->y, false, current_time);
dy = dtime * speed * (float)pan_dir_y;
}
vpd->edge_pan_last_time = current_time;
/* Pan, clamping inside the region's total bounds. */
view_pan_apply_ex(C, vpd, dx, dy);
return success_retval;
return OPERATOR_PASS_THROUGH;
}
static void view_edge_pan_cancel(bContext *UNUSED(C), wmOperator *op)
{
view_pan_exit(op);
MEM_SAFE_FREE(op->customdata);
}
static void VIEW2D_OT_edge_pan(wmOperatorType *ot)
@@ -510,26 +385,13 @@ static void VIEW2D_OT_edge_pan(wmOperatorType *ot)
ot->invoke = view_edge_pan_invoke;
ot->modal = view_edge_pan_modal;
ot->cancel = view_edge_pan_cancel;
ot->poll = view_pan_poll;
ot->poll = UI_view2d_edge_pan_poll;
/* operator is modal */
ot->flag = OPTYPE_INTERNAL;
RNA_def_int(ot->srna,
"outside_padding",
0,
0,
100,
"Outside Padding",
"Padding around the region in UI units within which panning is activated (0 to "
"disable boundary)",
0,
100);
UI_view2d_edge_pan_operator_properties(ot);
}
#undef EDGE_PAN_REGION_PAD
#undef EDGE_PAN_SPEED_PER_PIXEL
#undef EDGE_PAN_DELAY
/** \} */
/* -------------------------------------------------------------------- */

View File

@@ -3817,7 +3817,7 @@ static int edbm_blend_from_shape_exec(bContext *C, wmOperator *op)
EDBM_update(me,
&(const struct EDBMUpdate_Params){
.calc_looptri = true,
.calc_normals = false,
.calc_normals = true,
.is_destructive = false,
});
}

View File

@@ -626,7 +626,7 @@ static void *undomesh_from_editmesh(UndoMesh *um, BMEditMesh *em, Key *key, Undo
# ifdef USE_ARRAY_STORE_THREAD
if (um_arraystore.task_pool == NULL) {
um_arraystore.task_pool = BLI_task_pool_create_background(NULL, TASK_PRIORITY_LOW, true);
um_arraystore.task_pool = BLI_task_pool_create_background(NULL, TASK_PRIORITY_LOW);
}
struct UMArrayData *um_data = MEM_mallocN(sizeof(*um_data), __func__);

View File

@@ -696,12 +696,11 @@ static bool edit_constraint_poll_generic(bContext *C,
Object *ob = (ptr.owner_id) ? (Object *)ptr.owner_id : ED_object_active_context(C);
bConstraint *con = ptr.data;
if (!ob) {
CTX_wm_operator_poll_msg_set(C, "Context missing active object");
if (!ED_operator_object_active_editable_ex(C, ob)) {
return false;
}
if (ID_IS_LINKED(ob) || (ptr.owner_id && ID_IS_LINKED(ptr.owner_id))) {
if (ptr.owner_id != NULL && ID_IS_LINKED(ptr.owner_id)) {
CTX_wm_operator_poll_msg_set(C, "Cannot edit library data");
return false;
}
@@ -1746,8 +1745,8 @@ void POSE_OT_constraints_clear(wmOperatorType *ot)
/* callbacks */
ot->exec = pose_constraints_clear_exec;
ot->poll = ED_operator_posemode_exclusive; /* XXX - do we want to ensure there are selected
* bones too? */
/* XXX - do we want to ensure there are selected bones too? */
ot->poll = ED_operator_object_active_local_editable_posemode_exclusive;
}
static int object_constraints_clear_exec(bContext *C, wmOperator *UNUSED(op))
@@ -2480,7 +2479,7 @@ void POSE_OT_ik_clear(wmOperatorType *ot)
/* api callbacks */
ot->exec = pose_ik_clear_exec;
ot->poll = ED_operator_posemode_exclusive;
ot->poll = ED_operator_object_active_local_editable_posemode_exclusive;
/* flags */
ot->flag = OPTYPE_REGISTER | OPTYPE_UNDO;

View File

@@ -261,6 +261,49 @@ void ED_object_shaderfx_copy(Object *dst, ShaderFxData *fx)
WM_main_add_notifier(NC_OBJECT | ND_SHADERFX, dst);
}
/**************** Generic poll callback helpers. ************************/
static bool edit_shaderfx_poll_generic(bContext *C,
StructRNA *rna_type,
int obtype_flag,
const bool is_liboverride_allowed)
{
PointerRNA ptr = CTX_data_pointer_get_type(C, "shaderfx", rna_type);
Object *ob = (ptr.owner_id) ? (Object *)ptr.owner_id : ED_object_active_context(C);
ShaderFxData *fx = ptr.data; /* May be NULL. */
if (!ED_operator_object_active_editable_ex(C, ob)) {
return false;
}
/* NOTE: Temporary 'forbid all' for overrides, until we implement support to add shaderfx to
* overrides. */
if (ID_IS_OVERRIDE_LIBRARY(ob)) {
CTX_wm_operator_poll_msg_set(C, "Cannot edit shaderfxs in a library override");
return false;
}
if (obtype_flag != 0 && ((1 << ob->type) & obtype_flag) == 0) {
CTX_wm_operator_poll_msg_set(C, "Object type is not supported");
return false;
}
if (ptr.owner_id != NULL && ID_IS_LINKED(ptr.owner_id)) {
CTX_wm_operator_poll_msg_set(C, "Cannot edit library data");
return false;
}
if (!is_liboverride_allowed && BKE_shaderfx_is_nonlocal_in_liboverride(ob, fx)) {
CTX_wm_operator_poll_msg_set(
C, "Cannot edit shaderfxs coming from linked data in a library override");
return false;
}
return true;
}
static bool edit_shaderfx_poll(bContext *C)
{
return edit_shaderfx_poll_generic(C, &RNA_ShaderFx, 0, false);
}
/************************ add effect operator *********************/
static int shaderfx_add_exec(bContext *C, wmOperator *op)
@@ -334,7 +377,7 @@ void OBJECT_OT_shaderfx_add(wmOperatorType *ot)
/* api callbacks */
ot->invoke = WM_menu_invoke;
ot->exec = shaderfx_add_exec;
ot->poll = ED_operator_object_active_editable;
ot->poll = edit_shaderfx_poll;
/* flags */
ot->flag = OPTYPE_REGISTER | OPTYPE_UNDO;
@@ -352,37 +395,6 @@ void OBJECT_OT_shaderfx_add(wmOperatorType *ot)
/** \name Generic Functions for Operators Using Names and Data Context
* \{ */
static bool edit_shaderfx_poll_generic(bContext *C, StructRNA *rna_type, int obtype_flag)
{
PointerRNA ptr = CTX_data_pointer_get_type(C, "shaderfx", rna_type);
Object *ob = (ptr.owner_id) ? (Object *)ptr.owner_id : ED_object_active_context(C);
ShaderFxData *fx = ptr.data; /* May be NULL. */
if (!ob || ID_IS_LINKED(ob)) {
return false;
}
if (obtype_flag && ((1 << ob->type) & obtype_flag) == 0) {
return false;
}
if (ptr.owner_id && ID_IS_LINKED(ptr.owner_id)) {
return false;
}
if (ID_IS_OVERRIDE_LIBRARY(ob)) {
if ((fx == NULL) || (fx->flag & eShaderFxFlag_OverrideLibrary_Local) == 0) {
CTX_wm_operator_poll_msg_set(C, "Cannot edit shaderfxs coming from library override");
return false;
}
}
return true;
}
static bool edit_shaderfx_poll(bContext *C)
{
return edit_shaderfx_poll_generic(C, &RNA_ShaderFx, 0);
}
static void edit_shaderfx_properties(wmOperatorType *ot)
{
PropertyRNA *prop = RNA_def_string(
@@ -595,11 +607,6 @@ void OBJECT_OT_shaderfx_move_down(wmOperatorType *ot)
/************************ move shaderfx to index operator *********************/
static bool shaderfx_move_to_index_poll(bContext *C)
{
return edit_shaderfx_poll_generic(C, &RNA_ShaderFx, 0);
}
static int shaderfx_move_to_index_exec(bContext *C, wmOperator *op)
{
Object *ob = ED_object_active_context(C);
@@ -632,7 +639,7 @@ void OBJECT_OT_shaderfx_move_to_index(wmOperatorType *ot)
ot->invoke = shaderfx_move_to_index_invoke;
ot->exec = shaderfx_move_to_index_exec;
ot->poll = shaderfx_move_to_index_poll;
ot->poll = edit_shaderfx_poll;
/* flags */
ot->flag = OPTYPE_REGISTER | OPTYPE_UNDO | OPTYPE_INTERNAL;
@@ -675,11 +682,6 @@ static int shaderfx_copy_invoke(bContext *C, wmOperator *op, const wmEvent *even
return retval;
}
static bool shaderfx_copy_poll(bContext *C)
{
return edit_shaderfx_poll_generic(C, &RNA_ShaderFx, 0);
}
void OBJECT_OT_shaderfx_copy(wmOperatorType *ot)
{
ot->name = "Copy Effect";
@@ -688,7 +690,7 @@ void OBJECT_OT_shaderfx_copy(wmOperatorType *ot)
ot->invoke = shaderfx_copy_invoke;
ot->exec = shaderfx_copy_exec;
ot->poll = shaderfx_copy_poll;
ot->poll = edit_shaderfx_poll;
/* flags */
ot->flag = OPTYPE_REGISTER | OPTYPE_UNDO | OPTYPE_INTERNAL;

View File

@@ -859,7 +859,7 @@ static bool screen_opengl_render_init(bContext *C, wmOperator *op)
oglrender->task_pool = BLI_task_pool_create_background_serial(oglrender, TASK_PRIORITY_LOW);
}
else {
oglrender->task_pool = BLI_task_pool_create(oglrender, TASK_PRIORITY_LOW, TASK_ISOLATION_ON);
oglrender->task_pool = BLI_task_pool_create(oglrender, TASK_PRIORITY_LOW);
}
oglrender->pool_ok = true;
BLI_spin_init(&oglrender->reports_lock);

View File

@@ -1275,11 +1275,14 @@ void ED_screen_scene_change(bContext *C, wmWindow *win, Scene *scene)
ScrArea *ED_screen_full_newspace(bContext *C, ScrArea *area, int type)
{
bScreen *newscreen = NULL;
ScrArea *newsa = NULL;
SpaceLink *newsl;
if (!area || area->full == NULL) {
newsa = ED_screen_state_maximized_create(C);
newscreen = ED_screen_state_maximized_create(C);
newsa = newscreen->areabase.first;
BLI_assert(newsa->spacetype == SPACE_EMPTY);
}
if (!newsa) {
@@ -1296,6 +1299,10 @@ ScrArea *ED_screen_full_newspace(bContext *C, ScrArea *area, int type)
ED_area_newspace(C, newsa, type, (newsl && newsl->link_flag & SPACE_FLAG_TYPE_TEMPORARY));
if (newscreen) {
ED_screen_change(C, newscreen);
}
return newsa;
}
@@ -1361,6 +1368,10 @@ void ED_screen_full_restore(bContext *C, ScrArea *area)
* \param toggle_area: If this is set, its space data will be swapped with the one of the new empty
* area, when toggling back it can be swapped back again.
* \return The newly created screen with the non-normal area.
*
* \note The caller must run #ED_screen_change; this is not done in this function
* as it would attempt to initialize areas that don't yet have a space-type assigned
* (converting them to 3D view without creating the space-data).
*/
static bScreen *screen_state_to_nonnormal(bContext *C,
wmWindow *win,
@@ -1429,7 +1440,6 @@ static bScreen *screen_state_to_nonnormal(bContext *C,
}
newa->full = oldscreen;
ED_screen_change(C, screen);
ED_area_tag_refresh(newa);
return screen;
@@ -1442,10 +1452,9 @@ static bScreen *screen_state_to_nonnormal(bContext *C,
* Use this to just create a new maximized screen/area, rather than maximizing an existing one.
* Otherwise, maximize with #ED_screen_state_toggle().
*/
ScrArea *ED_screen_state_maximized_create(bContext *C)
bScreen *ED_screen_state_maximized_create(bContext *C)
{
bScreen *screen = screen_state_to_nonnormal(C, CTX_wm_window(C), NULL, SCREENMAXIMIZED);
return screen->areabase.first;
return screen_state_to_nonnormal(C, CTX_wm_window(C), NULL, SCREENMAXIMIZED);
}
/**
@@ -1548,6 +1557,8 @@ ScrArea *ED_screen_state_toggle(bContext *C, wmWindow *win, ScrArea *area, const
}
screen = screen_state_to_nonnormal(C, win, toggle_area, state);
ED_screen_change(C, screen);
}
BLI_assert(CTX_wm_screen(C) == screen);

View File

@@ -358,9 +358,24 @@ bool ED_operator_object_active(bContext *C)
return ((ob != NULL) && !ed_object_hidden(ob));
}
bool ED_operator_object_active_editable_ex(bContext *UNUSED(C), const Object *ob)
bool ED_operator_object_active_editable_ex(bContext *C, const Object *ob)
{
return ((ob != NULL) && !ID_IS_LINKED(ob) && !ed_object_hidden(ob));
if (ob == NULL) {
CTX_wm_operator_poll_msg_set(C, "Context missing active object");
return false;
}
if (ID_IS_LINKED(ob)) {
CTX_wm_operator_poll_msg_set(C, "Cannot edit library linked object");
return false;
}
if (ed_object_hidden(ob)) {
CTX_wm_operator_poll_msg_set(C, "Cannot edit hidden obect");
return false;
}
return true;
}
bool ED_operator_object_active_editable(bContext *C)
@@ -444,28 +459,48 @@ bool ED_operator_editarmature(bContext *C)
}
/**
* \brief check for pose mode (no mixed modes)
* Check for pose mode (no mixed modes).
*
* We want to enable most pose operations in weight paint mode,
* when it comes to transforming bones, but managing bones layers/groups
* can be left for pose mode only. (not weight paint mode)
* We want to enable most pose operations in weight paint mode, when it comes to transforming
* bones, but managing bones layers/groups and their constraints can be left for pose mode only
* (not weight paint mode).
*/
bool ED_operator_posemode_exclusive(bContext *C)
static bool ed_operator_posemode_exclusive_ex(bContext *C, Object *obact)
{
Object *obact = CTX_data_active_object(C);
if (obact && !(obact->mode & OB_MODE_EDIT)) {
Object *obpose = BKE_object_pose_armature_get(obact);
if (obpose != NULL) {
if (obact == obpose) {
return true;
}
if (obact != NULL && !(obact->mode & OB_MODE_EDIT)) {
if (obact == BKE_object_pose_armature_get(obact)) {
return true;
}
}
CTX_wm_operator_poll_msg_set(C, "No object, or not exclusively in pose mode");
return false;
}
bool ED_operator_posemode_exclusive(bContext *C)
{
Object *obact = ED_object_active_context(C);
return ed_operator_posemode_exclusive_ex(C, obact);
}
/** Object must be editable, fully local (i.e. not an override), and exclusively in Pose mode. */
bool ED_operator_object_active_local_editable_posemode_exclusive(bContext *C)
{
Object *obact = ED_object_active_context(C);
if (!ed_operator_posemode_exclusive_ex(C, obact)) {
return false;
}
if (ID_IS_OVERRIDE_LIBRARY(obact)) {
CTX_wm_operator_poll_msg_set(C, "Object is a local library override");
return false;
}
return true;
}
/* allows for pinned pose objects to be used in the object buttons
* and the non-active pose object to be used in the 3D view */
bool ED_operator_posemode_context(bContext *C)
@@ -4453,7 +4488,8 @@ static void screen_animation_region_tag_redraw(ScrArea *area,
}
if (area->spacetype == SPACE_SEQ) {
if (!ED_space_sequencer_has_visible_animation_on_strip(scene)) {
const SpaceSeq *sseq = area->spacedata.first;
if (!ED_space_sequencer_has_playback_animation(sseq, scene)) {
return;
}
}

View File

@@ -5571,7 +5571,7 @@ static bool project_paint_op(void *state, const float lastpos[2], const float po
}
if (ps->thread_tot > 1) {
task_pool = BLI_task_pool_create_suspended(NULL, TASK_PRIORITY_HIGH, TASK_ISOLATION_ON);
task_pool = BLI_task_pool_create_suspended(NULL, TASK_PRIORITY_HIGH);
}
image_pool = BKE_image_pool_new();

View File

@@ -153,6 +153,8 @@ static SpaceLink *action_duplicate(SpaceLink *sl)
{
SpaceAction *sactionn = MEM_dupallocN(sl);
memset(&sactionn->runtime, 0x0, sizeof(sactionn->runtime));
/* clear or remove stuff from old */
return (SpaceLink *)sactionn;

View File

@@ -904,7 +904,7 @@ static void start_prefetch_threads(MovieClip *clip,
queue.do_update = do_update;
queue.progress = progress;
TaskPool *task_pool = BLI_task_pool_create(&queue, TASK_PRIORITY_LOW, TASK_ISOLATION_ON);
TaskPool *task_pool = BLI_task_pool_create(&queue, TASK_PRIORITY_LOW);
for (int i = 0; i < tot_thread; i++) {
BLI_task_pool_push(task_pool, prefetch_task_func, clip, false, NULL);
}

View File

@@ -1429,7 +1429,7 @@ static void do_sequence_proxy(void *pjv,
queue.do_update = do_update;
queue.progress = progress;
TaskPool *task_pool = BLI_task_pool_create(&queue, TASK_PRIORITY_LOW, TASK_ISOLATION_ON);
TaskPool *task_pool = BLI_task_pool_create(&queue, TASK_PRIORITY_LOW);
handles = MEM_callocN(sizeof(ProxyThread) * tot_thread, "proxy threaded handles");
for (int i = 0; i < tot_thread; i++) {
ProxyThread *handle = &handles[i];

Some files were not shown because too many files have changed in this diff.