Improve multires performance.

Added a new API function to stitch multires grids
on specific faces in a mesh,
subdiv_ccg_average_faces_boundaries_and_corners,
and changed the multires normal calculation to use it.

VTune profiling showed that averaging all grid
boundaries and corners was a major performance hit
once you get above 10,000 or so base mesh faces
and/or have a high number of subdivision levels.
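
For reference, a minimal sketch of how the new helper is
driven from the normal-update path (names are taken from the
diff below; the surrounding lines are illustrative only):

  /* Stitch only the boundaries/corners of grids belonging to the
   * modified faces, instead of every adjacent edge and vertex. */
  CCGKey key;
  BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
  subdiv_ccg_average_faces_boundaries_and_corners(
      subdiv_ccg, &key, effected_faces, num_effected_faces);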

Here's a video comparing the difference. Note that
the bpy.app.debug switch used is not in the final commit.

{F10145323}

And the .blend file:

{F10145346}

Reviewed By: Sergey Sharybin (sergey)

Differential Revision: https://developer.blender.org/D11334
Joseph Eagar 2021-06-07 15:19:19 -07:00
parent a6715213c3
commit ef5a362a5b
Notes: blender-bot 2023-02-14 11:07:28 +01:00
Referenced by issue #90641, Wireframe overlay doesn't work as expected (does not hide edges of coplanar faces)
Referenced by issue #77651, ChromeOS: Black screen/window on startup
1 changed file with 139 additions and 15 deletions


@@ -28,12 +28,14 @@
#include "MEM_guardedalloc.h"
#include "BLI_ghash.h"
#include "BLI_math_bits.h"
#include "BLI_math_vector.h"
#include "BLI_task.h"
#include "BKE_DerivedMesh.h"
#include "BKE_ccg.h"
#include "BKE_global.h"
#include "BKE_mesh.h"
#include "BKE_subdiv.h"
#include "BKE_subdiv_eval.h"
@@ -50,6 +52,11 @@ static void subdiv_ccg_average_inner_face_grids(SubdivCCG *subdiv_ccg,
CCGKey *key,
SubdivCCGFace *face);
void subdiv_ccg_average_faces_boundaries_and_corners(SubdivCCG *subdiv_ccg,
CCGKey *key,
struct CCGFace **effected_faces,
int num_effected_faces);
/** \} */
/* -------------------------------------------------------------------- */
@@ -889,11 +896,12 @@ void BKE_subdiv_ccg_update_normals(SubdivCCG *subdiv_ccg,
return;
}
subdiv_ccg_recalc_modified_inner_grid_normals(subdiv_ccg, effected_faces, num_effected_faces);
/* TODO(sergey): Only average elements which are adjacent to modified
* faces. */
CCGKey key;
BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
subdiv_ccg_average_all_boundaries_and_corners(subdiv_ccg, &key);
subdiv_ccg_average_faces_boundaries_and_corners(
subdiv_ccg, &key, effected_faces, num_effected_faces);
}
/** \} */
@@ -1032,6 +1040,9 @@ static void subdiv_ccg_average_inner_grids_task(void *__restrict userdata_v,
typedef struct AverageGridsBoundariesData {
SubdivCCG *subdiv_ccg;
CCGKey *key;
/* Optional lookup table. Maps task range index to index in `subdiv_ccg->adjacent_edges`. */
int *adjacent_edge_index_map;
} AverageGridsBoundariesData;
typedef struct AverageGridsBoundariesTLSData {
@@ -1079,10 +1090,14 @@ static void subdiv_ccg_average_grids_boundary(SubdivCCG *subdiv_ccg,
}
static void subdiv_ccg_average_grids_boundaries_task(void *__restrict userdata_v,
const int adjacent_edge_index,
const int n,
const TaskParallelTLS *__restrict tls_v)
{
AverageGridsBoundariesData *data = userdata_v;
const int adjacent_edge_index = data->adjacent_edge_index_map ?
data->adjacent_edge_index_map[n] :
n;
AverageGridsBoundariesTLSData *tls = tls_v->userdata_chunk;
SubdivCCG *subdiv_ccg = data->subdiv_ccg;
CCGKey *key = data->key;
@@ -1100,6 +1115,9 @@ static void subdiv_ccg_average_grids_boundaries_free(const void *__restrict UNUS
typedef struct AverageGridsCornerData {
SubdivCCG *subdiv_ccg;
CCGKey *key;
/* Optional lookup table. Maps task range index to index in `subdiv_ccg->adjacent_vertices`. */
int *adjacent_vert_index_map;
} AverageGridsCornerData;
static void subdiv_ccg_average_grids_corners(SubdivCCG *subdiv_ccg,
@@ -1128,49 +1146,63 @@ static void subdiv_ccg_average_grids_corners(SubdivCCG *subdiv_ccg,
}
static void subdiv_ccg_average_grids_corners_task(void *__restrict userdata_v,
const int adjacent_vertex_index,
const int n,
const TaskParallelTLS *__restrict UNUSED(tls_v))
{
AverageGridsCornerData *data = userdata_v;
const int adjacent_vertex_index = data->adjacent_vert_index_map ?
data->adjacent_vert_index_map[n] :
n;
SubdivCCG *subdiv_ccg = data->subdiv_ccg;
CCGKey *key = data->key;
SubdivCCGAdjacentVertex *adjacent_vertex = &subdiv_ccg->adjacent_vertices[adjacent_vertex_index];
subdiv_ccg_average_grids_corners(subdiv_ccg, key, adjacent_vertex);
}
static void subdiv_ccg_average_all_boundaries(SubdivCCG *subdiv_ccg, CCGKey *key)
static void subdiv_ccg_average_boundaries(SubdivCCG *subdiv_ccg,
CCGKey *key,
int *adjacent_edge_index_map,
int num_adjacent_edges)
{
TaskParallelSettings parallel_range_settings;
BLI_parallel_range_settings_defaults(&parallel_range_settings);
AverageGridsBoundariesData boundaries_data = {
.subdiv_ccg = subdiv_ccg,
.key = key,
};
.subdiv_ccg = subdiv_ccg, .key = key, .adjacent_edge_index_map = adjacent_edge_index_map};
AverageGridsBoundariesTLSData tls_data = {NULL};
parallel_range_settings.userdata_chunk = &tls_data;
parallel_range_settings.userdata_chunk_size = sizeof(tls_data);
parallel_range_settings.func_free = subdiv_ccg_average_grids_boundaries_free;
BLI_task_parallel_range(0,
subdiv_ccg->num_adjacent_edges,
num_adjacent_edges,
&boundaries_data,
subdiv_ccg_average_grids_boundaries_task,
&parallel_range_settings);
}
static void subdiv_ccg_average_all_corners(SubdivCCG *subdiv_ccg, CCGKey *key)
static void subdiv_ccg_average_all_boundaries(SubdivCCG *subdiv_ccg, CCGKey *key)
{
subdiv_ccg_average_boundaries(subdiv_ccg, key, NULL, subdiv_ccg->num_adjacent_edges);
}
static void subdiv_ccg_average_corners(SubdivCCG *subdiv_ccg,
CCGKey *key,
int *adjacent_vert_index_map,
int num_adjacent_vertices)
{
TaskParallelSettings parallel_range_settings;
BLI_parallel_range_settings_defaults(&parallel_range_settings);
AverageGridsCornerData corner_data = {
.subdiv_ccg = subdiv_ccg,
.key = key,
};
.subdiv_ccg = subdiv_ccg, .key = key, .adjacent_vert_index_map = adjacent_vert_index_map};
BLI_task_parallel_range(0,
subdiv_ccg->num_adjacent_vertices,
num_adjacent_vertices,
&corner_data,
subdiv_ccg_average_grids_corners_task,
&parallel_range_settings);
}
static void subdiv_ccg_average_all_corners(SubdivCCG *subdiv_ccg, CCGKey *key)
{
subdiv_ccg_average_corners(subdiv_ccg, key, NULL, subdiv_ccg->num_adjacent_vertices);
}
static void subdiv_ccg_average_all_boundaries_and_corners(SubdivCCG *subdiv_ccg, CCGKey *key)
{
@@ -1198,6 +1230,98 @@ void BKE_subdiv_ccg_average_grids(SubdivCCG *subdiv_ccg)
subdiv_ccg_average_all_boundaries_and_corners(subdiv_ccg, &key);
}
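/* Collect the unique adjacent vertices and edges of the effected faces into the
 * given sets, so that boundary and corner averaging can be limited to them. */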
static void subdiv_ccg_affected_face_adjacency(SubdivCCG *subdiv_ccg,
struct CCGFace **effected_faces,
int num_effected_faces,
GSet *r_adjacent_vertices,
GSet *r_adjacent_edges)
{
Subdiv *subdiv = subdiv_ccg->subdiv;
OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner;
StaticOrHeapIntStorage face_vertices_storage;
StaticOrHeapIntStorage face_edges_storage;
static_or_heap_storage_init(&face_vertices_storage);
static_or_heap_storage_init(&face_edges_storage);
for (int i = 0; i < num_effected_faces; i++) {
SubdivCCGFace *face = (SubdivCCGFace *)effected_faces[i];
int face_index = face - subdiv_ccg->faces;
const int num_face_grids = face->num_grids;
const int num_face_edges = num_face_grids;
int *face_vertices = static_or_heap_storage_get(&face_vertices_storage, num_face_edges);
topology_refiner->getFaceVertices(topology_refiner, face_index, face_vertices);
/* Note that order of edges is same as order of MLoops, which also
* means it's the same as order of grids. */
int *face_edges = static_or_heap_storage_get(&face_edges_storage, num_face_edges);
topology_refiner->getFaceEdges(topology_refiner, face_index, face_edges);
for (int corner = 0; corner < num_face_edges; corner++) {
const int vertex_index = face_vertices[corner];
const int edge_index = face_edges[corner];
int edge_vertices[2];
topology_refiner->getEdgeVertices(topology_refiner, edge_index, edge_vertices);
SubdivCCGAdjacentEdge *adjacent_edge = &subdiv_ccg->adjacent_edges[edge_index];
BLI_gset_add(r_adjacent_edges, adjacent_edge);
SubdivCCGAdjacentVertex *adjacent_vertex = &subdiv_ccg->adjacent_vertices[vertex_index];
BLI_gset_add(r_adjacent_vertices, adjacent_vertex);
}
}
static_or_heap_storage_free(&face_vertices_storage);
static_or_heap_storage_free(&face_edges_storage);
}
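/* Average grid boundaries and corners only for the edges and vertices adjacent
 * to the effected faces, rather than for the whole mesh. */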
void subdiv_ccg_average_faces_boundaries_and_corners(SubdivCCG *subdiv_ccg,
CCGKey *key,
struct CCGFace **effected_faces,
int num_effected_faces)
{
GSet *adjacent_vertices = BLI_gset_ptr_new(__func__);
GSet *adjacent_edges = BLI_gset_ptr_new(__func__);
GSetIterator gi;
subdiv_ccg_affected_face_adjacency(
subdiv_ccg, effected_faces, num_effected_faces, adjacent_vertices, adjacent_edges);
int *adjacent_vertex_index_map;
int *adjacent_edge_index_map;
StaticOrHeapIntStorage index_heap;
static_or_heap_storage_init(&index_heap);
int i = 0;
/* Average boundaries. */
adjacent_edge_index_map = static_or_heap_storage_get(&index_heap, BLI_gset_len(adjacent_edges));
GSET_ITER_INDEX (gi, adjacent_edges, i) {
SubdivCCGAdjacentEdge *adjacent_edge = BLI_gsetIterator_getKey(&gi);
adjacent_edge_index_map[i] = adjacent_edge - subdiv_ccg->adjacent_edges;
}
subdiv_ccg_average_boundaries(
subdiv_ccg, key, adjacent_edge_index_map, BLI_gset_len(adjacent_edges));
/* Average corners. */
adjacent_vertex_index_map = static_or_heap_storage_get(&index_heap,
BLI_gset_len(adjacent_vertices));
GSET_ITER_INDEX (gi, adjacent_vertices, i) {
SubdivCCGAdjacentVertex *adjacent_vertex = BLI_gsetIterator_getKey(&gi);
adjacent_vertex_index_map[i] = adjacent_vertex - subdiv_ccg->adjacent_vertices;
}
subdiv_ccg_average_corners(
subdiv_ccg, key, adjacent_vertex_index_map, BLI_gset_len(adjacent_vertices));
BLI_gset_free(adjacent_vertices, NULL);
BLI_gset_free(adjacent_edges, NULL);
static_or_heap_storage_free(&index_heap);
}
typedef struct StitchFacesInnerGridsData {
SubdivCCG *subdiv_ccg;
CCGKey *key;