Sculpt Dyntopo: PBVH draw fixes

* The PBVH draw subsystem is now told whether any viewport
  has drawtype >= OB_MATERIAL before anything in any window
  is drawn.  There is no alternative given the design
  constraints of sculpting, where simply uploading data to the GPU
  quickly becomes a bottleneck.

* Fixed flat vcol shading mode.
This commit is contained in:
Joseph Eagar 2021-08-07 22:28:00 -07:00
parent 1e9a7383ef
commit a80c381ec5
8 changed files with 130 additions and 80 deletions

View File

@ -349,8 +349,7 @@ void BKE_pbvh_draw_cb(PBVH *pbvh,
PBVHFrustumPlanes *update_frustum,
PBVHFrustumPlanes *draw_frustum,
void (*draw_fn)(void *user_data, struct GPU_PBVH_Buffers *buffers),
void *user_data,
bool active_vcol_only);
void *user_data);
void BKE_pbvh_draw_debug_cb(
PBVH *pbvh,

View File

@ -1045,7 +1045,6 @@ typedef struct PBVHUpdateData {
int flag;
bool show_sculpt_face_sets;
bool flat_vcol_shading;
bool active_vcol_only;
} PBVHUpdateData;
static void pbvh_update_normals_accum_task_cb(void *__restrict userdata,
@ -1401,7 +1400,6 @@ static void pbvh_update_draw_buffer_cb(void *__restrict userdata,
pbvh->face_sets_color_seed,
pbvh->face_sets_color_default,
data->flat_vcol_shading,
data->active_vcol_only,
node->tri_buffers[i].mat_nr);
}
break;
@ -1456,8 +1454,7 @@ void pbvh_update_free_all_draw_buffers(PBVH *pbvh, PBVHNode *node)
}
}
static void pbvh_update_draw_buffers(
PBVH *pbvh, PBVHNode **nodes, int totnode, int update_flag, bool active_vcol_only)
static void pbvh_update_draw_buffers(PBVH *pbvh, PBVHNode **nodes, int totnode, int update_flag)
{
if ((update_flag & PBVH_RebuildDrawBuffers) || ELEM(pbvh->type, PBVH_GRIDS, PBVH_BMESH)) {
/* Free buffers uses OpenGL, so not in parallel. */
@ -1489,13 +1486,11 @@ static void pbvh_update_draw_buffers(
ldata = pbvh->ldata;
}
GPU_pbvh_update_attribute_names(vdata, ldata, active_vcol_only);
GPU_pbvh_update_attribute_names(vdata, ldata, GPU_pbvh_need_full_render_get());
/* Parallel creation and update of draw buffers. */
PBVHUpdateData data = {.pbvh = pbvh,
.nodes = nodes,
.flat_vcol_shading = pbvh->flat_vcol_shading,
.active_vcol_only = active_vcol_only};
PBVHUpdateData data = {
.pbvh = pbvh, .nodes = nodes, .flat_vcol_shading = pbvh->flat_vcol_shading};
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, true, totnode);
@ -2926,8 +2921,7 @@ void BKE_pbvh_draw_cb(PBVH *pbvh,
PBVHFrustumPlanes *update_frustum,
PBVHFrustumPlanes *draw_frustum,
void (*draw_fn)(void *user_data, GPU_PBVH_Buffers *buffers),
void *user_data,
bool active_vcol_only)
void *user_data)
{
PBVHNode **nodes;
int totnode;
@ -2950,7 +2944,28 @@ void BKE_pbvh_draw_cb(PBVH *pbvh,
/* Update draw buffers. */
if (totnode != 0 && (update_flag & (PBVH_RebuildDrawBuffers | PBVH_UpdateDrawBuffers))) {
pbvh_update_draw_buffers(pbvh, nodes, totnode, update_flag, active_vcol_only);
// Check that the PBVH's cached need_full_render flag matches the global
// GPU_pbvh_need_full_render_get() state, but only if nodes need updating.
if (pbvh->type == PBVH_BMESH && pbvh->need_full_render != GPU_pbvh_need_full_render_get()) {
// update all nodes
MEM_SAFE_FREE(nodes);
printf("Rebuilding PBVH draw buffers...\n");
for (int i = 0; i < pbvh->totnode; i++) {
PBVHNode *node = pbvh->nodes + i;
node->flag |= PBVH_UpdateDrawBuffers | PBVH_RebuildDrawBuffers;
}
pbvh->need_full_render = GPU_pbvh_need_full_render_get();
BKE_pbvh_draw_cb(
pbvh, update_only_visible, update_frustum, draw_frustum, draw_fn, user_data);
return;
}
pbvh_update_draw_buffers(pbvh, nodes, totnode, update_flag);
}
MEM_SAFE_FREE(nodes);

View File

@ -205,6 +205,7 @@ struct PBVH {
struct SubdivCCG *subdiv_ccg;
bool flat_vcol_shading;
bool need_full_render; // used by pbvh drawing for PBVH_BMESH
};
/* pbvh.c */

View File

@ -29,6 +29,7 @@
#include "BLI_string.h"
#include "BLI_task.h"
#include "BLI_threads.h"
#include "BLI_utildefines.h"
#include "BLF_api.h"
@ -64,6 +65,7 @@
#include "ED_space_api.h"
#include "ED_view3d.h"
#include "GPU_buffers.h"
#include "GPU_capabilities.h"
#include "GPU_framebuffer.h"
#include "GPU_immediate.h"

View File

@ -924,7 +924,6 @@ typedef struct DRWSculptCallbackData {
bool fast_mode; /* Set by draw manager. Do not init. */
int debug_node_nr;
bool active_vcol_only;
} DRWSculptCallbackData;
#define SCULPT_DEBUG_COLOR(id) (sculpt_debug_colors[id % 9])
@ -1092,8 +1091,7 @@ static void drw_sculpt_generate_calls(DRWSculptCallbackData *scd)
&update_frustum,
&draw_frustum,
(void (*)(void *, GPU_PBVH_Buffers *))sculpt_draw_cb,
scd,
scd->active_vcol_only);
scd);
if (SCULPT_DEBUG_BUFFERS) {
int debug_node_nr = 0;
@ -1113,8 +1111,7 @@ void DRW_shgroup_call_sculpt(DRWShadingGroup *shgroup, Object *ob, bool use_wire
.num_shading_groups = 1,
.use_wire = use_wire,
.use_mats = false,
.use_mask = use_mask,
.active_vcol_only = true};
.use_mask = use_mask};
drw_sculpt_generate_calls(&scd);
}
@ -1127,8 +1124,8 @@ void DRW_shgroup_call_sculpt_with_materials(DRWShadingGroup **shgroups,
.num_shading_groups = num_shgroups,
.use_wire = false,
.use_mats = true,
.use_mask = false,
.active_vcol_only = false};
.use_mask = false};
drw_sculpt_generate_calls(&scd);
}

View File

@ -87,11 +87,9 @@ void GPU_pbvh_mesh_buffers_update(GPU_PBVH_Buffers *buffers,
const struct MPropCol *vtcol,
const int update_flags);
/** if active_vcol_only is true, only the active (not render!) layer will
be uploaded to GPU*/
void GPU_pbvh_update_attribute_names(struct CustomData *vdata,
struct CustomData *ldata,
bool active_vcol_only);
bool need_full_render);
void GPU_pbvh_bmesh_buffers_update(GPU_PBVH_Buffers *buffers,
struct BMesh *bm,
@ -104,7 +102,6 @@ void GPU_pbvh_bmesh_buffers_update(GPU_PBVH_Buffers *buffers,
int face_sets_color_seed,
int face_sets_color_default,
bool flat_vcol,
bool active_vcol_only,
short mat_nr);
void GPU_pbvh_grid_buffers_update(GPU_PBVH_Buffers *buffers,
@ -132,6 +129,12 @@ short GPU_pbvh_buffers_material_index_get(GPU_PBVH_Buffers *buffers);
bool GPU_pbvh_buffers_has_overlays(GPU_PBVH_Buffers *buffers);
float *GPU_pbvh_get_extra_matrix(GPU_PBVH_Buffers *buffers);
/** If need_full_render is false, only the active (not render!) vcol layer
 * will be uploaded to the GPU. */
void GPU_pbvh_need_full_render_set(bool state);
bool GPU_pbvh_need_full_render_get(void);
#ifdef __cplusplus
}
#endif

View File

@ -154,6 +154,9 @@ static struct {
int vertex_attrs_len;
int loop_attrs_len;
#endif
bool active_vcol_only;
bool need_full_render;
} g_vbo_id = {{0}};
#ifdef NEW_ATTR_SYSTEM
@ -1218,8 +1221,21 @@ static int gpu_pbvh_bmesh_make_vcol_offs(CustomData *vdata,
return count;
}
void GPU_pbvh_update_attribute_names(CustomData *vdata, CustomData *ldata, bool active_only)
/* Set whether PBVH drawing must upload data for a full (material) render.
 *
 * `state` true: all vertex-color layers are needed on the GPU (a viewport
 * is drawing with shading type >= OB_MATERIAL).
 * `state` false: only the active vcol layer is uploaded, saving bandwidth.
 *
 * `active_vcol_only` is kept as the exact inverse of `need_full_render`
 * so the upload code can test either flag. */
void GPU_pbvh_need_full_render_set(bool state)
{
g_vbo_id.need_full_render = state;
g_vbo_id.active_vcol_only = !state;
}
/* Query whether PBVH drawing is in full-render mode (all vcol layers
 * uploaded), as last set by GPU_pbvh_need_full_render_set().
 *
 * Note: the parameter list is spelled `(void)` to match the header
 * prototype — in C, an empty `()` declares an unspecified parameter
 * list, not "no parameters". */
bool GPU_pbvh_need_full_render_get(void)
{
  return g_vbo_id.need_full_render;
}
void GPU_pbvh_update_attribute_names(CustomData *vdata, CustomData *ldata, bool need_full_render)
{
const bool active_only = !need_full_render;
GPU_vertformat_clear(&g_vbo_id.format);
// g_vbo_id.loop_attrs = build_cd_layers(vdata, )
@ -1288,7 +1304,6 @@ void GPU_pbvh_update_attribute_names(CustomData *vdata, CustomData *ldata, bool
#ifndef NEW_ATTR_SYSTEM
if (vdata && CustomData_has_layer(vdata, CD_PROP_COLOR)) {
const int cd_vcol_index = CustomData_get_layer_index(vdata, CD_PROP_COLOR);
const int act = CustomData_get_active_layer_index(vdata, CD_PROP_COLOR);
int ci = 0;
@ -1390,23 +1405,20 @@ static void GPU_pbvh_bmesh_buffers_update_flat_vcol(GPU_PBVH_Buffers *buffers,
TableGSet *bm_faces,
TableGSet *bm_unique_verts,
TableGSet *bm_other_verts,
PBVHTriBuf *tribuf,
const int update_flags,
const int cd_vert_node_offset,
int face_sets_color_seed,
int face_sets_color_default,
bool active_vcol_only,
short mat_nr)
{
const bool have_uv = CustomData_has_layer(&bm->ldata, CD_MLOOPUV);
const bool show_mask = (update_flags & GPU_PBVH_BUFFERS_SHOW_MASK) != 0;
const bool show_vcol = (update_flags & GPU_PBVH_BUFFERS_SHOW_VCOL) != 0;
bool active_vcol_only = g_vbo_id.active_vcol_only;
const bool show_face_sets = CustomData_has_layer(&bm->pdata, CD_SCULPT_FACE_SETS) &&
(update_flags & GPU_PBVH_BUFFERS_SHOW_SCULPT_FACE_SETS) != 0;
int tottri, totvert;
bool empty_mask = true;
BMFace *f = NULL;
int cd_vcol_offset = CustomData_get_offset(&bm->vdata, CD_PROP_COLOR);
int cd_fset_offset = CustomData_get_offset(&bm->pdata, CD_SCULPT_FACE_SETS);
int cd_vcols[MAX_MCOL];
@ -1416,7 +1428,7 @@ static void GPU_pbvh_bmesh_buffers_update_flat_vcol(GPU_PBVH_Buffers *buffers,
&bm->vdata, cd_vcols, cd_vcol_layers, active_vcol_only);
/* Count visible triangles */
tottri = gpu_bmesh_face_visible_count(bm_faces, mat_nr) * 6;
tottri = tribuf->tottri * 6;
totvert = tottri * 3;
if (!tottri) {
@ -1432,8 +1444,6 @@ static void GPU_pbvh_bmesh_buffers_update_flat_vcol(GPU_PBVH_Buffers *buffers,
/* TODO: make mask layer optional for bmesh buffer. */
const int cd_vert_mask_offset = CustomData_get_offset(&bm->vdata, CD_PAINT_MASK);
const int cd_mcol_offset = CustomData_get_offset(&bm->ldata, CD_MLOOPCOL);
const int cd_uv_offset = CustomData_get_offset(&bm->ldata, CD_MLOOPUV);
bool default_face_set = true;
@ -1445,30 +1455,29 @@ static void GPU_pbvh_bmesh_buffers_update_flat_vcol(GPU_PBVH_Buffers *buffers,
int v_index = 0;
// disable shared vertex mode for now
GPUIndexBufBuilder elb_lines;
GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tottri * 3, tottri * 3);
TGSET_ITER (f, bm_faces) {
for (int i = 0; i < tribuf->tottri; i++) {
PBVHTri *tri = tribuf->tris + i;
BMFace *f = (BMFace *)tri->f.i;
if (f->mat_nr != mat_nr) {
continue;
}
BLI_assert(f->len == 3);
if (!BM_elem_flag_test(f, BM_ELEM_HIDDEN)) {
BMVert *v[3];
BMLoop *l[3] = {f->l_first, f->l_first->next, f->l_first->prev};
float fmask = 0.0f;
int i;
BM_face_as_array_vert_tri(f, v);
v[0] = (BMVert *)tribuf->verts[tri->v[0]].i;
v[1] = (BMVert *)tribuf->verts[tri->v[1]].i;
v[2] = (BMVert *)tribuf->verts[tri->v[2]].i;
/* Average mask value */
for (i = 0; i < 3; i++) {
fmask += BM_ELEM_CD_GET_FLOAT(v[i], cd_vert_mask_offset);
for (int j = 0; j < 3; j++) {
fmask += BM_ELEM_CD_GET_FLOAT(v[j], cd_vert_mask_offset);
}
fmask /= 3.0f;
@ -1496,11 +1505,6 @@ static void GPU_pbvh_bmesh_buffers_update_flat_vcol(GPU_PBVH_Buffers *buffers,
copy_v3_v3(cos[1], v[1]->co);
copy_v3_v3(cos[2], v[2]->co);
float v3[3];
float v4[3];
float v5[3];
float v6[3];
copy_v3_v3(cos[6], cent);
interp_v3_v3v3(cos[3], v[0]->co, v[1]->co, 0.5f);
@ -1539,21 +1543,24 @@ static void GPU_pbvh_bmesh_buffers_update_flat_vcol(GPU_PBVH_Buffers *buffers,
next = j == 2 ? v_start : v_index + 6;
GPU_indexbuf_add_line_verts(&elb_lines, v_index, next);
// GPU_indexbuf_add_line_verts(&elb_lines, v_index + 1, v_index + 2);
// GPU_indexbuf_add_line_verts(&elb_lines, v_index + 2, v_index + 0);
if (tri->eflag & 1) {
GPU_indexbuf_add_line_verts(&elb_lines, v_index, next);
// GPU_indexbuf_add_line_verts(&elb_lines, v_index + 1, v_index + 2);
// GPU_indexbuf_add_line_verts(&elb_lines, v_index + 2, v_index + 0);
}
if (tri->eflag & 2) {
// GPU_indexbuf_add_line_verts(&elb_lines, v_index + 1, v_index + 2);
}
if (tri->eflag & 4) {
// GPU_indexbuf_add_line_verts(&elb_lines, v_index + 2, v_index + 0);
}
v_index += 6;
}
/*
if (have_uv) {
MLoopUV *mu = BM_ELEM_CD_GET_VOID_P(l[i], cd_uv_offset);
GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.uv, v_index, mu->uv);
}
*/
}
}
TGSET_ITER_END
buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
buffers->tot_tri = tottri;
@ -1577,21 +1584,16 @@ static void GPU_pbvh_bmesh_buffers_update_indexed(GPU_PBVH_Buffers *buffers,
int face_sets_color_seed,
int face_sets_color_default,
bool flat_vcol,
bool active_vcol_only,
short mat_nr)
{
const bool have_uv = CustomData_has_layer(&bm->ldata, CD_MLOOPUV);
bool active_vcol_only = g_vbo_id.active_vcol_only;
const bool show_mask = (update_flags & GPU_PBVH_BUFFERS_SHOW_MASK) != 0;
const bool show_vcol = (update_flags & GPU_PBVH_BUFFERS_SHOW_VCOL) != 0;
const bool show_face_sets = CustomData_has_layer(&bm->pdata, CD_SCULPT_FACE_SETS) &&
(update_flags & GPU_PBVH_BUFFERS_SHOW_SCULPT_FACE_SETS) != 0;
int tottri, totvert;
bool empty_mask = true;
BMFace *f = NULL;
int cd_vcol_offset = CustomData_get_offset(&bm->vdata, CD_PROP_COLOR);
int cd_fset_offset = CustomData_get_offset(&bm->pdata, CD_SCULPT_FACE_SETS);
int cd_vcols[MAX_MCOL];
int cd_vcol_layers[MAX_MCOL];
@ -1618,8 +1620,6 @@ static void GPU_pbvh_bmesh_buffers_update_indexed(GPU_PBVH_Buffers *buffers,
/* TODO, make mask layer optional for bmesh buffer */
const int cd_vert_mask_offset = CustomData_get_offset(&bm->vdata, CD_PAINT_MASK);
const int cd_mcol_offset = CustomData_get_offset(&bm->ldata, CD_MLOOPCOL);
const int cd_uv_offset = CustomData_get_offset(&bm->ldata, CD_MLOOPUV);
bool default_face_set = true;
@ -1629,16 +1629,14 @@ static void GPU_pbvh_bmesh_buffers_update_indexed(GPU_PBVH_Buffers *buffers,
return;
}
int v_index = 0;
/* Fill the vertex and triangle buffer in one pass over faces. */
GPUIndexBufBuilder elb, elb_lines;
GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, tottri, totvert);
GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tottri * 3, totvert);
#ifdef QUANTIZED_PERF_TEST
GPUVertBuf *vert_buf = buffers->vert_buf;
#ifdef QUANTIZED_PERF_TEST
float min[3];
float max[3];
float mat[4][4];
@ -1760,35 +1758,33 @@ void GPU_pbvh_bmesh_buffers_update(GPU_PBVH_Buffers *buffers,
int face_sets_color_seed,
int face_sets_color_default,
bool flat_vcol,
bool active_vcol_only,
short mat_nr)
{
bool active_vcol_only = g_vbo_id.active_vcol_only;
if (flat_vcol && CustomData_has_layer(&bm->vdata, CD_PROP_COLOR)) {
GPU_pbvh_bmesh_buffers_update_flat_vcol(buffers,
bm,
bm_faces,
bm_unique_verts,
bm_other_verts,
tribuf,
update_flags,
cd_vert_node_offset,
face_sets_color_seed,
face_sets_color_default,
active_vcol_only,
mat_nr);
return;
}
const bool have_uv = CustomData_has_layer(&bm->ldata, CD_MLOOPUV);
const bool show_mask = (update_flags & GPU_PBVH_BUFFERS_SHOW_MASK) != 0;
const bool show_vcol = (update_flags & GPU_PBVH_BUFFERS_SHOW_VCOL) != 0;
const bool show_face_sets = CustomData_has_layer(&bm->pdata, CD_SCULPT_FACE_SETS) &&
(update_flags & GPU_PBVH_BUFFERS_SHOW_SCULPT_FACE_SETS) != 0;
int tottri, totvert;
bool empty_mask = true;
BMFace *f = NULL;
int cd_vcol_offset = CustomData_get_offset(&bm->vdata, CD_PROP_COLOR);
int cd_fset_offset = CustomData_get_offset(&bm->pdata, CD_SCULPT_FACE_SETS);
int cd_vcols[MAX_MCOL];
@ -1810,7 +1806,6 @@ void GPU_pbvh_bmesh_buffers_update(GPU_PBVH_Buffers *buffers,
face_sets_color_seed,
face_sets_color_default,
flat_vcol,
active_vcol_only,
mat_nr);
return;
}
@ -1828,7 +1823,7 @@ void GPU_pbvh_bmesh_buffers_update(GPU_PBVH_Buffers *buffers,
if (!tottri) {
/* empty node (i.e. not just hidden)? */
if (!BLI_table_gset_len(bm_faces) != 0) {
if (BLI_table_gset_len(bm_faces) == 0) {
buffers->clear_bmesh_on_flush = true;
}

View File

@ -52,6 +52,7 @@
#include "ED_view3d.h"
#include "GPU_batch_presets.h"
#include "GPU_buffers.h"
#include "GPU_context.h"
#include "GPU_debug.h"
#include "GPU_framebuffer.h"
@ -1039,6 +1040,43 @@ void wm_draw_update(bContext *C)
GPU_context_main_lock();
BKE_image_free_unused_gpu_textures();
/* We can save GPU bandwidth for PBVH drawing if we know for sure that no
   viewport in any window has EEVEE running in it.
   This is because the PBVH supplies only one set of drawing buffers
   to the draw manager; creating more buffers for specific draw engines
   is simply not feasible for performance reasons.
 */
LISTBASE_FOREACH (wmWindow *, win, &wm->windows) {
GHOST_TWindowState state = GHOST_GetWindowState(win->ghostwin);
if (state == GHOST_kWindowStateMinimized) {
continue;
}
CTX_wm_window_set(C, win);
GPU_pbvh_need_full_render_set(false);
if (wm_draw_update_test_window(bmain, C, win)) {
bScreen *screen = WM_window_get_active_screen(win);
/* Draw screen areas into own frame buffer. */
ED_screen_areas_iter (win, screen, area) {
if (area->spacetype != SPACE_VIEW3D) {
continue;
}
CTX_wm_area_set(C, area);
View3D *v3d = CTX_wm_view3d(C);
if (v3d->shading.type >= OB_MATERIAL) {
GPU_pbvh_need_full_render_set(true);
}
}
}
}
LISTBASE_FOREACH (wmWindow *, win, &wm->windows) {
#ifdef WIN32
GHOST_TWindowState state = GHOST_GetWindowState(win->ghostwin);