Workbench: Shadow: Use depth fail method for manifold objects.

Since this method has no failure case for manifold objects, use it.
Clément Foucault 2018-05-21 13:21:26 +02:00
parent 44935fdfa3
commit 0c9974c8cd
Notes: blender-bot 2023-02-14 08:40:26 +01:00
Referenced by issue #54931, Workbench: Performance Shadows
7 changed files with 25 additions and 13 deletions
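
For background: stencil shadow volumes can update the stencil count either when the depth test passes (depth-pass) or when it fails (depth-fail, "Carmack's reverse"). Depth-pass breaks when the camera sits inside a shadow volume, while depth-fail has no such failure case, but it requires the extruded volume to be closed, which is why it is restricted to manifold objects here. A minimal write-side sketch of the two configurations, assuming an OpenGL 2.0+ context and using the same front/back sign convention as the draw-manager hunk at the end of this commit:

#include <stdbool.h>
#include <GL/gl.h> /* assumes GL 2.0+ entry points (glStencilOpSeparate) are exposed, e.g. via GLEW or glad */

/* Sketch only: write-side stencil state for a shadow volume pass.
 * Color and depth writes are assumed to be disabled while the volume
 * geometry is drawn. */
static void shadow_volume_stencil_write(bool use_depth_fail)
{
	glEnable(GL_STENCIL_TEST);
	glStencilMask(0xFF);
	glStencilFunc(GL_ALWAYS, 0, 0xFF);
	if (use_depth_fail) {
		/* Depth-fail: update the count when the depth test fails.
		 * No failure case with the camera inside the volume, but the
		 * volume must be closed, i.e. come from a manifold mesh. */
		glStencilOpSeparate(GL_BACK, GL_KEEP, GL_DECR_WRAP, GL_KEEP);
		glStencilOpSeparate(GL_FRONT, GL_KEEP, GL_INCR_WRAP, GL_KEEP);
	}
	else {
		/* Depth-pass: update the count when the depth test passes.
		 * Also works for open meshes, but breaks when the camera is
		 * inside the shadow volume. */
		glStencilOpSeparate(GL_BACK, GL_KEEP, GL_KEEP, GL_INCR_WRAP);
		glStencilOpSeparate(GL_FRONT, GL_KEEP, GL_KEEP, GL_DECR_WRAP);
	}
}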


@@ -502,7 +502,8 @@ void workbench_deferred_solid_cache_populate(WORKBENCH_Data *vedata, Object *ob)
}
if (SHADOW_ENABLED(wpd) && (ob->display.flag & OB_SHOW_SHADOW) > 0) {
struct Gwn_Batch *geom_shadow = DRW_cache_object_edge_detection_get(ob);
bool is_manifold;
struct Gwn_Batch *geom_shadow = DRW_cache_object_edge_detection_get(ob, &is_manifold);
if (geom_shadow) {
if (is_sculpt_mode) {
/* Currently unsupported in sculpt mode. We could revert to the slow
@@ -518,7 +519,7 @@ void workbench_deferred_solid_cache_populate(WORKBENCH_Data *vedata, Object *ob)
mul_v3_mat3_m4v3(engine_object_data->shadow_dir, ob->imat, e_data.display.light_direction);
DRWShadingGroup *grp;
if (true) {
if (!is_manifold) {
grp = DRW_shgroup_create(e_data.shadow_pass_sh, psl->shadow_depth_pass_pass);
DRW_shgroup_uniform_vec3(grp, "lightDirection", engine_object_data->shadow_dir, 1);
DRW_shgroup_call_object_add(grp, geom_shadow, ob);
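
Only the branch for non-manifold geometry is visible in these hunks; the matching else branch that binds the depth-fail shader sits outside the shown context. A rough sketch of the selection this change introduces — the fail-path shader and pass names below are hypothetical placeholders, not identifiers taken from this diff:

bool is_manifold;
struct Gwn_Batch *geom_shadow = DRW_cache_object_edge_detection_get(ob, &is_manifold);
if (geom_shadow) {
	DRWShadingGroup *grp;
	if (!is_manifold) {
		/* Open meshes keep the depth-pass method (as in the hunk above). */
		grp = DRW_shgroup_create(e_data.shadow_pass_sh, psl->shadow_depth_pass_pass);
	}
	else {
		/* Closed meshes can use depth-fail; shader/pass names here are placeholders. */
		grp = DRW_shgroup_create(e_data.shadow_fail_sh, psl->shadow_depth_fail_pass);
	}
	DRW_shgroup_uniform_vec3(grp, "lightDirection", engine_object_data->shadow_dir, 1);
	DRW_shgroup_call_object_add(grp, geom_shadow, ob);
}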


@@ -327,7 +327,7 @@ static void drw_shgroup_bone_custom_solid(
DRW_shgroup_call_dynamic_add(shgrp_geom_solid, final_bonemat, bone_color, hint_color);
}
geom = DRW_cache_object_edge_detection_get(custom);
geom = DRW_cache_object_edge_detection_get(custom, NULL);
if (geom && outline_color[3] > 0.0f) {
DRWShadingGroup *shgrp_geom_wire = shgroup_instance_bone_shape_outline(g_data.passes.bone_outline, geom);
float final_bonemat[4][4], final_color[4];


@@ -503,11 +503,11 @@ Gwn_Batch *DRW_cache_object_wire_outline_get(Object *ob)
}
/* Returns a buffer texture. */
Gwn_Batch *DRW_cache_object_edge_detection_get(Object *ob)
Gwn_Batch *DRW_cache_object_edge_detection_get(Object *ob, bool *r_is_manifold)
{
switch (ob->type) {
case OB_MESH:
return DRW_cache_mesh_edge_detection_get(ob);
return DRW_cache_mesh_edge_detection_get(ob, r_is_manifold);
/* TODO, should match 'DRW_cache_object_surface_get' */
default:
@@ -2584,12 +2584,12 @@ Gwn_Batch *DRW_cache_mesh_wire_outline_get(Object *ob)
return DRW_mesh_batch_cache_get_fancy_edges(me);
}
Gwn_Batch *DRW_cache_mesh_edge_detection_get(Object *ob)
Gwn_Batch *DRW_cache_mesh_edge_detection_get(Object *ob, bool *r_is_manifold)
{
BLI_assert(ob->type == OB_MESH);
Mesh *me = ob->data;
return DRW_mesh_batch_cache_get_edge_detection(me);
return DRW_mesh_batch_cache_get_edge_detection(me, r_is_manifold);
}
Gwn_Batch *DRW_cache_mesh_surface_get(Object *ob)


@@ -48,7 +48,7 @@ struct Gwn_Batch *DRW_cache_screenspace_circle_get(void);
/* Common Object */
struct Gwn_Batch *DRW_cache_object_wire_outline_get(struct Object *ob);
struct Gwn_Batch *DRW_cache_object_edge_detection_get(struct Object *ob);
struct Gwn_Batch *DRW_cache_object_edge_detection_get(struct Object *ob, bool *r_is_manifold);
struct Gwn_Batch *DRW_cache_object_surface_get(struct Object *ob);
struct Gwn_Batch **DRW_cache_object_surface_material_get(
struct Object *ob, struct GPUMaterial **gpumat_array, uint gpumat_array_len,
@@ -120,7 +120,7 @@ void DRW_cache_mesh_normals_overlay_get(
struct Gwn_Batch **r_tris, struct Gwn_Batch **r_ledges, struct Gwn_Batch **r_lverts);
struct Gwn_Batch *DRW_cache_face_centers_get(struct Object *ob);
struct Gwn_Batch *DRW_cache_mesh_wire_outline_get(struct Object *ob);
struct Gwn_Batch *DRW_cache_mesh_edge_detection_get(struct Object *ob);
struct Gwn_Batch *DRW_cache_mesh_edge_detection_get(struct Object *ob, bool *r_is_manifold);
struct Gwn_Batch *DRW_cache_mesh_surface_get(struct Object *ob);
struct Gwn_Batch *DRW_cache_mesh_surface_weights_get(struct Object *ob);
struct Gwn_Batch *DRW_cache_mesh_surface_vert_colors_get(struct Object *ob);


@@ -110,7 +110,7 @@ struct Gwn_Batch *DRW_mesh_batch_cache_get_triangles_with_select_mask(struct Mes
struct Gwn_Batch *DRW_mesh_batch_cache_get_points_with_normals(struct Mesh *me);
struct Gwn_Batch *DRW_mesh_batch_cache_get_all_verts(struct Mesh *me);
struct Gwn_Batch *DRW_mesh_batch_cache_get_fancy_edges(struct Mesh *me);
struct Gwn_Batch *DRW_mesh_batch_cache_get_edge_detection(struct Mesh *me);
struct Gwn_Batch *DRW_mesh_batch_cache_get_edge_detection(struct Mesh *me, bool *r_is_manifold);
struct Gwn_Batch *DRW_mesh_batch_cache_get_overlay_triangles(struct Mesh *me);
struct Gwn_Batch *DRW_mesh_batch_cache_get_overlay_triangles_nor(struct Mesh *me);
struct Gwn_Batch *DRW_mesh_batch_cache_get_overlay_loose_edges(struct Mesh *me);


@@ -1633,6 +1633,9 @@ typedef struct MeshBatchCache {
/* XXX, only keep for as long as sculpt mode uses shaded drawing. */
bool is_sculpt_points_tag;
/* Valid only if edges_adjacency is up to date. */
bool is_manifold;
} MeshBatchCache;
/* Gwn_Batch cache management. */
@@ -3234,6 +3237,8 @@ static Gwn_IndexBuf *mesh_batch_cache_get_edges_adjacency(MeshRenderData *rdata,
const int vert_len = mesh_render_data_verts_len_get(rdata);
const int tri_len = mesh_render_data_looptri_len_get(rdata);
cache->is_manifold = true;
/* Allocate max but only used indices are sent to GPU. */
Gwn_IndexBufBuilder elb;
GWN_indexbuf_init(&elb, GWN_PRIM_LINES_ADJ, tri_len * 3, vert_len);
@@ -3279,6 +3284,7 @@ static Gwn_IndexBuf *mesh_batch_cache_get_edges_adjacency(MeshRenderData *rdata,
/* Don't share edge if triangles have non matching winding. */
GWN_indexbuf_add_line_adj_verts(&elb, v0, v1, v2, v0);
GWN_indexbuf_add_line_adj_verts(&elb, v_opposite, v1, v2, v_opposite);
cache->is_manifold = false;
}
else {
GWN_indexbuf_add_line_adj_verts(&elb, v0, v1, v2, v_opposite);
@@ -3303,6 +3309,7 @@ static Gwn_IndexBuf *mesh_batch_cache_get_edges_adjacency(MeshRenderData *rdata,
SWAP(unsigned int, v1, v2);
}
GWN_indexbuf_add_line_adj_verts(&elb, v0, v1, v2, v0);
cache->is_manifold = false;
}
BLI_edgehashIterator_free(ehi);
BLI_edgehash_free(eh, NULL);
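
The adjacency builder above starts from cache->is_manifold = true and clears it when it meets an edge with non-matching winding or an unpaired (border) edge. As a standalone illustration of that criterion — every undirected edge of a closed, consistently wound triangle mesh is used exactly twice, once in each direction — here is a hedged brute-force sketch; Blender's real code walks an edge hash instead of this quadratic table:

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

/* Illustrative only, not Blender API: count directed edge uses and check
 * that each undirected edge is seen once in each direction. */
typedef struct { uint32_t v[3]; } Tri;

static bool tri_mesh_is_manifold(const Tri *tris, int tri_len, int vert_len)
{
	uint8_t *dir_count = calloc((size_t)vert_len * (size_t)vert_len, 1);
	bool is_manifold = true;
	for (int t = 0; t < tri_len; t++) {
		for (int k = 0; k < 3; k++) {
			uint32_t a = tris[t].v[k], b = tris[t].v[(k + 1) % 3];
			dir_count[(size_t)a * (size_t)vert_len + b]++;
		}
	}
	for (size_t a = 0; a < (size_t)vert_len && is_manifold; a++) {
		for (size_t b = a + 1; b < (size_t)vert_len; b++) {
			uint8_t ab = dir_count[a * (size_t)vert_len + b];
			uint8_t ba = dir_count[b * (size_t)vert_len + a];
			if ((ab || ba) && !(ab == 1 && ba == 1)) {
				is_manifold = false; /* border edge or inconsistent winding */
				break;
			}
		}
	}
	free(dir_count);
	return is_manifold;
}
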
@@ -3822,7 +3829,7 @@ Gwn_Batch *DRW_mesh_batch_cache_get_fancy_edges(Mesh *me)
return cache->fancy_edges;
}
Gwn_Batch *DRW_mesh_batch_cache_get_edge_detection(Mesh *me)
Gwn_Batch *DRW_mesh_batch_cache_get_edge_detection(Mesh *me, bool *r_is_manifold)
{
MeshBatchCache *cache = mesh_batch_cache_get(me);
@@ -3838,6 +3845,10 @@ Gwn_Batch *DRW_mesh_batch_cache_get_edge_detection(Mesh *me)
mesh_render_data_free(rdata);
}
if (r_is_manifold) {
*r_is_manifold = cache->is_manifold;
}
return cache->edge_detection;
}


@@ -333,8 +333,8 @@ void drw_state_set(DRWState state)
}
else if ((state & DRW_STATE_WRITE_STENCIL_SHADOW_FAIL) != 0) {
glStencilMask(0xFF);
glStencilOpSeparate(GL_BACK, GL_KEEP, GL_INCR_WRAP, GL_KEEP);
glStencilOpSeparate(GL_FRONT, GL_KEEP, GL_DECR_WRAP, GL_KEEP);
glStencilOpSeparate(GL_BACK, GL_KEEP, GL_DECR_WRAP, GL_KEEP);
glStencilOpSeparate(GL_FRONT, GL_KEEP, GL_INCR_WRAP, GL_KEEP);
}
/* Stencil Test */
else if ((state & (DRW_STATE_STENCIL_EQUAL | DRW_STATE_STENCIL_NEQUAL)) != 0) {
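
Now that manifold and non-manifold objects write into the same stencil buffer with different methods in one frame, swapping INCR/DECR between the two faces presumably makes the depth-fail path accumulate the same signed count as the depth-pass path, so a single read-side test covers every object. A minimal read-side sketch, assuming shadowed fragments are the ones left with a non-zero stencil value:

#include <stdbool.h>
#include <GL/gl.h>

/* Sketch only: read-side stencil state after the shadow volumes were
 * rasterized. Under either counting method a shadowed fragment ends up
 * with a non-zero stencil value, a lit one with zero. */
static void shadow_stencil_read(bool keep_shadowed)
{
	glStencilMask(0x00); /* stencil becomes read-only */
	glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP);
	glStencilFunc(keep_shadowed ? GL_NOTEQUAL : GL_EQUAL, 0, 0xFF);
}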