Fix T95697: GPU subdivision ignores custom normals
Similarly to CPU subdivision, we interpolate custom loop normals from the coarse mesh and use the result for the final normals.
This commit is contained in:
parent
430ced76d5
commit
48b26d9c2e
Notes:
blender-bot
2023-02-14 10:35:28 +01:00
Referenced by issue #95697, GPU Subdivision doesn't retain custom normals
|
@ -1355,8 +1355,9 @@ void draw_subdiv_interp_custom_data(const DRWSubdivCache *cache,
|
|||
|
||||
drw_subdiv_compute_dispatch(cache, shader, 0, dst_offset, cache->num_subdiv_quads);
|
||||
|
||||
/* This generates a vertex buffer, so we need to put a barrier on the vertex attribute array. */
|
||||
GPU_memory_barrier(GPU_BARRIER_VERTEX_ATTRIB_ARRAY);
|
||||
/* This generates a vertex buffer, so we need to put a barrier on the vertex attribute array. Put
|
||||
* a barrier on the shader storage as we may use the result in another compute shader. */
|
||||
GPU_memory_barrier(GPU_BARRIER_SHADER_STORAGE | GPU_BARRIER_VERTEX_ATTRIB_ARRAY);
|
||||
|
||||
/* Cleanup. */
|
||||
GPU_shader_unbind();
|
||||
|
@ -1437,6 +1438,28 @@ void draw_subdiv_finalize_normals(const DRWSubdivCache *cache,
|
|||
GPU_shader_unbind();
|
||||
}
|
||||
|
||||
void draw_subdiv_finalize_custom_normals(const DRWSubdivCache *cache,
|
||||
GPUVertBuf *src_custom_normals,
|
||||
GPUVertBuf *pos_nor)
|
||||
{
|
||||
GPUShader *shader = get_subdiv_shader(SHADER_BUFFER_NORMALS_FINALIZE, "#define CUSTOM_NORMALS");
|
||||
GPU_shader_bind(shader);
|
||||
|
||||
GPU_vertbuf_bind_as_ssbo(src_custom_normals, 0);
|
||||
/* outputPosNor is bound at index 2 in the base shader. */
|
||||
GPU_vertbuf_bind_as_ssbo(pos_nor, 2);
|
||||
|
||||
drw_subdiv_compute_dispatch(cache, shader, 0, 0, cache->num_subdiv_quads);
|
||||
|
||||
/* This generates a vertex buffer, so we need to put a barrier on the vertex attribute array.
|
||||
* We also need it for subsequent compute shaders, so a barrier on the shader storage is also
|
||||
* needed. */
|
||||
GPU_memory_barrier(GPU_BARRIER_SHADER_STORAGE | GPU_BARRIER_VERTEX_ATTRIB_ARRAY);
|
||||
|
||||
/* Cleanup. */
|
||||
GPU_shader_unbind();
|
||||
}
|
||||
|
||||
void draw_subdiv_build_tris_buffer(const DRWSubdivCache *cache,
|
||||
GPUIndexBuf *subdiv_tris,
|
||||
const int material_count)
|
||||
|
@ -1828,6 +1851,11 @@ static bool draw_subdiv_create_requested_buffers(const Scene *scene,
|
|||
/* We can only evaluate limit normals if the patches are adaptive. */
|
||||
draw_cache->do_limit_normals = settings.is_adaptive;
|
||||
|
||||
draw_cache->use_custom_loop_normals = (smd->flags & eSubsurfModifierFlag_UseCustomNormals) &&
|
||||
(mesh_eval->flag & ME_AUTOSMOOTH) &&
|
||||
CustomData_has_layer(&mesh_eval->ldata,
|
||||
CD_CUSTOMLOOPNORMAL);
|
||||
|
||||
if (DRW_ibo_requested(mbc->buff.ibo.tris)) {
|
||||
draw_subdiv_cache_ensure_mat_offsets(draw_cache, mesh_eval, batch_cache->mat_len);
|
||||
}
|
||||
|
|
|
@ -67,6 +67,7 @@ typedef struct DRWSubdivCache {
|
|||
struct Subdiv *subdiv;
|
||||
bool optimal_display;
|
||||
bool do_limit_normals;
|
||||
bool use_custom_loop_normals;
|
||||
|
||||
/* Coordinates used to evaluate patches for UVs, positions, and normals. */
|
||||
struct GPUVertBuf *patch_coords;
|
||||
|
@ -186,6 +187,10 @@ void draw_subdiv_finalize_normals(const DRWSubdivCache *cache,
|
|||
struct GPUVertBuf *subdiv_loop_subdiv_vert_index,
|
||||
struct GPUVertBuf *pos_nor);
|
||||
|
||||
void draw_subdiv_finalize_custom_normals(const DRWSubdivCache *cache,
|
||||
GPUVertBuf *src_custom_normals,
|
||||
GPUVertBuf *pos_nor);
|
||||
|
||||
void draw_subdiv_extract_pos_nor(const DRWSubdivCache *cache,
|
||||
struct GPUVertBuf *pos_nor,
|
||||
bool do_limit_normals);
|
||||
|
|
|
@ -216,6 +216,16 @@ static GPUVertFormat *get_normals_format()
|
|||
return &format;
|
||||
}
|
||||
|
||||
static GPUVertFormat *get_custom_normals_format()
|
||||
{
|
||||
static GPUVertFormat format = {0};
|
||||
if (format.attr_len == 0) {
|
||||
GPU_vertformat_attr_add(&format, "nor", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
|
||||
GPU_vertformat_alias_add(&format, "lnor");
|
||||
}
|
||||
return &format;
|
||||
}
|
||||
|
||||
static void extract_pos_nor_init_subdiv(const DRWSubdivCache *subdiv_cache,
|
||||
const MeshRenderData *mr,
|
||||
struct MeshBatchCache *UNUSED(cache),
|
||||
|
@ -223,7 +233,8 @@ static void extract_pos_nor_init_subdiv(const DRWSubdivCache *subdiv_cache,
|
|||
void *UNUSED(data))
|
||||
{
|
||||
GPUVertBuf *vbo = static_cast<GPUVertBuf *>(buffer);
|
||||
const bool do_limit_normals = subdiv_cache->do_limit_normals;
|
||||
const bool do_limit_normals = subdiv_cache->do_limit_normals &&
|
||||
!subdiv_cache->use_custom_loop_normals;
|
||||
|
||||
/* Initialize the vertex buffer, it was already allocated. */
|
||||
GPU_vertbuf_init_build_on_device(
|
||||
|
@ -231,7 +242,31 @@ static void extract_pos_nor_init_subdiv(const DRWSubdivCache *subdiv_cache,
|
|||
|
||||
draw_subdiv_extract_pos_nor(subdiv_cache, vbo, do_limit_normals);
|
||||
|
||||
if (!do_limit_normals) {
|
||||
if (subdiv_cache->use_custom_loop_normals) {
|
||||
Mesh *coarse_mesh = subdiv_cache->mesh;
|
||||
float(*lnors)[3] = static_cast<float(*)[3]>(
|
||||
CustomData_get_layer(&coarse_mesh->ldata, CD_NORMAL));
|
||||
BLI_assert(lnors != NULL);
|
||||
|
||||
GPUVertBuf *src_custom_normals = GPU_vertbuf_calloc();
|
||||
GPU_vertbuf_init_with_format(src_custom_normals, get_custom_normals_format());
|
||||
GPU_vertbuf_data_alloc(src_custom_normals, coarse_mesh->totloop);
|
||||
|
||||
memcpy(
|
||||
GPU_vertbuf_get_data(src_custom_normals), lnors, sizeof(float[3]) * coarse_mesh->totloop);
|
||||
|
||||
GPUVertBuf *dst_custom_normals = GPU_vertbuf_calloc();
|
||||
GPU_vertbuf_init_build_on_device(
|
||||
dst_custom_normals, get_custom_normals_format(), subdiv_cache->num_subdiv_loops);
|
||||
|
||||
draw_subdiv_interp_custom_data(subdiv_cache, src_custom_normals, dst_custom_normals, 3, 0);
|
||||
|
||||
draw_subdiv_finalize_custom_normals(subdiv_cache, dst_custom_normals, vbo);
|
||||
|
||||
GPU_vertbuf_discard(src_custom_normals);
|
||||
GPU_vertbuf_discard(dst_custom_normals);
|
||||
}
|
||||
else if (!do_limit_normals) {
|
||||
/* We cannot evaluate vertex normals using the limit surface, so compute them manually. */
|
||||
GPUVertBuf *subdiv_loop_subdiv_vert_index = draw_subdiv_build_origindex_buffer(
|
||||
subdiv_cache->subdiv_loop_subdiv_vert_index, subdiv_cache->num_subdiv_loops);
|
||||
|
|
|
@ -1,6 +1,18 @@
|
|||
|
||||
/* To be compiled with common_subdiv_lib.glsl */
|
||||
|
||||
#ifdef CUSTOM_NORMALS
|
||||
struct CustomNormal {
|
||||
float x;
|
||||
float y;
|
||||
float z;
|
||||
};
|
||||
|
||||
layout(std430, binding = 0) readonly buffer inputNormals
|
||||
{
|
||||
CustomNormal custom_normals[];
|
||||
};
|
||||
#else
|
||||
layout(std430, binding = 0) readonly buffer inputNormals
|
||||
{
|
||||
vec3 vertex_normals[];
|
||||
|
@ -10,6 +22,7 @@ layout(std430, binding = 1) readonly buffer inputSubdivVertLoopMap
|
|||
{
|
||||
uint vert_loop_map[];
|
||||
};
|
||||
#endif
|
||||
|
||||
layout(std430, binding = 2) buffer outputPosNor
|
||||
{
|
||||
|
@ -26,9 +39,17 @@ void main()
|
|||
|
||||
uint start_loop_index = quad_index * 4;
|
||||
|
||||
#ifdef CUSTOM_NORMALS
|
||||
for (int i = 0; i < 4; i++) {
|
||||
CustomNormal custom_normal = custom_normals[start_loop_index + i];
|
||||
vec3 nor = vec3(custom_normal.x, custom_normal.y, custom_normal.z);
|
||||
set_vertex_nor(pos_nor[start_loop_index + i], normalize(nor));
|
||||
}
|
||||
#else
|
||||
for (int i = 0; i < 4; i++) {
|
||||
uint subdiv_vert_index = vert_loop_map[start_loop_index + i];
|
||||
vec3 nor = vertex_normals[subdiv_vert_index];
|
||||
set_vertex_nor(pos_nor[start_loop_index + i], nor);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue