DrawManager: High-quality normals for non-meshes

This adds high-quality normals for non-mesh object types. These include:

* Volumetric Object Wireframe
* Metaballs
* Extracted Curves
* Curves in edit mode

This is in preparation for fixing a regression in recent AMD
drivers, where the `GL_INT_2_10_10_10_REV` data type does not
work on Polaris cards.
Jeroen Bakker 2020-12-18 16:06:26 +01:00
parent 17be2149a8
commit d11a87b88c
Notes: blender-bot 2023-02-13 22:19:24 +01:00
Referenced by issue #84460, Missing surface in 3D viewport with AMD GPU
Referenced by issue #83216, Potential candidates for corrective releases
Referenced by issue #82856, After updating the video card drivers to 20.11.2, interface artifacts appeared.
Referenced by issue #77348, Blender LTS: Maintenance Task 2.83
7 changed files with 226 additions and 89 deletions
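
The same pattern recurs in every file below: each vertex-buffer builder keeps two static vertex formats, a default one with 10-bit packed normals (`GPU_COMP_I10`) and a high-quality one with 16-bit normals (`GPU_COMP_I16`), and selects between them at runtime from the scene's `SCE_PERF_HQ_NORMALS` performance flag. A minimal sketch of that pattern, assuming the real Blender GPU API used in the hunks below; `example_build_nor_vbo()` itself is hypothetical:

```c
/* A minimal sketch of the dual-format pattern used throughout this commit.
 * The GPU_* and SCE_* names are the real Blender APIs from the hunks below;
 * example_build_nor_vbo() itself is hypothetical. */
#include "DNA_scene_types.h"
#include "GPU_vertex_buffer.h"
#include "GPU_vertex_format.h"

static void example_build_nor_vbo(GPUVertBuf *vbo, const Scene *scene, const float nor[3])
{
  /* High-quality normals are an opt-in scene performance flag. */
  const bool do_hq_normals = (scene->r.perf_flag & SCE_PERF_HQ_NORMALS) != 0;

  /* One immutable static format per quality level. */
  static GPUVertFormat format = {0};
  static GPUVertFormat format_hq = {0};
  static struct {
    uint nor, nor_hq;
  } attr_id;
  if (format.attr_len == 0) {
    attr_id.nor = GPU_vertformat_attr_add(
        &format, "nor", GPU_COMP_I10, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
    attr_id.nor_hq = GPU_vertformat_attr_add(
        &format_hq, "nor", GPU_COMP_I16, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
  }

  /* Pick the format and attribute id matching the requested quality. */
  const uint nor_id = do_hq_normals ? attr_id.nor_hq : attr_id.nor;
  GPU_vertbuf_init_with_format(vbo, do_hq_normals ? &format_hq : &format);
  GPU_vertbuf_data_alloc(vbo, 1);

  /* GPUNormal is a union over both encodings; the flag picks the member. */
  GPUNormal pnor;
  GPU_normal_convert_v3(&pnor, nor, do_hq_normals);
  GPU_vertbuf_attr_set(vbo, nor_id, 0, &pnor);
}
```

Using `GPU_COMP_I16` sidesteps the `GL_INT_2_10_10_10_REV` vertex format entirely, which is what works around the Polaris driver regression.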

View File

@@ -3590,7 +3590,7 @@ void drw_batch_cache_generate_requested(Object *ob)
DRW_mesh_batch_cache_create_requested(
DST.task_graph, ob, mesh_eval, scene, is_paint_mode, use_hide);
}
-DRW_curve_batch_cache_create_requested(ob);
+DRW_curve_batch_cache_create_requested(ob, scene);
break;
/* TODO all cases */
default:

View File

@@ -87,7 +87,7 @@ void DRW_mesh_batch_cache_free_old(struct Mesh *me, int ctime);
void DRW_vertbuf_create_wiredata(struct GPUVertBuf *vbo, const int vert_len);
/* Curve */
-void DRW_curve_batch_cache_create_requested(struct Object *ob);
+void DRW_curve_batch_cache_create_requested(struct Object *ob, const struct Scene *scene);
int DRW_curve_material_count_get(struct Curve *cu);
@@ -116,12 +116,15 @@ struct GPUBatch *DRW_metaball_batch_cache_get_edge_detection(struct Object *ob,
bool *r_is_manifold);
/* DispList */
-void DRW_displist_vertbuf_create_pos_and_nor(struct ListBase *lb, struct GPUVertBuf *vbo);
+void DRW_displist_vertbuf_create_pos_and_nor(struct ListBase *lb,
+struct GPUVertBuf *vbo,
+const struct Scene *scene);
void DRW_displist_vertbuf_create_wiredata(struct ListBase *lb, struct GPUVertBuf *vbo);
void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv_and_tan(struct ListBase *lb,
struct GPUVertBuf *vbo_pos_nor,
struct GPUVertBuf *vbo_uv,
-struct GPUVertBuf *vbo_tan);
+struct GPUVertBuf *vbo_tan,
+const struct Scene *scene);
void DRW_displist_indexbuf_create_lines_in_order(struct ListBase *lb, struct GPUIndexBuf *ibo);
void DRW_displist_indexbuf_create_triangles_in_order(struct ListBase *lb, struct GPUIndexBuf *ibo);
void DRW_displist_indexbuf_create_triangles_loop_split_by_material(struct ListBase *lb,

View File

@@ -630,11 +630,18 @@ static void curve_create_curves_lines(CurveRenderData *rdata, GPUIndexBuf *ibo_c
GPU_indexbuf_build_in_place(&elb, ibo_curve_lines);
}
-static void curve_create_edit_curves_nor(CurveRenderData *rdata, GPUVertBuf *vbo_curves_nor)
+static void curve_create_edit_curves_nor(CurveRenderData *rdata,
+GPUVertBuf *vbo_curves_nor,
+const Scene *scene)
{
+const bool do_hq_normals = (scene->r.perf_flag & SCE_PERF_HQ_NORMALS) != 0;
static GPUVertFormat format = {0};
+static GPUVertFormat format_hq = {0};
-/* TODO(jeroen): add support for high quality normals */
static struct {
uint pos, nor, tan, rad;
+uint pos_hq, nor_hq, tan_hq, rad_hq;
} attr_id;
if (format.attr_len == 0) {
/* initialize vertex formats */
@@ -644,17 +651,31 @@ static void curve_create_edit_curves_nor(CurveRenderData *rdata, GPUVertBuf *vbo
&format, "nor", GPU_COMP_I10, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
attr_id.tan = GPU_vertformat_attr_add(
&format, "tan", GPU_COMP_I10, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
+attr_id.pos_hq = GPU_vertformat_attr_add(&format_hq, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+attr_id.rad_hq = GPU_vertformat_attr_add(&format_hq, "rad", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
+attr_id.nor_hq = GPU_vertformat_attr_add(
+&format_hq, "nor", GPU_COMP_I16, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
+attr_id.tan_hq = GPU_vertformat_attr_add(
+&format_hq, "tan", GPU_COMP_I16, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
}
+const GPUVertFormat *format_ptr = do_hq_normals ? &format_hq : &format;
int verts_len_capacity = curve_render_data_normal_len_get(rdata) * 2;
int vbo_len_used = 0;
-GPU_vertbuf_init_with_format(vbo_curves_nor, &format);
+GPU_vertbuf_init_with_format(vbo_curves_nor, format_ptr);
GPU_vertbuf_data_alloc(vbo_curves_nor, verts_len_capacity);
const BevList *bl;
const Nurb *nu;
+const uint pos_id = do_hq_normals ? attr_id.pos_hq : attr_id.pos;
+const uint nor_id = do_hq_normals ? attr_id.nor_hq : attr_id.nor;
+const uint tan_id = do_hq_normals ? attr_id.tan_hq : attr_id.tan;
+const uint rad_id = do_hq_normals ? attr_id.rad_hq : attr_id.rad;
for (bl = rdata->ob_curve_cache->bev.first, nu = rdata->nurbs->first; nu && bl;
bl = bl->next, nu = nu->next) {
const BevPoint *bevp = bl->bevpoints;
@@ -665,14 +686,15 @@ static void curve_create_edit_curves_nor(CurveRenderData *rdata, GPUVertBuf *vbo
float nor[3] = {1.0f, 0.0f, 0.0f};
mul_qt_v3(bevp->quat, nor);
-GPUPackedNormal pnor = GPU_normal_convert_i10_v3(nor);
-GPUPackedNormal ptan = GPU_normal_convert_i10_v3(bevp->dir);
+GPUNormal pnor;
+GPUNormal ptan;
+GPU_normal_convert_v3(&pnor, nor, do_hq_normals);
+GPU_normal_convert_v3(&ptan, bevp->dir, do_hq_normals);
/* Only set attributes for one vertex. */
-GPU_vertbuf_attr_set(vbo_curves_nor, attr_id.pos, vbo_len_used, bevp->vec);
-GPU_vertbuf_attr_set(vbo_curves_nor, attr_id.rad, vbo_len_used, &bevp->radius);
-GPU_vertbuf_attr_set(vbo_curves_nor, attr_id.nor, vbo_len_used, &pnor);
-GPU_vertbuf_attr_set(vbo_curves_nor, attr_id.tan, vbo_len_used, &ptan);
+GPU_vertbuf_attr_set(vbo_curves_nor, pos_id, vbo_len_used, bevp->vec);
+GPU_vertbuf_attr_set(vbo_curves_nor, rad_id, vbo_len_used, &bevp->radius);
+GPU_vertbuf_attr_set(vbo_curves_nor, nor_id, vbo_len_used, &pnor);
+GPU_vertbuf_attr_set(vbo_curves_nor, tan_id, vbo_len_used, &ptan);
vbo_len_used++;
/* Skip the other vertex (it does not need to be offsetted). */
@@ -941,7 +963,7 @@ int DRW_curve_material_count_get(Curve *cu)
/** \name Grouped batch generation
* \{ */
-void DRW_curve_batch_cache_create_requested(Object *ob)
+void DRW_curve_batch_cache_create_requested(Object *ob, const struct Scene *scene)
{
BLI_assert(ELEM(ob->type, OB_CURVE, OB_SURF, OB_FONT));
@@ -1047,7 +1069,7 @@ void DRW_curve_batch_cache_create_requested(Object *ob)
/* Generate VBOs */
if (DRW_vbo_requested(cache->ordered.pos_nor)) {
-DRW_displist_vertbuf_create_pos_and_nor(lb, cache->ordered.pos_nor);
+DRW_displist_vertbuf_create_pos_and_nor(lb, cache->ordered.pos_nor, scene);
}
if (DRW_vbo_requested(cache->ordered.edge_fac)) {
DRW_displist_vertbuf_create_wiredata(lb, cache->ordered.edge_fac);
@@ -1059,7 +1081,7 @@ void DRW_curve_batch_cache_create_requested(Object *ob)
if (DRW_vbo_requested(cache->ordered.loop_pos_nor) ||
DRW_vbo_requested(cache->ordered.loop_uv) || DRW_vbo_requested(cache->ordered.loop_tan)) {
DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv_and_tan(
-lb, cache->ordered.loop_pos_nor, cache->ordered.loop_uv, cache->ordered.loop_tan);
+lb, cache->ordered.loop_pos_nor, cache->ordered.loop_uv, cache->ordered.loop_tan, scene);
}
if (DRW_ibo_requested(cache->surf_per_mat_tris[0])) {
@@ -1087,7 +1109,7 @@ void DRW_curve_batch_cache_create_requested(Object *ob)
rdata, cache->edit.pos, cache->edit.data, cache->ibo.edit_verts, cache->ibo.edit_lines);
}
if (DRW_vbo_requested(cache->edit.curves_nor)) {
-curve_create_edit_curves_nor(rdata, cache->edit.curves_nor);
+curve_create_edit_curves_nor(rdata, cache->edit.curves_nor, scene);
}
curve_render_data_free(rdata);

View File

@@ -32,6 +32,7 @@
#include "BLI_utildefines.h"
#include "DNA_curve_types.h"
#include "DNA_scene_types.h"
#include "BKE_displist.h"
#include "BKE_displist_tangent.h"
@@ -174,20 +175,31 @@ static int displist_indexbufbuilder_tess_set(
return v_idx;
}
-void DRW_displist_vertbuf_create_pos_and_nor(ListBase *lb, GPUVertBuf *vbo)
+void DRW_displist_vertbuf_create_pos_and_nor(ListBase *lb, GPUVertBuf *vbo, const Scene *scene)
{
+const bool do_hq_normals = (scene->r.perf_flag & SCE_PERF_HQ_NORMALS) != 0;
static GPUVertFormat format = {0};
+static GPUVertFormat format_hq = {0};
static struct {
uint pos, nor;
+uint pos_hq, nor_hq;
} attr_id;
if (format.attr_len == 0) {
/* initialize vertex format */
attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
attr_id.nor = GPU_vertformat_attr_add(
&format, "nor", GPU_COMP_I10, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
+/* initialize vertex format */
+attr_id.pos_hq = GPU_vertformat_attr_add(&format_hq, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+attr_id.nor_hq = GPU_vertformat_attr_add(
+&format_hq, "nor", GPU_COMP_I16, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
}
-GPU_vertbuf_init_with_format(vbo, &format);
+uint pos_id = do_hq_normals ? attr_id.pos_hq : attr_id.pos;
+uint nor_id = do_hq_normals ? attr_id.nor_hq : attr_id.nor;
+GPU_vertbuf_init_with_format(vbo, do_hq_normals ? &format_hq : &format);
GPU_vertbuf_data_alloc(vbo, curve_render_surface_vert_len_get(lb));
BKE_displist_normals_add(lb);
@@ -200,10 +212,11 @@ void DRW_displist_vertbuf_create_pos_and_nor(ListBase *lb, GPUVertBuf *vbo)
const float *fp_no = dl->nors;
const int vbo_end = vbo_len_used + dl_vert_len(dl);
while (vbo_len_used < vbo_end) {
-GPU_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used, fp_co);
+GPU_vertbuf_attr_set(vbo, pos_id, vbo_len_used, fp_co);
if (fp_no) {
-GPUPackedNormal vnor_pack = GPU_normal_convert_i10_v3(fp_no);
-GPU_vertbuf_attr_set(vbo, attr_id.nor, vbo_len_used, &vnor_pack);
+GPUNormal vnor_pack;
+GPU_normal_convert_v3(&vnor_pack, fp_no, do_hq_normals);
+GPU_vertbuf_attr_set(vbo, nor_id, vbo_len_used, &vnor_pack);
if (ndata_is_single == false) {
fp_no += 3;
}
@@ -367,6 +380,24 @@ static void surf_uv_quad(const DispList *dl, const uint quad[4], float r_uv[4][2
}
}
+static void displist_vertbuf_attr_set_nor(GPUVertBufRaw *step,
+const GPUNormal *n1,
+const GPUNormal *n2,
+const GPUNormal *n3,
+const bool do_hq_normals)
+{
+if (do_hq_normals) {
+copy_v3_v3_short(GPU_vertbuf_raw_step(step), n1->high);
+copy_v3_v3_short(GPU_vertbuf_raw_step(step), n2->high);
+copy_v3_v3_short(GPU_vertbuf_raw_step(step), n3->high);
+}
+else {
+*(GPUPackedNormal *)GPU_vertbuf_raw_step(step) = n1->low;
+*(GPUPackedNormal *)GPU_vertbuf_raw_step(step) = n2->low;
+*(GPUPackedNormal *)GPU_vertbuf_raw_step(step) = n3->low;
+}
+}
static void displist_vertbuf_attr_set_tri_pos_nor_uv(GPUVertBufRaw *pos_step,
GPUVertBufRaw *nor_step,
GPUVertBufRaw *uv_step,
@@ -374,36 +405,30 @@ static void displist_vertbuf_attr_set_tri_pos_nor_uv(GPUVertBufRaw *pos_step,
const float v1[3],
const float v2[3],
const float v3[3],
-const GPUPackedNormal *n1,
-const GPUPackedNormal *n2,
-const GPUPackedNormal *n3,
-const GPUPackedNormal *t1,
-const GPUPackedNormal *t2,
-const GPUPackedNormal *t3,
+const GPUNormal *n1,
+const GPUNormal *n2,
+const GPUNormal *n3,
+const GPUNormal *t1,
+const GPUNormal *t2,
+const GPUNormal *t3,
const float uv1[2],
const float uv2[2],
-const float uv3[2])
+const float uv3[2],
+const bool do_hq_normals)
{
if (pos_step->size != 0) {
copy_v3_v3(GPU_vertbuf_raw_step(pos_step), v1);
copy_v3_v3(GPU_vertbuf_raw_step(pos_step), v2);
copy_v3_v3(GPU_vertbuf_raw_step(pos_step), v3);
-*(GPUPackedNormal *)GPU_vertbuf_raw_step(nor_step) = *n1;
-*(GPUPackedNormal *)GPU_vertbuf_raw_step(nor_step) = *n2;
-*(GPUPackedNormal *)GPU_vertbuf_raw_step(nor_step) = *n3;
+displist_vertbuf_attr_set_nor(nor_step, n1, n2, n3, do_hq_normals);
}
if (uv_step->size != 0) {
normal_float_to_short_v2(GPU_vertbuf_raw_step(uv_step), uv1);
normal_float_to_short_v2(GPU_vertbuf_raw_step(uv_step), uv2);
normal_float_to_short_v2(GPU_vertbuf_raw_step(uv_step), uv3);
}
if (tan_step->size != 0) {
-*(GPUPackedNormal *)GPU_vertbuf_raw_step(tan_step) = *t1;
-*(GPUPackedNormal *)GPU_vertbuf_raw_step(tan_step) = *t2;
-*(GPUPackedNormal *)GPU_vertbuf_raw_step(tan_step) = *t3;
+displist_vertbuf_attr_set_nor(tan_step, t1, t2, t3, do_hq_normals);
}
}
@@ -457,13 +482,19 @@ static void displist_surf_fnors_ensure(const DispList *dl, float (**fnors)[3])
void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv_and_tan(ListBase *lb,
GPUVertBuf *vbo_pos_nor,
GPUVertBuf *vbo_uv,
-GPUVertBuf *vbo_tan)
+GPUVertBuf *vbo_tan,
+const Scene *scene)
{
+const bool do_hq_normals = (scene->r.perf_flag & SCE_PERF_HQ_NORMALS) != 0;
static GPUVertFormat format_pos_nor = {0};
+static GPUVertFormat format_pos_nor_hq = {0};
static GPUVertFormat format_uv = {0};
static GPUVertFormat format_tan = {0};
+static GPUVertFormat format_tan_hq = {0};
static struct {
uint pos, nor, uv, tan;
+uint pos_hq, nor_hq, tan_hq;
} attr_id;
if (format_pos_nor.attr_len == 0) {
/* initialize vertex format */
@@ -471,6 +502,10 @@ void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv_and_tan(ListBase *lb,
&format_pos_nor, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
attr_id.nor = GPU_vertformat_attr_add(
&format_pos_nor, "nor", GPU_COMP_I10, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
+attr_id.pos_hq = GPU_vertformat_attr_add(
+&format_pos_nor_hq, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+attr_id.nor_hq = GPU_vertformat_attr_add(
+&format_pos_nor_hq, "nor", GPU_COMP_I16, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
/* UVs are in [0..1] range. We can compress them. */
attr_id.uv = GPU_vertformat_attr_add(
@@ -480,7 +515,13 @@ void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv_and_tan(ListBase *lb,
attr_id.tan = GPU_vertformat_attr_add(
&format_tan, "t", GPU_COMP_I10, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
GPU_vertformat_alias_add(&format_tan, "at");
+attr_id.tan_hq = GPU_vertformat_attr_add(
+&format_tan_hq, "t", GPU_COMP_I16, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
+GPU_vertformat_alias_add(&format_tan_hq, "at");
}
+uint pos_id = do_hq_normals ? attr_id.pos_hq : attr_id.pos;
+uint nor_id = do_hq_normals ? attr_id.nor_hq : attr_id.nor;
+uint tan_id = do_hq_normals ? attr_id.tan_hq : attr_id.tan;
int vbo_len_capacity = curve_render_surface_tri_len_get(lb) * 3;
@@ -490,10 +531,11 @@ void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv_and_tan(ListBase *lb,
GPUVertBufRaw tan_step = {0};
if (DRW_TEST_ASSIGN_VBO(vbo_pos_nor)) {
-GPU_vertbuf_init_with_format(vbo_pos_nor, &format_pos_nor);
+GPU_vertbuf_init_with_format(vbo_pos_nor,
+do_hq_normals ? &format_pos_nor_hq : &format_pos_nor);
GPU_vertbuf_data_alloc(vbo_pos_nor, vbo_len_capacity);
-GPU_vertbuf_attr_get_raw_data(vbo_pos_nor, attr_id.pos, &pos_step);
-GPU_vertbuf_attr_get_raw_data(vbo_pos_nor, attr_id.nor, &nor_step);
+GPU_vertbuf_attr_get_raw_data(vbo_pos_nor, pos_id, &pos_step);
+GPU_vertbuf_attr_get_raw_data(vbo_pos_nor, nor_id, &nor_step);
}
if (DRW_TEST_ASSIGN_VBO(vbo_uv)) {
GPU_vertbuf_init_with_format(vbo_uv, &format_uv);
@@ -501,9 +543,9 @@ void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv_and_tan(ListBase *lb,
GPU_vertbuf_attr_get_raw_data(vbo_uv, attr_id.uv, &uv_step);
}
if (DRW_TEST_ASSIGN_VBO(vbo_tan)) {
-GPU_vertbuf_init_with_format(vbo_tan, &format_tan);
+GPU_vertbuf_init_with_format(vbo_tan, do_hq_normals ? &format_tan_hq : &format_tan);
GPU_vertbuf_data_alloc(vbo_tan, vbo_len_capacity);
-GPU_vertbuf_attr_get_raw_data(vbo_tan, attr_id.tan, &tan_step);
+GPU_vertbuf_attr_get_raw_data(vbo_tan, tan_id, &tan_step);
}
BKE_displist_normals_add(lb);
@@ -518,16 +560,25 @@ void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv_and_tan(ListBase *lb,
if (dl->type == DL_INDEX3) {
/* Currently 'DL_INDEX3' is always a flat surface with a single normal. */
-const GPUPackedNormal pnor = GPU_normal_convert_i10_v3(dl->nors);
-GPUPackedNormal ptan = {0, 0, 0, 1};
+GPUNormal tangent_packed;
+GPUNormal normal_packed;
+GPU_normal_convert_v3(&normal_packed, dl->nors, do_hq_normals);
if (vbo_tan) {
float tan[4];
float(*tan_ptr)[4] = &tan;
BKE_displist_tangent_calc(dl, NULL, &tan_ptr);
-ptan = GPU_normal_convert_i10_v3(tan);
-ptan.w = (tan[3] > 0.0) ? 1 : -2;
+GPU_normal_convert_v3(&tangent_packed, tan, do_hq_normals);
}
+else {
+if (do_hq_normals) {
+tangent_packed.high[0] = 0;
+tangent_packed.high[1] = 0;
+tangent_packed.high[2] = 0;
+}
+else {
+tangent_packed.low = (GPUPackedNormal){0, 0, 0, 1};
+}
+}
const float x_max = (float)(dl->nr - 1);
@@ -547,15 +598,16 @@ void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv_and_tan(ListBase *lb,
verts[idx[0]],
verts[idx[2]],
verts[idx[1]],
-&pnor,
-&pnor,
-&pnor,
-&ptan,
-&ptan,
-&ptan,
+&normal_packed,
+&normal_packed,
+&normal_packed,
+&tangent_packed,
+&tangent_packed,
+&tangent_packed,
uv[0],
uv[2],
-uv[1]);
+uv[1],
+do_hq_normals);
}
}
else if (dl->type == DL_SURF) {
@@ -574,24 +626,23 @@ void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv_and_tan(ListBase *lb,
if (vbo_uv) {
surf_uv_quad(dl, quad, uv);
}
-GPUPackedNormal pnors_quad[4];
+GPUNormal pnors_quad[4];
+GPUNormal ptans_quad[4];
if (is_smooth) {
for (int j = 0; j < 4; j++) {
-pnors_quad[j] = GPU_normal_convert_i10_v3(nors[quad[j]]);
+GPU_normal_convert_v3(&pnors_quad[j], nors[quad[j]], do_hq_normals);
}
}
else {
-pnors_quad[0] = GPU_normal_convert_i10_v3(fnors[quad_index]);
+GPU_normal_convert_v3(&pnors_quad[0], fnors[quad_index], do_hq_normals);
pnors_quad[1] = pnors_quad[2] = pnors_quad[3] = pnors_quad[0];
}
-GPUPackedNormal ptans_quad[4];
if (vbo_tan) {
for (int j = 0; j < 4; j++) {
float *tan = tangents[quad_index * 4 + j];
-ptans_quad[j] = GPU_normal_convert_i10_v3(tan);
-ptans_quad[j].w = (tan[3] > 0.0f) ? 1 : -2;
+GPU_normal_convert_v3(&ptans_quad[j], tan, do_hq_normals);
}
}
@@ -610,7 +661,8 @@ void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv_and_tan(ListBase *lb,
&ptans_quad[1],
uv[2],
uv[0],
-uv[1]);
+uv[1],
+do_hq_normals);
displist_vertbuf_attr_set_tri_pos_nor_uv(&pos_step,
&nor_step,
@@ -627,7 +679,8 @@ void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv_and_tan(ListBase *lb,
&ptans_quad[3],
uv[0],
uv[2],
-uv[3]);
+uv[3],
+do_hq_normals);
}
SURFACE_QUAD_ITER_END
@@ -643,12 +696,12 @@ void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv_and_tan(ListBase *lb,
for (int i = 0; i < i_end; i++, idx += 4) {
const bool is_tri = idx[2] != idx[3];
-GPUPackedNormal ptan = {0};
-GPUPackedNormal pnors_idx[4];
+GPUNormal ptan = {0};
+GPUNormal pnors_idx[4];
if (is_smooth) {
int idx_len = is_tri ? 3 : 4;
for (int j = 0; j < idx_len; j++) {
-pnors_idx[j] = GPU_normal_convert_i10_v3(nors[idx[j]]);
+GPU_normal_convert_v3(&pnors_idx[j], nors[idx[j]], do_hq_normals);
}
}
else {
@@ -659,7 +712,7 @@ void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv_and_tan(ListBase *lb,
else {
normal_quad_v3(nor_flat, verts[idx[0]], verts[idx[1]], verts[idx[2]], verts[idx[3]]);
}
-pnors_idx[0] = GPU_normal_convert_i10_v3(nor_flat);
+GPU_normal_convert_v3(&pnors_idx[0], nor_flat, do_hq_normals);
pnors_idx[1] = pnors_idx[2] = pnors_idx[3] = pnors_idx[0];
}
@@ -678,7 +731,8 @@ void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv_and_tan(ListBase *lb,
&ptan,
uv[0],
uv[2],
-uv[1]);
+uv[1],
+do_hq_normals);
if (is_tri) {
displist_vertbuf_attr_set_tri_pos_nor_uv(&pos_step,
@@ -696,7 +750,8 @@ void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv_and_tan(ListBase *lb,
&ptan,
uv[2],
uv[0],
-uv[3]);
+uv[3],
+do_hq_normals);
}
}
}

View File

@@ -36,6 +36,7 @@
#include "GPU_batch.h"
#include "DRW_render.h"
#include "draw_cache_impl.h" /* own include */
static void metaball_batch_cache_clear(MetaBall *mb);
@@ -151,12 +152,14 @@ void DRW_mball_batch_cache_free(MetaBall *mb)
MEM_SAFE_FREE(mb->batch_cache);
}
-static GPUVertBuf *mball_batch_cache_get_pos_and_normals(Object *ob, MetaBallBatchCache *cache)
+static GPUVertBuf *mball_batch_cache_get_pos_and_normals(Object *ob,
+MetaBallBatchCache *cache,
+const struct Scene *scene)
{
if (cache->pos_nor_in_order == NULL) {
ListBase *lb = &ob->runtime.curve_cache->disp;
cache->pos_nor_in_order = GPU_vertbuf_calloc();
-DRW_displist_vertbuf_create_pos_and_nor(lb, cache->pos_nor_in_order);
+DRW_displist_vertbuf_create_pos_and_nor(lb, cache->pos_nor_in_order, scene);
}
return cache->pos_nor_in_order;
}
@@ -184,13 +187,15 @@ GPUBatch *DRW_metaball_batch_cache_get_triangles_with_normals(Object *ob)
MetaBall *mb = ob->data;
MetaBallBatchCache *cache = metaball_batch_cache_get(mb);
+const DRWContextState *draw_ctx = DRW_context_state_get();
+const struct Scene *scene = draw_ctx->scene;
if (cache->batch == NULL) {
ListBase *lb = &ob->runtime.curve_cache->disp;
GPUIndexBuf *ibo = GPU_indexbuf_calloc();
DRW_displist_indexbuf_create_triangles_in_order(lb, ibo);
cache->batch = GPU_batch_create_ex(GPU_PRIM_TRIS,
-mball_batch_cache_get_pos_and_normals(ob, cache),
+mball_batch_cache_get_pos_and_normals(ob, cache, scene),
ibo,
GPU_BATCH_OWNS_INDEX);
}
@@ -230,6 +235,8 @@ GPUBatch *DRW_metaball_batch_cache_get_wireframes_face(Object *ob)
MetaBall *mb = ob->data;
MetaBallBatchCache *cache = metaball_batch_cache_get(mb);
+const DRWContextState *draw_ctx = DRW_context_state_get();
+const struct Scene *scene = draw_ctx->scene;
if (cache->face_wire.batch == NULL) {
ListBase *lb = &ob->runtime.curve_cache->disp;
@@ -240,10 +247,11 @@ GPUBatch *DRW_metaball_batch_cache_get_wireframes_face(Object *ob)
GPUIndexBuf *ibo = GPU_indexbuf_calloc();
DRW_displist_indexbuf_create_lines_in_order(lb, ibo);
-cache->face_wire.batch = GPU_batch_create_ex(GPU_PRIM_LINES,
-mball_batch_cache_get_pos_and_normals(ob, cache),
-ibo,
-GPU_BATCH_OWNS_INDEX);
+cache->face_wire.batch = GPU_batch_create_ex(
+GPU_PRIM_LINES,
+mball_batch_cache_get_pos_and_normals(ob, cache, scene),
+ibo,
+GPU_BATCH_OWNS_INDEX);
GPU_batch_vertbuf_add_ex(cache->face_wire.batch, vbo_wiredata, true);
}
@@ -260,11 +268,14 @@ struct GPUBatch *DRW_metaball_batch_cache_get_edge_detection(struct Object *ob,
MetaBall *mb = ob->data;
MetaBallBatchCache *cache = metaball_batch_cache_get(mb);
+const DRWContextState *draw_ctx = DRW_context_state_get();
+const struct Scene *scene = draw_ctx->scene;
if (cache->edge_detection == NULL) {
-cache->edge_detection = GPU_batch_create(GPU_PRIM_LINES_ADJ,
-mball_batch_cache_get_pos_and_normals(ob, cache),
-mball_batch_cache_get_edges_adj_lines(ob, cache));
+cache->edge_detection = GPU_batch_create(
+GPU_PRIM_LINES_ADJ,
+mball_batch_cache_get_pos_and_normals(ob, cache, scene),
+mball_batch_cache_get_edges_adj_lines(ob, cache));
}
if (r_is_manifold) {
@@ -282,8 +293,10 @@ struct GPUVertBuf *DRW_mball_batch_cache_pos_vertbuf_get(Object *ob)
MetaBall *mb = ob->data;
MetaBallBatchCache *cache = metaball_batch_cache_get(mb);
+const DRWContextState *draw_ctx = DRW_context_state_get();
+const struct Scene *scene = draw_ctx->scene;
-return mball_batch_cache_get_pos_and_normals(ob, cache);
+return mball_batch_cache_get_pos_and_normals(ob, cache, scene);
}
int DRW_metaball_material_count_get(MetaBall *mb)

View File

@@ -145,25 +145,45 @@ void DRW_volume_batch_cache_free(Volume *volume)
volume_batch_cache_clear(volume);
MEM_SAFE_FREE(volume->batch_cache);
}
+typedef struct VolumeWireframeUserData {
+Volume *volume;
+Scene *scene;
+} VolumeWireframeUserData;
static void drw_volume_wireframe_cb(
void *userdata, float (*verts)[3], int (*edges)[2], int totvert, int totedge)
{
-Volume *volume = userdata;
+VolumeWireframeUserData *data = userdata;
+Scene *scene = data->scene;
+Volume *volume = data->volume;
VolumeBatchCache *cache = volume->batch_cache;
+const bool do_hq_normals = (scene->r.perf_flag & SCE_PERF_HQ_NORMALS) != 0;
/* Create vertex buffer. */
static GPUVertFormat format = {0};
-static uint pos_id, nor_id;
+static GPUVertFormat format_hq = {0};
+static struct {
+uint pos_id, nor_id;
+uint pos_hq_id, nor_hq_id;
+} attr_id;
if (format.attr_len == 0) {
-pos_id = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
-nor_id = GPU_vertformat_attr_add(&format, "nor", GPU_COMP_I10, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
+attr_id.pos_id = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+attr_id.nor_id = GPU_vertformat_attr_add(
+&format, "nor", GPU_COMP_I10, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
+attr_id.pos_hq_id = GPU_vertformat_attr_add(&format_hq, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+attr_id.nor_hq_id = GPU_vertformat_attr_add(
+&format_hq, "nor", GPU_COMP_I16, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
}
static float normal[3] = {1.0f, 0.0f, 0.0f};
-GPUPackedNormal packed_normal = GPU_normal_convert_i10_v3(normal);
+GPUNormal packed_normal;
+GPU_normal_convert_v3(&packed_normal, normal, do_hq_normals);
+uint pos_id = do_hq_normals ? attr_id.pos_hq_id : attr_id.pos_id;
+uint nor_id = do_hq_normals ? attr_id.nor_hq_id : attr_id.nor_id;
-cache->face_wire.pos_nor_in_order = GPU_vertbuf_create_with_format(&format);
+cache->face_wire.pos_nor_in_order = GPU_vertbuf_create_with_format(do_hq_normals ? &format_hq :
+&format);
GPU_vertbuf_data_alloc(cache->face_wire.pos_nor_in_order, totvert);
GPU_vertbuf_attr_fill(cache->face_wire.pos_nor_in_order, pos_id, verts);
GPU_vertbuf_attr_fill_stride(cache->face_wire.pos_nor_in_order, nor_id, 0, &packed_normal);
@@ -209,7 +229,11 @@ GPUBatch *DRW_volume_batch_cache_get_wireframes_face(Volume *volume)
}
/* Create wireframe from OpenVDB tree. */
-BKE_volume_grid_wireframe(volume, volume_grid, drw_volume_wireframe_cb, volume);
+const DRWContextState *draw_ctx = DRW_context_state_get();
+VolumeWireframeUserData userdata;
+userdata.volume = volume;
+userdata.scene = draw_ctx->scene;
+BKE_volume_grid_wireframe(volume, volume_grid, drw_volume_wireframe_cb, &userdata);
}
return cache->face_wire.batch;

View File

@@ -27,6 +27,7 @@
#include "BLI_assert.h"
#include "BLI_compiler_compat.h"
#include "BLI_math_geom.h"
#include "GPU_common.h"
#ifdef __cplusplus
@@ -140,6 +141,13 @@ typedef struct GPUPackedNormal {
int w : 2; /* 0 by default, can manually set to { -2, -1, 0, 1 } */
} GPUPackedNormal;
+typedef struct GPUNormal {
+union {
+GPUPackedNormal low;
+short high[3];
+};
+} GPUNormal;
/* OpenGL ES packs in a different order than desktop GL, but component conversion is the same.
* Of the code here, only struct GPUPackedNormal needs to change. */
@@ -195,6 +203,18 @@ BLI_INLINE GPUPackedNormal GPU_normal_convert_i10_s3(const short data[3])
return n;
}
+BLI_INLINE void GPU_normal_convert_v3(GPUNormal *gpu_normal,
+const float data[3],
+const bool do_hq_normals)
+{
+if (do_hq_normals) {
+normal_float_to_short_v3(gpu_normal->high, data);
+}
+else {
+gpu_normal->low = GPU_normal_convert_i10_v3(data);
+}
+}
#ifdef __cplusplus
}
#endif
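
For reference, a small hypothetical caller of the new helper; only the member of the union selected by the flag is meaningful afterwards:

```c
/* Hypothetical illustration of GPU_normal_convert_v3(): the flag decides
 * which member of the GPUNormal union gets written. */
void example_pack_normal(void)
{
  const float nor[3] = {0.0f, 0.0f, 1.0f};
  GPUNormal n;

  GPU_normal_convert_v3(&n, nor, false);
  /* n.low holds the 10-bit GPUPackedNormal (GPU_COMP_I10). */

  GPU_normal_convert_v3(&n, nor, true);
  /* n.high holds three 16-bit components (GPU_COMP_I16), avoiding the
   * GL_INT_2_10_10_10_REV vertex format on the affected drivers. */
}
```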