Refactor: Evaluate surface objects as mesh components

This commit furthers some of the changes that were started in
rBb9febb54a492 and subsequent commits by changing the way surface
objects are presented to render engines and other users of evaluated
objects in the same way. Instead of presenting evaluated surface objects
as an `OB_SURF` object with an evaluated mesh, `OB_SURF` objects
can now have an evaluated geometry set, which uses the same system
as other object types to deal with multi-type evaluated data.

This clarification makes it more obvious that lots of code that dealt
with the `DispList` type isn't used. It wasn't before either, now it's
just *by design*. Over 1100 lines can be removed. The legacy curve
draw cache code is much simpler now too. The idea behind the further
removal of `DispList` is that it's better to focus optimization efforts
on a single mesh data structure.

One expected functional change is that the evaluated mesh from surface
objects can now be used in geometry nodes with the object info node.

Cycles and the OBJ IO tests had to be tweaked so they use the newly exposed
evaluated mesh objects rather than the evaluated surface objects themselves.

Differential Revision: https://developer.blender.org/D14550
This commit is contained in:
Hans Goudey 2022-04-05 11:30:49 -05:00
parent e513687288
commit edcbf741df
Notes: blender-bot 2023-02-14 00:20:19 +01:00
Referenced by issue #100706, Regression: Instanced NURBs or CURVES on mesh vertices are not visible in 3.2+
12 changed files with 19 additions and 1160 deletions

View File

@ -66,9 +66,8 @@ bool BlenderSync::object_is_geometry(BObjectInfo &b_ob_info)
}
/* Other object types that are not meshes but evaluate to meshes are presented to render engines
* as separate instance objects. Metaballs and surface objects have not been affected by that
* change yet. */
if (type == BL::Object::type_SURFACE || type == BL::Object::type_META) {
* as separate instance objects. Metaballs have not been affected by that change yet. */
if (type == BL::Object::type_META) {
return true;
}

View File

@ -1,17 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma once
/** \file
* \ingroup bke
*/
#ifdef __cplusplus
extern "C" {
#endif
/**
 * Compute MikkTSpace tangents for a display list.
 *
 * \param dl: Display list; only `DL_INDEX3` and `DL_SURF` types are supported.
 * \param fnormals: Optional per-face normals used for flat shading of `DL_SURF` lists.
 * \param r_tangent: For `DL_INDEX3` the buffer must already be allocated (a single
 * tangent is written); for `DL_SURF` it is allocated here when NULL.
 */
void BKE_displist_tangent_calc(const DispList *dl, float (*fnormals)[3], float (**r_tangent)[4]);
#ifdef __cplusplus
}
#endif

View File

@ -121,7 +121,6 @@ set(SRC
intern/data_transfer.c
intern/deform.c
intern/displist.cc
intern/displist_tangent.c
intern/dynamicpaint.c
intern/editlattice.c
intern/editmesh.c
@ -357,7 +356,6 @@ set(SRC
BKE_data_transfer.h
BKE_deform.h
BKE_displist.h
BKE_displist_tangent.h
BKE_duplilist.h
BKE_dynamicpaint.h
BKE_editlattice.h

View File

@ -948,12 +948,11 @@ static void displist_surf_indices(DispList *dl)
}
}
static void evaluate_surface_object(Depsgraph *depsgraph,
const Scene *scene,
Object *ob,
const bool for_render,
ListBase *r_dispbase,
Mesh **r_final)
static GeometrySet evaluate_surface_object(Depsgraph *depsgraph,
const Scene *scene,
Object *ob,
const bool for_render,
ListBase *r_dispbase)
{
BLI_assert(ob->type == OB_SURF);
const Curve *cu = (const Curve *)ob->data;
@ -1036,8 +1035,7 @@ static void evaluate_surface_object(Depsgraph *depsgraph,
if (!geometry_set.has_mesh()) {
geometry_set.replace_mesh(BKE_mesh_new_nomain(0, 0, 0, 0, 0));
}
MeshComponent &mesh_component = geometry_set.get_component_for_write<MeshComponent>();
*r_final = mesh_component.release();
return geometry_set;
}
static void rotateBevelPiece(const Curve *cu,
@ -1483,9 +1481,8 @@ void BKE_displist_make_curveTypes(Depsgraph *depsgraph,
ListBase *dispbase = &ob->runtime.curve_cache->disp;
if (ob->type == OB_SURF) {
Mesh *mesh_eval;
evaluate_surface_object(depsgraph, scene, ob, for_render, dispbase, &mesh_eval);
BKE_object_eval_assign_data(ob, &mesh_eval->id, true);
GeometrySet geometry = evaluate_surface_object(depsgraph, scene, ob, for_render, dispbase);
ob->runtime.geometry_set_eval = new GeometrySet(std::move(geometry));
}
else {
GeometrySet geometry = evaluate_curve_type_object(depsgraph, scene, ob, for_render, dispbase);

View File

@ -1,269 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/** \file
* \ingroup bke
*/
#include "BLI_math.h"
#include "BLI_task.h"
#include "BKE_displist.h"
#include "BKE_displist_tangent.h"
#include "MEM_guardedalloc.h"
/* interface */
#include "mikktspace.h"
/* -------------------------------------------------------------------- */
/** \name Internal Types
* \{ */
/** User data passed through the MikkTSpace callbacks below. */
typedef struct {
const DispList *dl;
float (*tangent)[4]; /* destination */
/** Face normal for flat shading. */
float (*fnormals)[3];
/** Used by surfaces. Size of the surface in faces (see #BKE_displist_tangent_calc). */
int u_len, v_len;
} SGLSLDisplistToTangent;
/** \} */
/* ---------------------------------------------------------------------- */
/** \name DL_INDEX3 tangents
* \{ */
/** MikkTSpace callback: face count of a `DL_INDEX3` list (one triangle per `parts`). */
static int dl3_ts_GetNumFaces(const SMikkTSpaceContext *pContext)
{
SGLSLDisplistToTangent *dlt = pContext->m_pUserData;
return dlt->dl->parts;
}
/** MikkTSpace callback: `DL_INDEX3` faces are always triangles. */
static int dl3_ts_GetNumVertsOfFace(const SMikkTSpaceContext *pContext, const int face_num)
{
UNUSED_VARS(pContext, face_num);
return 3;
}
/** MikkTSpace callback: vertex position, looked up through the triangle index array. */
static void dl3_ts_GetPosition(const SMikkTSpaceContext *pContext,
float r_co[3],
const int face_num,
const int vert_index)
{
SGLSLDisplistToTangent *dlt = pContext->m_pUserData;
const float(*verts)[3] = (float(*)[3])dlt->dl->verts;
const int(*idx)[3] = (int(*)[3])dlt->dl->index;
copy_v3_v3(r_co, verts[idx[face_num][vert_index]]);
}
/**
 * MikkTSpace callback: synthesized UV for `DL_INDEX3`.
 * U is the vertex index normalized over the vertex count, V is always 0.
 */
static void dl3_ts_GetTextureCoordinate(const SMikkTSpaceContext *pContext,
float r_uv[2],
const int face_num,
const int vert_index)
{
SGLSLDisplistToTangent *dlt = pContext->m_pUserData;
const int(*idx)[3] = (int(*)[3])dlt->dl->index;
r_uv[0] = idx[face_num][vert_index] / (float)(dlt->dl->nr - 1);
r_uv[1] = 0.0f;
}
/** MikkTSpace callback: `DL_INDEX3` uses a single shared normal for all vertices. */
static void dl3_ts_GetNormal(const SMikkTSpaceContext *pContext,
float r_no[3],
const int face_num,
const int vert_index)
{
SGLSLDisplistToTangent *dlt = pContext->m_pUserData;
UNUSED_VARS(face_num, vert_index);
copy_v3_v3(r_no, dlt->dl->nors);
}
/**
 * MikkTSpace callback: store the result. `DL_INDEX3` keeps a single tangent,
 * so every face/vertex writes the same slot 0 (sign goes in the w component).
 */
static void dl3_ts_SetTSpace(const SMikkTSpaceContext *pContext,
const float fvTangent[3],
const float fSign,
const int face_num,
const int vert_index)
{
SGLSLDisplistToTangent *dlt = pContext->m_pUserData;
UNUSED_VARS(face_num, vert_index);
copy_v3_v3(dlt->tangent[0], fvTangent);
dlt->tangent[0][3] = fSign;
}
/** \} */
/* ---------------------------------------------------------------------- */
/** \name DL_SURF tangents
* \{ */
/** MikkTSpace callback: face count of a `DL_SURF` grid (u * v quads). */
static int dlsurf_ts_GetNumFaces(const SMikkTSpaceContext *pContext)
{
SGLSLDisplistToTangent *dlt = pContext->m_pUserData;
return dlt->v_len * dlt->u_len;
}
/** MikkTSpace callback: `DL_SURF` faces are always quads. */
static int dlsurf_ts_GetNumVertsOfFace(const SMikkTSpaceContext *pContext, const int face_num)
{
UNUSED_VARS(pContext, face_num);
return 4;
}
/**
 * Map a (quad face, corner) pair of the surface grid to a flat vertex index.
 * Corners 0..3 walk the quad at (u, v): (u+1, v), (u+1, v+1), (u, v+1), (u, v).
 * The modulo at the end wraps indices for cyclic U/V surfaces.
 */
static int face_to_vert_index(SGLSLDisplistToTangent *dlt,
const int face_num,
const int vert_index)
{
int u = face_num % dlt->u_len;
int v = face_num / dlt->u_len;
if (vert_index == 0) {
u += 1;
}
else if (vert_index == 1) {
u += 1;
v += 1;
}
else if (vert_index == 2) {
v += 1;
}
/* Cyclic correction. */
u = u % dlt->dl->nr;
v = v % dlt->dl->parts;
return v * dlt->dl->nr + u;
}
/** MikkTSpace callback: vertex position of a `DL_SURF` quad corner. */
static void dlsurf_ts_GetPosition(const SMikkTSpaceContext *pContext,
float r_co[3],
const int face_num,
const int vert_index)
{
SGLSLDisplistToTangent *dlt = pContext->m_pUserData;
const float(*verts)[3] = (float(*)[3])dlt->dl->verts;
copy_v3_v3(r_co, verts[face_to_vert_index(dlt, face_num, vert_index)]);
}
/**
 * MikkTSpace callback: UV derived from the grid position, with a wrap-around
 * fix-up so corners that land on 0.0 after the cyclic modulo map to 1.0.
 */
static void dlsurf_ts_GetTextureCoordinate(const SMikkTSpaceContext *pContext,
float r_uv[2],
const int face_num,
const int vert_index)
{
SGLSLDisplistToTangent *dlt = pContext->m_pUserData;
int idx = face_to_vert_index(dlt, face_num, vert_index);
/* NOTE: For some reason the shading U and V are swapped compared to the
* one described in the surface format. */
r_uv[0] = (idx / dlt->dl->nr) / (float)(dlt->v_len);
r_uv[1] = (idx % dlt->dl->nr) / (float)(dlt->u_len);
if (r_uv[0] == 0.0f && ELEM(vert_index, 1, 2)) {
r_uv[0] = 1.0f;
}
if (r_uv[1] == 0.0f && ELEM(vert_index, 0, 1)) {
r_uv[1] = 1.0f;
}
}
/**
 * MikkTSpace callback: normal of a quad corner. Uses the per-face normals when
 * provided (flat shading), otherwise the per-vertex normals of the display list.
 */
static void dlsurf_ts_GetNormal(const SMikkTSpaceContext *pContext,
float r_no[3],
const int face_num,
const int vert_index)
{
SGLSLDisplistToTangent *dlt = pContext->m_pUserData;
const float(*nors)[3] = (float(*)[3])dlt->dl->nors;
if (dlt->fnormals) {
copy_v3_v3(r_no, dlt->fnormals[face_num]);
}
else {
copy_v3_v3(r_no, nors[face_to_vert_index(dlt, face_num, vert_index)]);
}
}
/**
 * MikkTSpace callback: store the result per loop (4 loops per quad).
 * The tangent sign goes in the w component.
 */
static void dlsurf_ts_SetTSpace(const SMikkTSpaceContext *pContext,
const float fvTangent[3],
const float fSign,
const int face_num,
const int vert_index)
{
SGLSLDisplistToTangent *dlt = pContext->m_pUserData;
UNUSED_VARS(face_num, vert_index);
float *r_tan = dlt->tangent[face_num * 4 + vert_index];
copy_v3_v3(r_tan, fvTangent);
r_tan[3] = fSign;
}
/** \} */
/* ---------------------------------------------------------------------- */
/** \name Entry point
* \{ */
/**
 * Compute tangents for a display list via MikkTSpace.
 *
 * \param dl: Only `DL_INDEX3` and `DL_SURF` are supported; other types assert.
 * \param fnormals: Optional per-face normals, used by `DL_SURF` for flat shading.
 * \param r_tangent: For `DL_INDEX3` the (single-entry) buffer must be preallocated;
 * for `DL_SURF` it is allocated here when NULL, sized 4 loops per quad.
 */
void BKE_displist_tangent_calc(const DispList *dl, float (*fnormals)[3], float (**r_tangent)[4])
{
if (dl->type == DL_INDEX3) {
/* INDEX3 have only one tangent so we don't need actual allocation. */
BLI_assert(*r_tangent != NULL);
SGLSLDisplistToTangent mesh2tangent = {
.tangent = *r_tangent,
.dl = dl,
};
SMikkTSpaceContext sContext = {NULL};
SMikkTSpaceInterface sInterface = {NULL};
sContext.m_pUserData = &mesh2tangent;
sContext.m_pInterface = &sInterface;
sInterface.m_getNumFaces = dl3_ts_GetNumFaces;
sInterface.m_getNumVerticesOfFace = dl3_ts_GetNumVertsOfFace;
sInterface.m_getPosition = dl3_ts_GetPosition;
sInterface.m_getTexCoord = dl3_ts_GetTextureCoordinate;
sInterface.m_getNormal = dl3_ts_GetNormal;
sInterface.m_setTSpaceBasic = dl3_ts_SetTSpace;
/* 0 if failed */
genTangSpaceDefault(&sContext);
}
else if (dl->type == DL_SURF) {
/* Face counts exclude the wrap-around row/column on non-cyclic axes. */
SGLSLDisplistToTangent mesh2tangent = {
.dl = dl,
.u_len = dl->nr - ((dl->flag & DL_CYCL_U) ? 0 : 1),
.v_len = dl->parts - ((dl->flag & DL_CYCL_V) ? 0 : 1),
.fnormals = fnormals,
};
int loop_len = mesh2tangent.u_len * mesh2tangent.v_len * 4;
if (*r_tangent == NULL) {
*r_tangent = MEM_mallocN(sizeof(float[4]) * loop_len, "displist tangents");
}
mesh2tangent.tangent = *r_tangent;
SMikkTSpaceContext sContext = {NULL};
SMikkTSpaceInterface sInterface = {NULL};
sContext.m_pUserData = &mesh2tangent;
sContext.m_pInterface = &sInterface;
sInterface.m_getNumFaces = dlsurf_ts_GetNumFaces;
sInterface.m_getNumVerticesOfFace = dlsurf_ts_GetNumVertsOfFace;
sInterface.m_getPosition = dlsurf_ts_GetPosition;
sInterface.m_getTexCoord = dlsurf_ts_GetTextureCoordinate;
sInterface.m_getNormal = dlsurf_ts_GetNormal;
sInterface.m_setTSpaceBasic = dlsurf_ts_SetTSpace;
/* 0 if failed */
genTangSpaceDefault(&sContext);
}
else {
/* Unsupported. */
BLI_assert(0);
}
}
/** \} */

View File

@ -817,7 +817,7 @@ GPUBatch *DRW_cache_object_edge_detection_get(Object *ob, bool *r_is_manifold)
case OB_CURVES_LEGACY:
return NULL;
case OB_SURF:
return DRW_cache_surf_edge_detection_get(ob, r_is_manifold);
return NULL;
case OB_FONT:
return NULL;
case OB_MBALL:
@ -841,7 +841,7 @@ GPUBatch *DRW_cache_object_face_wireframe_get(Object *ob)
case OB_CURVES_LEGACY:
return NULL;
case OB_SURF:
return DRW_cache_surf_face_wireframe_get(ob);
return NULL;
case OB_FONT:
return NULL;
case OB_MBALL:
@ -868,7 +868,7 @@ GPUBatch *DRW_cache_object_loose_edges_get(struct Object *ob)
case OB_CURVES_LEGACY:
return NULL;
case OB_SURF:
return DRW_cache_surf_loose_edges_get(ob);
return NULL;
case OB_FONT:
return NULL;
case OB_MBALL:
@ -892,7 +892,7 @@ GPUBatch *DRW_cache_object_surface_get(Object *ob)
case OB_CURVES_LEGACY:
return NULL;
case OB_SURF:
return DRW_cache_surf_surface_get(ob);
return NULL;
case OB_FONT:
return NULL;
case OB_MBALL:
@ -919,7 +919,7 @@ GPUVertBuf *DRW_cache_object_pos_vertbuf_get(Object *ob)
case OB_CURVES_LEGACY:
case OB_SURF:
case OB_FONT:
return DRW_curve_batch_cache_pos_vertbuf_get(ob->data);
return NULL;
case OB_MBALL:
return DRW_mball_batch_cache_pos_vertbuf_get(ob);
case OB_CURVES:
@ -976,7 +976,7 @@ GPUBatch **DRW_cache_object_surface_material_get(struct Object *ob,
case OB_CURVES_LEGACY:
return NULL;
case OB_SURF:
return DRW_cache_surf_surface_shaded_get(ob, gpumat_array, gpumat_array_len);
return NULL;
case OB_FONT:
return NULL;
case OB_MBALL:
@ -3003,87 +3003,13 @@ GPUBatch *DRW_cache_text_edge_wire_get(Object *ob)
/** \name Surface
* \{ */
/** Triangle batch for a surface object; prefers the evaluated mesh when present. */
GPUBatch *DRW_cache_surf_surface_get(Object *ob)
{
BLI_assert(ob->type == OB_SURF);
struct Curve *cu = ob->data;
struct Mesh *mesh_eval = BKE_object_get_evaluated_mesh_no_subsurf(ob);
if (mesh_eval != NULL) {
return DRW_mesh_batch_cache_get_surface(mesh_eval);
}
/* Fall back to the legacy curve batch cache. */
return DRW_curve_batch_cache_get_triangles_with_normals(cu);
}
/** Wire-edge batch for a surface object; prefers the evaluated mesh when present. */
GPUBatch *DRW_cache_surf_edge_wire_get(Object *ob)
{
BLI_assert(ob->type == OB_SURF);
struct Curve *cu = ob->data;
struct Mesh *mesh_eval = BKE_object_get_evaluated_mesh_no_subsurf(ob);
if (mesh_eval != NULL) {
return DRW_mesh_batch_cache_get_loose_edges(mesh_eval);
}
/* Fall back to the legacy curve batch cache. */
return DRW_curve_batch_cache_get_wire_edge(cu);
}
/** Face-wireframe batch for a surface object; prefers the evaluated mesh when present. */
GPUBatch *DRW_cache_surf_face_wireframe_get(Object *ob)
{
BLI_assert(ob->type == OB_SURF);
struct Curve *cu = ob->data;
struct Mesh *mesh_eval = BKE_object_get_evaluated_mesh_no_subsurf(ob);
if (mesh_eval != NULL) {
return DRW_mesh_batch_cache_get_wireframes_face(mesh_eval);
}
/* Fall back to the legacy curve batch cache. */
return DRW_curve_batch_cache_get_wireframes_face(cu);
}
/**
 * Edge-detection batch for a surface object; prefers the evaluated mesh when present.
 * \param r_is_manifold: Set to whether the geometry is manifold (see callee docs).
 */
GPUBatch *DRW_cache_surf_edge_detection_get(Object *ob, bool *r_is_manifold)
{
BLI_assert(ob->type == OB_SURF);
struct Curve *cu = ob->data;
struct Mesh *mesh_eval = BKE_object_get_evaluated_mesh_no_subsurf(ob);
if (mesh_eval != NULL) {
return DRW_mesh_batch_cache_get_edge_detection(mesh_eval, r_is_manifold);
}
/* Fall back to the legacy curve batch cache. */
return DRW_curve_batch_cache_get_edge_detection(cu, r_is_manifold);
}
/**
 * Loose-edges batch for a surface object. Only implemented for the evaluated
 * mesh; the legacy display-list path returns NULL (see TODO below).
 */
GPUBatch *DRW_cache_surf_loose_edges_get(Object *ob)
{
BLI_assert(ob->type == OB_SURF);
struct Curve *cu = ob->data;
struct Mesh *mesh_eval = BKE_object_get_evaluated_mesh_no_subsurf(ob);
if (mesh_eval != NULL) {
return DRW_mesh_batch_cache_get_loose_edges(mesh_eval);
}
/* TODO */
UNUSED_VARS(cu);
return NULL;
}
/**
 * Per-material shaded-surface batches for a surface object; prefers the
 * evaluated mesh when present. Returns an array of `gpumat_array_len` batches.
 */
GPUBatch **DRW_cache_surf_surface_shaded_get(Object *ob,
struct GPUMaterial **gpumat_array,
uint gpumat_array_len)
{
BLI_assert(ob->type == OB_SURF);
struct Curve *cu = ob->data;
struct Mesh *mesh_eval = BKE_object_get_evaluated_mesh_no_subsurf(ob);
if (mesh_eval != NULL) {
return DRW_mesh_batch_cache_get_surface_shaded(ob, mesh_eval, gpumat_array, gpumat_array_len);
}
/* Fall back to the legacy curve batch cache. */
return DRW_curve_batch_cache_get_surface_shaded(cu, gpumat_array, gpumat_array_len);
}
/** \} */
/* -------------------------------------------------------------------- */
@ -3369,19 +3295,13 @@ GPUBatch *DRW_cache_cursor_get(bool crosshair_lines)
void drw_batch_cache_validate(Object *ob)
{
struct Mesh *mesh_eval = BKE_object_get_evaluated_mesh_no_subsurf(ob);
switch (ob->type) {
case OB_MESH:
DRW_mesh_batch_cache_validate(ob, (Mesh *)ob->data);
break;
case OB_CURVES_LEGACY:
case OB_FONT:
DRW_curve_batch_cache_validate((Curve *)ob->data);
break;
case OB_SURF:
if (mesh_eval != NULL) {
DRW_mesh_batch_cache_validate(ob, mesh_eval);
}
DRW_curve_batch_cache_validate((Curve *)ob->data);
break;
case OB_MBALL:
@ -3418,7 +3338,6 @@ void drw_batch_cache_generate_requested(Object *ob)
DRW_object_use_hide_faces(ob)) ||
((mode == CTX_MODE_EDIT_MESH) && DRW_object_is_in_edit_mode(ob))));
struct Mesh *mesh_eval = BKE_object_get_evaluated_mesh_no_subsurf(ob);
switch (ob->type) {
case OB_MESH:
DRW_mesh_batch_cache_create_requested(
@ -3426,13 +3345,7 @@ void drw_batch_cache_generate_requested(Object *ob)
break;
case OB_CURVES_LEGACY:
case OB_FONT:
DRW_curve_batch_cache_create_requested(ob, scene);
break;
case OB_SURF:
if (mesh_eval) {
DRW_mesh_batch_cache_create_requested(
DST.task_graph, ob, mesh_eval, scene, is_paint_mode, use_hide);
}
DRW_curve_batch_cache_create_requested(ob, scene);
break;
/* TODO: all cases. */
@ -3468,17 +3381,10 @@ void drw_batch_cache_generate_requested_delayed(Object *ob)
void DRW_batch_cache_free_old(Object *ob, int ctime)
{
struct Mesh *mesh_eval = BKE_object_get_evaluated_mesh_no_subsurf(ob);
switch (ob->type) {
case OB_MESH:
DRW_mesh_batch_cache_free_old((Mesh *)ob->data, ctime);
break;
case OB_SURF:
if (mesh_eval) {
DRW_mesh_batch_cache_free_old(mesh_eval, ctime);
}
break;
/* TODO: all cases. */
default:
break;

View File

@ -186,17 +186,7 @@ struct GPUBatch *DRW_cache_text_edge_wire_get(struct Object *ob);
/* Surface */
struct GPUBatch *DRW_cache_surf_surface_get(struct Object *ob);
struct GPUBatch *DRW_cache_surf_edge_wire_get(struct Object *ob);
struct GPUBatch *DRW_cache_surf_loose_edges_get(struct Object *ob);
/* Return list of batches */
struct GPUBatch **DRW_cache_surf_surface_shaded_get(struct Object *ob,
struct GPUMaterial **gpumat_array,
uint gpumat_array_len);
struct GPUBatch *DRW_cache_surf_face_wireframe_get(struct Object *ob);
struct GPUBatch *DRW_cache_surf_edge_detection_get(struct Object *ob, bool *r_is_manifold);
/* Lattice */

View File

@ -104,16 +104,9 @@ int DRW_curve_material_count_get(struct Curve *cu);
struct GPUBatch *DRW_curve_batch_cache_get_wire_edge(struct Curve *cu);
struct GPUBatch *DRW_curve_batch_cache_get_normal_edge(struct Curve *cu);
struct GPUBatch *DRW_curve_batch_cache_get_edge_detection(struct Curve *cu, bool *r_is_manifold);
struct GPUBatch *DRW_curve_batch_cache_get_edit_edges(struct Curve *cu);
struct GPUBatch *DRW_curve_batch_cache_get_edit_verts(struct Curve *cu);
struct GPUBatch *DRW_curve_batch_cache_get_triangles_with_normals(struct Curve *cu);
struct GPUBatch **DRW_curve_batch_cache_get_surface_shaded(struct Curve *cu,
struct GPUMaterial **gpumat_array,
uint gpumat_array_len);
struct GPUBatch *DRW_curve_batch_cache_get_wireframes_face(struct Curve *cu);
/** \} */
/* -------------------------------------------------------------------- */
@ -141,16 +134,8 @@ void DRW_displist_vertbuf_create_pos_and_nor(struct ListBase *lb,
struct GPUVertBuf *vbo,
const struct Scene *scene);
void DRW_displist_vertbuf_create_wiredata(struct ListBase *lb, struct GPUVertBuf *vbo);
void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv_and_tan(struct ListBase *lb,
struct GPUVertBuf *vbo_pos_nor,
struct GPUVertBuf *vbo_uv,
struct GPUVertBuf *vbo_tan,
const struct Scene *scene);
void DRW_displist_indexbuf_create_lines_in_order(struct ListBase *lb, struct GPUIndexBuf *ibo);
void DRW_displist_indexbuf_create_triangles_in_order(struct ListBase *lb, struct GPUIndexBuf *ibo);
void DRW_displist_indexbuf_create_triangles_loop_split_by_material(struct ListBase *lb,
struct GPUIndexBuf **ibo_mat,
uint mat_len);
void DRW_displist_indexbuf_create_edges_adjacency_lines(struct ListBase *lb,
struct GPUIndexBuf *ibo,
bool *r_is_manifold);
@ -309,7 +294,6 @@ struct GPUBatch *DRW_mesh_batch_cache_get_edit_mesh_analysis(struct Mesh *me);
* \{ */
struct GPUVertBuf *DRW_mesh_batch_cache_pos_vertbuf_get(struct Mesh *me);
struct GPUVertBuf *DRW_curve_batch_cache_pos_vertbuf_get(struct Curve *cu);
struct GPUVertBuf *DRW_mball_batch_cache_pos_vertbuf_get(struct Object *ob);
int DRW_mesh_material_count_get(const struct Object *object, const struct Mesh *me);

View File

@ -288,107 +288,39 @@ static int curve_render_data_normal_len_get(const CurveRenderData *rdata)
return rdata->normal.len;
}
/**
 * Accumulate into \a cd_layers the custom-data mask of layers required by the
 * attributes of the given GPU materials (UVs, tangents, orco, hair length).
 * Named attribute layers are skipped entirely for curves (see note below).
 */
static void curve_cd_calc_used_gpu_layers(CustomDataMask *cd_layers,
struct GPUMaterial **gpumat_array,
int gpumat_array_len)
{
for (int i = 0; i < gpumat_array_len; i++) {
struct GPUMaterial *gpumat = gpumat_array[i];
if (gpumat == nullptr) {
continue;
}
ListBase gpu_attrs = GPU_material_attributes(gpumat);
LISTBASE_FOREACH (GPUMaterialAttribute *, gpu_attr, &gpu_attrs) {
const char *name = gpu_attr->name;
int type = gpu_attr->type;
/* Curves cannot have named layers.
* NOTE: We could relax this assumption later. */
if (name[0] != '\0') {
continue;
}
/* Auto-named attributes resolve to the UV layer for curves. */
if (type == CD_AUTO_FROM_NAME) {
type = CD_MTFACE;
}
switch (type) {
case CD_MTFACE:
*cd_layers |= CD_MASK_MLOOPUV;
break;
case CD_TANGENT:
*cd_layers |= CD_MASK_TANGENT;
break;
case CD_MCOL:
/* Curve object don't have Color data. */
break;
case CD_ORCO:
*cd_layers |= CD_MASK_ORCO;
break;
case CD_HAIRLENGTH:
*cd_layers |= CD_MASK_HAIRLENGTH;
break;
}
}
}
}
/* ---------------------------------------------------------------------- */
/* Curve GPUBatch Cache */
/** GPU batch cache stored per Curve; holds VBOs, IBOs and the batches built from them. */
struct CurveBatchCache {
/* Vertex buffers in draw order. */
struct {
GPUVertBuf *pos_nor;
GPUVertBuf *edge_fac;
GPUVertBuf *curves_pos;
GPUVertBuf *loop_pos_nor;
GPUVertBuf *loop_uv;
GPUVertBuf *loop_tan;
} ordered;
struct {
/* Curve points. Aligned with ordered.pos_nor */
GPUVertBuf *curves_nor;
GPUVertBuf *curves_weight; /* TODO. */
/* Edit points (beztriples and bpoints) */
GPUVertBuf *pos;
GPUVertBuf *data;
} edit;
/* Index buffers. */
struct {
GPUIndexBuf *surfaces_tris;
GPUIndexBuf *surfaces_lines;
GPUIndexBuf *curves_lines;
GPUIndexBuf *edges_adj_lines;
/* Edit mode */
GPUIndexBuf *edit_verts;
GPUIndexBuf *edit_lines;
} ibo;
/* Batches handed out to the draw engines. */
struct {
GPUBatch *surfaces;
GPUBatch *surfaces_edges;
GPUBatch *curves;
/* control handles and vertices */
GPUBatch *edit_edges;
GPUBatch *edit_verts;
GPUBatch *edit_normals;
GPUBatch *edge_detection;
} batch;
/* One triangle IBO / batch per material slot; arrays of length `mat_len`. */
GPUIndexBuf **surf_per_mat_tris;
GPUBatch **surf_per_mat;
int mat_len;
/* Custom-data layers already baked into VBOs vs. requested by materials. */
CustomDataMask cd_used, cd_needed;
/* settings to determine if cache is invalid */
bool is_dirty;
bool is_editmode;
/* Valid only if edge_detection is up to date. */
bool is_manifold;
};
/* GPUBatch cache management. */
@ -401,10 +333,6 @@ static bool curve_batch_cache_valid(Curve *cu)
return false;
}
if (cache->mat_len != DRW_curve_material_count_get(cu)) {
return false;
}
if (cache->is_dirty) {
return false;
}
@ -445,13 +373,6 @@ static void curve_batch_cache_init(Curve *cu)
}
#endif
cache->cd_used = 0;
cache->mat_len = DRW_curve_material_count_get(cu);
cache->surf_per_mat_tris = (GPUIndexBuf **)MEM_callocN(
sizeof(*cache->surf_per_mat_tris) * cache->mat_len, __func__);
cache->surf_per_mat = (GPUBatch **)MEM_callocN(sizeof(*cache->surf_per_mat) * cache->mat_len,
__func__);
cache->is_editmode = (cu->editnurb != nullptr) || (cu->editfont != nullptr);
cache->is_dirty = false;
@ -514,15 +435,6 @@ static void curve_batch_cache_clear(Curve *cu)
GPUBatch **batch = (GPUBatch **)&cache->batch;
GPU_BATCH_DISCARD_SAFE(batch[i]);
}
for (int i = 0; i < cache->mat_len; i++) {
GPU_INDEXBUF_DISCARD_SAFE(cache->surf_per_mat_tris[i]);
GPU_BATCH_DISCARD_SAFE(cache->surf_per_mat[i]);
}
MEM_SAFE_FREE(cache->surf_per_mat_tris);
MEM_SAFE_FREE(cache->surf_per_mat);
cache->mat_len = 0;
cache->cd_used = 0;
}
void DRW_curve_batch_cache_free(Curve *cu)
@ -883,55 +795,6 @@ GPUBatch *DRW_curve_batch_cache_get_edit_verts(Curve *cu)
return DRW_batch_request(&cache->batch.edit_verts);
}
/** Request the shaded-surface triangle batch for this curve. */
GPUBatch *DRW_curve_batch_cache_get_triangles_with_normals(struct Curve *cu)
{
CurveBatchCache *cache = curve_batch_cache_get(cu);
return DRW_batch_request(&cache->batch.surfaces);
}
/**
 * Request one surface batch per material slot, recording the custom-data
 * layers the materials need so the VBOs can be (re)built accordingly.
 * \return Array of `cache->mat_len` batch pointers (matches `gpumat_array_len`).
 */
GPUBatch **DRW_curve_batch_cache_get_surface_shaded(struct Curve *cu,
struct GPUMaterial **gpumat_array,
uint gpumat_array_len)
{
CurveBatchCache *cache = curve_batch_cache_get(cu);
BLI_assert(gpumat_array_len == cache->mat_len);
curve_cd_calc_used_gpu_layers(&cache->cd_needed, gpumat_array, gpumat_array_len);
for (int i = 0; i < cache->mat_len; i++) {
DRW_batch_request(&cache->surf_per_mat[i]);
}
return cache->surf_per_mat;
}
/** Request only the loop position/normal VBO (used e.g. as a position source). */
GPUVertBuf *DRW_curve_batch_cache_pos_vertbuf_get(struct Curve *cu)
{
CurveBatchCache *cache = curve_batch_cache_get(cu);
/* Request surface to trigger the vbo filling. Otherwise it may do nothing. */
DRW_batch_request(&cache->batch.surfaces);
DRW_vbo_request(nullptr, &cache->ordered.loop_pos_nor);
return cache->ordered.loop_pos_nor;
}
/** Request the surface-edges (face wireframe) batch for this curve. */
GPUBatch *DRW_curve_batch_cache_get_wireframes_face(Curve *cu)
{
CurveBatchCache *cache = curve_batch_cache_get(cu);
return DRW_batch_request(&cache->batch.surfaces_edges);
}
/**
 * Request the adjacency-lines batch used for edge detection.
 * \param r_is_manifold: Optional; receives the cached manifold flag.
 */
GPUBatch *DRW_curve_batch_cache_get_edge_detection(Curve *cu, bool *r_is_manifold)
{
CurveBatchCache *cache = curve_batch_cache_get(cu);
/* Even if is_manifold is not correct (not updated),
* the default (not manifold) is just the worst case. */
if (r_is_manifold) {
*r_is_manifold = cache->is_manifold;
}
return DRW_batch_request(&cache->batch.edge_detection);
}
int DRW_curve_material_count_get(Curve *cu)
{
return max_ii(1, cu->totcol);
@ -950,36 +813,11 @@ void DRW_curve_batch_cache_create_requested(Object *ob, const struct Scene *scen
Curve *cu = (Curve *)ob->data;
CurveBatchCache *cache = curve_batch_cache_get(cu);
/* Verify that all surface batches have needed attribute layers. */
/* TODO(fclem): We could be a bit smarter here and only do it per material. */
if ((cache->cd_used & cache->cd_needed) != cache->cd_needed) {
for (int i = 0; i < cache->mat_len; i++) {
/* We can't discard batches at this point as they have been
* referenced for drawing. Just clear them in place. */
GPU_BATCH_CLEAR_SAFE(cache->surf_per_mat[i]);
}
cache->cd_used |= cache->cd_needed;
cache->cd_needed = 0;
}
/* Init batches and request VBOs & IBOs */
if (DRW_batch_requested(cache->batch.surfaces, GPU_PRIM_TRIS)) {
DRW_vbo_request(cache->batch.surfaces, &cache->ordered.loop_pos_nor);
}
if (DRW_batch_requested(cache->batch.surfaces_edges, GPU_PRIM_LINES)) {
DRW_ibo_request(cache->batch.surfaces_edges, &cache->ibo.surfaces_lines);
DRW_vbo_request(cache->batch.surfaces_edges, &cache->ordered.pos_nor);
DRW_vbo_request(cache->batch.surfaces_edges, &cache->ordered.edge_fac);
}
if (DRW_batch_requested(cache->batch.curves, GPU_PRIM_LINE_STRIP)) {
DRW_ibo_request(cache->batch.curves, &cache->ibo.curves_lines);
DRW_vbo_request(cache->batch.curves, &cache->ordered.curves_pos);
}
if (DRW_batch_requested(cache->batch.edge_detection, GPU_PRIM_LINES_ADJ)) {
DRW_ibo_request(cache->batch.edge_detection, &cache->ibo.edges_adj_lines);
DRW_vbo_request(cache->batch.edge_detection, &cache->ordered.pos_nor);
}
/* Edit mode */
if (DRW_batch_requested(cache->batch.edit_edges, GPU_PRIM_LINES)) {
@ -995,20 +833,6 @@ void DRW_curve_batch_cache_create_requested(Object *ob, const struct Scene *scen
if (DRW_batch_requested(cache->batch.edit_normals, GPU_PRIM_LINES)) {
DRW_vbo_request(cache->batch.edit_normals, &cache->edit.curves_nor);
}
for (int i = 0; i < cache->mat_len; i++) {
if (DRW_batch_requested(cache->surf_per_mat[i], GPU_PRIM_TRIS)) {
if (cache->mat_len > 1) {
DRW_ibo_request(cache->surf_per_mat[i], &cache->surf_per_mat_tris[i]);
}
if (cache->cd_used & CD_MASK_MLOOPUV) {
DRW_vbo_request(cache->surf_per_mat[i], &cache->ordered.loop_uv);
}
if (cache->cd_used & CD_MASK_TANGENT) {
DRW_vbo_request(cache->surf_per_mat[i], &cache->ordered.loop_tan);
}
DRW_vbo_request(cache->surf_per_mat[i], &cache->ordered.loop_pos_nor);
}
}
#ifdef DRW_DEBUG_MESH_CACHE_REQUEST
printf("-- %s %s --\n", __func__, ob->id.name + 2);
@ -1016,76 +840,28 @@ void DRW_curve_batch_cache_create_requested(Object *ob, const struct Scene *scen
/* Generate MeshRenderData flags */
int mr_flag = 0;
DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.pos_nor, CU_DATATYPE_SURFACE);
DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.edge_fac, CU_DATATYPE_SURFACE);
DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.curves_pos, CU_DATATYPE_WIRE);
DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.loop_pos_nor, CU_DATATYPE_SURFACE);
DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.loop_uv, CU_DATATYPE_SURFACE);
DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.loop_tan, CU_DATATYPE_SURFACE);
DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.surfaces_tris, CU_DATATYPE_SURFACE);
DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.surfaces_lines, CU_DATATYPE_SURFACE);
DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.curves_lines, CU_DATATYPE_WIRE);
DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.edges_adj_lines, CU_DATATYPE_SURFACE);
DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->edit.pos, CU_DATATYPE_OVERLAY);
DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->edit.data, CU_DATATYPE_OVERLAY);
DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->edit.curves_nor, CU_DATATYPE_NORMAL);
DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->edit.curves_weight, CU_DATATYPE_OVERLAY);
DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.edit_verts, CU_DATATYPE_OVERLAY);
DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.edit_lines, CU_DATATYPE_OVERLAY);
for (int i = 0; i < cache->mat_len; i++) {
DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->surf_per_mat_tris[i], CU_DATATYPE_SURFACE);
}
#ifdef DRW_DEBUG_MESH_CACHE_REQUEST
printf(" mr_flag %d\n\n", mr_flag);
#endif
CurveRenderData *rdata = curve_render_data_create(cu, ob->runtime.curve_cache, mr_flag);
/* The object's curve cache can be empty (in one case because we use #CurveEval's cache instead),
* If so, point to an empty DispList list to avoid the need to check for null in the following
* functions. */
ListBase empty_lb = {nullptr, nullptr};
ListBase *lb = rdata->ob_curve_cache == nullptr ? &empty_lb : &rdata->ob_curve_cache->disp;
/* Generate VBOs */
if (DRW_vbo_requested(cache->ordered.pos_nor)) {
DRW_displist_vertbuf_create_pos_and_nor(lb, cache->ordered.pos_nor, scene);
}
if (DRW_vbo_requested(cache->ordered.edge_fac)) {
DRW_displist_vertbuf_create_wiredata(lb, cache->ordered.edge_fac);
}
if (DRW_vbo_requested(cache->ordered.curves_pos)) {
curve_create_curves_pos(rdata, cache->ordered.curves_pos);
}
if (DRW_vbo_requested(cache->ordered.loop_pos_nor) ||
DRW_vbo_requested(cache->ordered.loop_uv) || DRW_vbo_requested(cache->ordered.loop_tan)) {
DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv_and_tan(
lb, cache->ordered.loop_pos_nor, cache->ordered.loop_uv, cache->ordered.loop_tan, scene);
}
if (DRW_ibo_requested(cache->surf_per_mat_tris[0])) {
DRW_displist_indexbuf_create_triangles_loop_split_by_material(
lb, cache->surf_per_mat_tris, cache->mat_len);
}
if (DRW_ibo_requested(cache->ibo.curves_lines)) {
curve_create_curves_lines(rdata, cache->ibo.curves_lines);
}
if (DRW_ibo_requested(cache->ibo.surfaces_tris)) {
DRW_displist_indexbuf_create_triangles_in_order(lb, cache->ibo.surfaces_tris);
}
if (DRW_ibo_requested(cache->ibo.surfaces_lines)) {
DRW_displist_indexbuf_create_lines_in_order(lb, cache->ibo.surfaces_lines);
}
if (DRW_ibo_requested(cache->ibo.edges_adj_lines)) {
DRW_displist_indexbuf_create_edges_adjacency_lines(
lb, cache->ibo.edges_adj_lines, &cache->is_manifold);
}
if (DRW_vbo_requested(cache->edit.pos) || DRW_vbo_requested(cache->edit.data) ||
DRW_ibo_requested(cache->ibo.edit_verts) || DRW_ibo_requested(cache->ibo.edit_lines)) {
curve_create_edit_data_and_handles(

View File

@ -9,7 +9,6 @@
* \note DispList may be removed soon! This is a utility for object types that use render.
*/
#include "BLI_alloca.h"
#include "BLI_edgehash.h"
#include "BLI_listbase.h"
#include "BLI_math_vector.h"
@ -19,7 +18,6 @@
#include "DNA_scene_types.h"
#include "BKE_displist.h"
#include "BKE_displist_tangent.h"
#include "GPU_batch.h"
#include "GPU_capabilities.h"
@ -112,53 +110,6 @@ static void displist_indexbufbuilder_set(
}
}
/**
 * Emit tessellated (triangulated) indices for one display list, starting at
 * vertex offset \a ofs. Quads are split into two triangles via
 * \a set_quad_tri_indices; DL_INDEX4 entries with idx[2] == idx[3] are
 * actually triangles and use \a set_tri_indices.
 * \return The vertex offset after this display list (for chaining calls).
 */
static int displist_indexbufbuilder_tess_set(
SetTriIndicesFn *set_tri_indices,
SetTriIndicesFn *set_quad_tri_indices, /* meh, find a better solution. */
void *thunk,
const DispList *dl,
const int ofs)
{
int v_idx = ofs;
if (ELEM(dl->type, DL_INDEX3, DL_INDEX4, DL_SURF)) {
if (dl->type == DL_INDEX3) {
for (int i = 0; i < dl->parts; i++) {
set_tri_indices(thunk, v_idx + 0, v_idx + 1, v_idx + 2);
v_idx += 3;
}
}
else if (dl->type == DL_SURF) {
for (int a = 0; a < dl->parts; a++) {
/* Skip the wrap-around row when V is not cyclic. */
if ((dl->flag & DL_CYCL_V) == 0 && a == dl->parts - 1) {
break;
}
int b = (dl->flag & DL_CYCL_U) ? 0 : 1;
for (; b < dl->nr; b++) {
set_quad_tri_indices(thunk, v_idx + 0, v_idx + 1, v_idx + 2);
set_quad_tri_indices(thunk, v_idx + 3, v_idx + 4, v_idx + 5);
v_idx += 6;
}
}
}
else {
BLI_assert(dl->type == DL_INDEX4);
const int *idx = dl->index;
for (int i = 0; i < dl->parts; i++, idx += 4) {
/* A repeated last index marks a triangle, not a quad. */
if (idx[2] != idx[3]) {
set_quad_tri_indices(thunk, v_idx + 0, v_idx + 1, v_idx + 2);
set_quad_tri_indices(thunk, v_idx + 3, v_idx + 4, v_idx + 5);
v_idx += 6;
}
else {
set_tri_indices(thunk, v_idx + 0, v_idx + 1, v_idx + 2);
v_idx += 3;
}
}
}
}
return v_idx;
}
void DRW_displist_vertbuf_create_pos_and_nor(ListBase *lb, GPUVertBuf *vbo, const Scene *scene)
{
const bool do_hq_normals = (scene->r.perf_flag & SCE_PERF_HQ_NORMALS) != 0 ||
@ -273,35 +224,6 @@ void DRW_displist_indexbuf_create_triangles_in_order(ListBase *lb, GPUIndexBuf *
GPU_indexbuf_build_in_place(&elb, ibo);
}
/**
 * Create one triangle index buffer per material slot, splitting the tessellated
 * triangles of each #DispList in \a lb by its material index (`dl->col`).
 *
 * \param lb: List of #DispList (the evaluated curve/surface tessellation).
 * \param ibo_mats: Array of `mat_len` index buffers, one per material slot.
 * \param mat_len: Number of material slots (and size of \a ibo_mats).
 */
void DRW_displist_indexbuf_create_triangles_loop_split_by_material(ListBase *lb,
                                                                   GPUIndexBuf **ibo_mats,
                                                                   uint mat_len)
{
  GPUIndexBufBuilder *elb = BLI_array_alloca(elb, mat_len);

  const int tri_len = curve_render_surface_tri_len_get(lb);

  /* Init each index buffer builder.
   * NOTE: #GPU_indexbuf_init takes the primitive count followed by the vertex count;
   * passing `tri_len * 3` as the primitive count would over-allocate the index
   * storage threefold (3 indices are reserved per GPU_PRIM_TRIS primitive). */
  for (uint i = 0; i < mat_len; i++) {
    GPU_indexbuf_init(&elb[i], GPU_PRIM_TRIS, tri_len, tri_len * 3);
  }

  /* Fill each index buffer builder. Vertices of consecutive display lists are
   * laid out contiguously, so thread the running vertex offset through. */
  uint v_idx = 0;
  LISTBASE_FOREACH (const DispList *, dl, lb) {
    v_idx = displist_indexbufbuilder_tess_set((SetTriIndicesFn *)GPU_indexbuf_add_tri_verts,
                                              (SetTriIndicesFn *)GPU_indexbuf_add_tri_verts,
                                              &elb[dl->col],
                                              dl,
                                              v_idx);
  }

  /* Build each index buffer in place. */
  for (uint i = 0; i < mat_len; i++) {
    GPU_indexbuf_build_in_place(&elb[i], ibo_mats[i]);
  }
}
static void set_overlay_wires_tri_indices(void *thunk, uint v1, uint v2, uint v3)
{
GPUIndexBufBuilder *eld = (GPUIndexBufBuilder *)thunk;
@ -335,433 +257,6 @@ void DRW_displist_indexbuf_create_lines_in_order(ListBase *lb, GPUIndexBuf *ibo)
GPU_indexbuf_build_in_place(&elb, ibo);
}
static void surf_uv_quad(const DispList *dl, const uint quad[4], float r_uv[4][2])
{
int orco_sizeu = dl->nr - 1;
int orco_sizev = dl->parts - 1;
/* exception as handled in convertblender.c too */
if (dl->flag & DL_CYCL_U) {
orco_sizeu++;
}
if (dl->flag & DL_CYCL_V) {
orco_sizev++;
}
for (int i = 0; i < 4; i++) {
/* NOTE: For some reason the shading U and V are swapped compared to the
* one described in the surface format. */
/* find uv based on vertex index into grid array */
r_uv[i][0] = (quad[i] / dl->nr) / (float)orco_sizev;
r_uv[i][1] = (quad[i] % dl->nr) / (float)orco_sizeu;
/* cyclic correction */
if (ELEM(i, 1, 2) && r_uv[i][0] == 0.0f) {
r_uv[i][0] = 1.0f;
}
if (ELEM(i, 0, 1) && r_uv[i][1] == 0.0f) {
r_uv[i][1] = 1.0f;
}
}
}
/**
 * Append three packed normals to a raw vertex-buffer stream, using either the
 * high-quality short representation or the compact packed one.
 */
static void displist_vertbuf_attr_set_nor(GPUVertBufRaw *step,
                                          const GPUNormal *n1,
                                          const GPUNormal *n2,
                                          const GPUNormal *n3,
                                          const bool do_hq_normals)
{
  const GPUNormal *normals[3] = {n1, n2, n3};
  for (int i = 0; i < 3; i++) {
    if (do_hq_normals) {
      copy_v3_v3_short(GPU_vertbuf_raw_step(step), normals[i]->high);
    }
    else {
      *(GPUPackedNormal *)GPU_vertbuf_raw_step(step) = normals[i]->low;
    }
  }
}
/**
 * Append one triangle (3 vertices) to the requested raw streams.
 *
 * A stream with `size == 0` was not requested and is skipped, so any subset of
 * position/normal, UV and tangent buffers can be filled in one pass.
 */
static void displist_vertbuf_attr_set_tri_pos_nor_uv(GPUVertBufRaw *pos_step,
                                                     GPUVertBufRaw *nor_step,
                                                     GPUVertBufRaw *uv_step,
                                                     GPUVertBufRaw *tan_step,
                                                     const float v1[3],
                                                     const float v2[3],
                                                     const float v3[3],
                                                     const GPUNormal *n1,
                                                     const GPUNormal *n2,
                                                     const GPUNormal *n3,
                                                     const GPUNormal *t1,
                                                     const GPUNormal *t2,
                                                     const GPUNormal *t3,
                                                     const float uv1[2],
                                                     const float uv2[2],
                                                     const float uv3[2],
                                                     const bool do_hq_normals)
{
  if (pos_step->size != 0) {
    const float *positions[3] = {v1, v2, v3};
    for (int i = 0; i < 3; i++) {
      copy_v3_v3(GPU_vertbuf_raw_step(pos_step), positions[i]);
    }
    /* Normals share the pos/nor buffer, so they are only written when it is. */
    displist_vertbuf_attr_set_nor(nor_step, n1, n2, n3, do_hq_normals);
  }
  if (uv_step->size != 0) {
    /* UVs are in [0..1], stored compressed as shorts. */
    const float *uvs[3] = {uv1, uv2, uv3};
    for (int i = 0; i < 3; i++) {
      normal_float_to_short_v2(GPU_vertbuf_raw_step(uv_step), uvs[i]);
    }
  }
  if (tan_step->size != 0) {
    displist_vertbuf_attr_set_nor(tan_step, t1, t2, t3, do_hq_normals);
  }
}
/**
 * Iterate over every quad of a DL_SURF display-list grid.
 *
 * Inside the loop body the following locals are in scope:
 * - `quad[4]`: the four grid vertex indices of the current quad.
 * - `quad_index`: running index of the current quad (iteration order matches
 *   #displist_surf_fnors_ensure and the tangent array layout).
 *
 * Cyclic surfaces (DL_CYCL_U / DL_CYCL_V) get an extra wrap-around column/row.
 * The END macro advances `quad` to the next column each iteration and closes
 * the scopes opened here; BEGIN and END must always be paired.
 */
#define SURFACE_QUAD_ITER_BEGIN(dl) \
  { \
    uint quad[4]; \
    int quad_index = 0; \
    int max_v = (dl->flag & DL_CYCL_V) ? dl->parts : (dl->parts - 1); \
    int max_u = (dl->flag & DL_CYCL_U) ? dl->nr : (dl->nr - 1); \
    for (int v = 0; v < max_v; v++) { \
      quad[3] = dl->nr * v; \
      quad[0] = quad[3] + 1; \
      quad[2] = quad[3] + dl->nr; \
      quad[1] = quad[0] + dl->nr; \
      /* Cyclic wrap */ \
      if (v == dl->parts - 1) { \
        quad[1] -= dl->parts * dl->nr; \
        quad[2] -= dl->parts * dl->nr; \
      } \
      for (int u = 0; u < max_u; u++, quad_index++) { \
        /* Cyclic wrap */ \
        if (u == dl->nr - 1) { \
          quad[0] -= dl->nr; \
          quad[1] -= dl->nr; \
        }

/* Shift the quad one column to the right, then close the scopes opened by
 * #SURFACE_QUAD_ITER_BEGIN. */
#define SURFACE_QUAD_ITER_END \
  quad[2] = quad[1]; \
  quad[1]++; \
  quad[3] = quad[0]; \
  quad[0]++; \
  } \
  } \
  }
/**
 * Compute flat (per-quad) face normals for a DL_SURF display list.
 *
 * \param dl: Surface display list; `dl->verts` is the grid of vertex positions.
 * \param fnors: Output: set to a newly allocated array with one normal per quad,
 * in #SURFACE_QUAD_ITER_BEGIN iteration order. The caller owns the allocation
 * (freed with MEM_SAFE_FREE by the callers in this file).
 */
static void displist_surf_fnors_ensure(const DispList *dl, float (**fnors)[3])
{
  /* Quad counts per direction: cyclic surfaces have a wrap-around quad. */
  int u_len = dl->nr - ((dl->flag & DL_CYCL_U) ? 0 : 1);
  int v_len = dl->parts - ((dl->flag & DL_CYCL_V) ? 0 : 1);
  const float(*verts)[3] = (const float(*)[3])dl->verts;
  float(*nor_flat)[3] = MEM_mallocN(sizeof(float[3]) * u_len * v_len, __func__);

  *fnors = nor_flat;

  SURFACE_QUAD_ITER_BEGIN (dl) {
    /* One flat normal per quad, advancing in quad iteration order. */
    normal_quad_v3(*nor_flat, verts[quad[0]], verts[quad[1]], verts[quad[2]], verts[quad[3]]);
    nor_flat++;
  }
  SURFACE_QUAD_ITER_END
}
/**
 * Fill the "loop" vertex buffers (position+normal, UV, tangent) for the
 * tessellated triangles of a legacy curve/surface #DispList list.
 *
 * Vertices are written 3 per lone triangle and 6 per quad, in the same order
 * that #displist_indexbufbuilder_tess_set assumes. Each VBO is only allocated
 * and filled if it was requested by the draw manager.
 *
 * \param lb: List of #DispList.
 * \param vbo_pos_nor: Position + normal buffer (may be unrequested).
 * \param vbo_uv: UV buffer (may be unrequested).
 * \param vbo_tan: Tangent buffer (may be unrequested).
 * \param scene: Used to decide whether high-quality (16-bit) normals are needed.
 */
void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv_and_tan(ListBase *lb,
                                                                 GPUVertBuf *vbo_pos_nor,
                                                                 GPUVertBuf *vbo_uv,
                                                                 GPUVertBuf *vbo_tan,
                                                                 const Scene *scene)
{
  /* HQ normals use 16-bit shorts instead of the packed 10-bit representation. */
  const bool do_hq_normals = (scene->r.perf_flag & SCE_PERF_HQ_NORMALS) != 0 ||
                             GPU_use_hq_normals_workaround();

  /* Vertex formats are static: built once and shared by all objects. */
  static GPUVertFormat format_pos_nor = {0};
  static GPUVertFormat format_pos_nor_hq = {0};
  static GPUVertFormat format_uv = {0};
  static GPUVertFormat format_tan = {0};
  static GPUVertFormat format_tan_hq = {0};
  static struct {
    uint pos, nor, uv, tan;
    uint pos_hq, nor_hq, tan_hq;
  } attr_id;
  if (format_pos_nor.attr_len == 0) {
    /* initialize vertex format */
    attr_id.pos = GPU_vertformat_attr_add(
        &format_pos_nor, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    attr_id.nor = GPU_vertformat_attr_add(
        &format_pos_nor, "nor", GPU_COMP_I10, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
    attr_id.pos_hq = GPU_vertformat_attr_add(
        &format_pos_nor_hq, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    attr_id.nor_hq = GPU_vertformat_attr_add(
        &format_pos_nor_hq, "nor", GPU_COMP_I16, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
    /* UVs are in [0..1] range. We can compress them. */
    attr_id.uv = GPU_vertformat_attr_add(
        &format_uv, "u", GPU_COMP_I16, 2, GPU_FETCH_INT_TO_FLOAT_UNIT);
    GPU_vertformat_alias_add(&format_uv, "au");
    attr_id.tan = GPU_vertformat_attr_add(
        &format_tan, "t", GPU_COMP_I10, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
    GPU_vertformat_alias_add(&format_tan, "at");
    attr_id.tan_hq = GPU_vertformat_attr_add(
        &format_tan_hq, "t", GPU_COMP_I16, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
    GPU_vertformat_alias_add(&format_tan_hq, "at");
  }
  uint pos_id = do_hq_normals ? attr_id.pos_hq : attr_id.pos;
  uint nor_id = do_hq_normals ? attr_id.nor_hq : attr_id.nor;
  uint tan_id = do_hq_normals ? attr_id.tan_hq : attr_id.tan;

  /* Worst-case capacity; unused tail is trimmed at the end. */
  int vbo_len_capacity = curve_render_surface_tri_len_get(lb) * 3;

  GPUVertBufRaw pos_step = {0};
  GPUVertBufRaw nor_step = {0};
  GPUVertBufRaw uv_step = {0};
  GPUVertBufRaw tan_step = {0};

/* Null out VBO pointers that were not requested, so later `if (vbo_*)` checks
 * also mean "was requested". */
#define DRW_TEST_ASSIGN_VBO(v) (v = (DRW_vbo_requested(v) ? (v) : NULL))

  if (DRW_TEST_ASSIGN_VBO(vbo_pos_nor)) {
    GPU_vertbuf_init_with_format(vbo_pos_nor,
                                 do_hq_normals ? &format_pos_nor_hq : &format_pos_nor);
    GPU_vertbuf_data_alloc(vbo_pos_nor, vbo_len_capacity);
    GPU_vertbuf_attr_get_raw_data(vbo_pos_nor, pos_id, &pos_step);
    GPU_vertbuf_attr_get_raw_data(vbo_pos_nor, nor_id, &nor_step);
  }
  if (DRW_TEST_ASSIGN_VBO(vbo_uv)) {
    GPU_vertbuf_init_with_format(vbo_uv, &format_uv);
    GPU_vertbuf_data_alloc(vbo_uv, vbo_len_capacity);
    GPU_vertbuf_attr_get_raw_data(vbo_uv, attr_id.uv, &uv_step);
  }
  if (DRW_TEST_ASSIGN_VBO(vbo_tan)) {
    GPU_vertbuf_init_with_format(vbo_tan, do_hq_normals ? &format_tan_hq : &format_tan);
    GPU_vertbuf_data_alloc(vbo_tan, vbo_len_capacity);
    GPU_vertbuf_attr_get_raw_data(vbo_tan, tan_id, &tan_step);
  }
#undef DRW_TEST_ASSIGN_VBO

  /* Make sure `dl->nors` is available for the smooth-shading paths below. */
  BKE_displist_normals_add(lb);

  LISTBASE_FOREACH (const DispList *, dl, lb) {
    const bool is_smooth = (dl->rt & CU_SMOOTH) != 0;
    if (ELEM(dl->type, DL_INDEX3, DL_INDEX4, DL_SURF)) {
      const float(*verts)[3] = (const float(*)[3])dl->verts;
      const float(*nors)[3] = (const float(*)[3])dl->nors;
      const int *idx = dl->index;
      float uv[4][2];

      if (dl->type == DL_INDEX3) {
        /* Currently 'DL_INDEX3' is always a flat surface with a single normal. */
        GPUNormal tangent_packed;
        GPUNormal normal_packed;
        GPU_normal_convert_v3(&normal_packed, dl->nors, do_hq_normals);
        if (vbo_tan) {
          float tan[4];
          float(*tan_ptr)[4] = &tan;
          /* NOTE(review): this writes the tangent into the single stack `tan` —
           * presumably DL_INDEX3 produces one tangent for the flat surface;
           * confirm against BKE_displist_tangent_calc. */
          BKE_displist_tangent_calc(dl, NULL, &tan_ptr);
          GPU_normal_convert_v3(&tangent_packed, tan, do_hq_normals);
          /* NOTE(review): `high` is overwritten unconditionally here; redundant
           * in the HQ path and unused in the packed path — looks harmless. */
          normal_float_to_short_v3(tangent_packed.high, tan);
        }
        else {
          /* No tangent buffer requested: fill with a neutral value. */
          if (do_hq_normals) {
            tangent_packed.high[0] = 0;
            tangent_packed.high[1] = 0;
            tangent_packed.high[2] = 0;
          }
          else {
            tangent_packed.low = (GPUPackedNormal){0, 0, 0, 1};
          }
        }
        const float x_max = (float)(dl->nr - 1);
        uv[0][1] = uv[1][1] = uv[2][1] = 0.0f;
        const int i_end = dl->parts;
        for (int i = 0; i < i_end; i++, idx += 3) {
          if (vbo_uv) {
            /* U is derived from the vertex index along the outline. */
            uv[0][0] = idx[0] / x_max;
            uv[1][0] = idx[1] / x_max;
            uv[2][0] = idx[2] / x_max;
          }
          /* Winding is flipped (0, 2, 1) — presumably to get front faces
           * pointing the right way for fill triangles. */
          displist_vertbuf_attr_set_tri_pos_nor_uv(&pos_step,
                                                   &nor_step,
                                                   &uv_step,
                                                   &tan_step,
                                                   verts[idx[0]],
                                                   verts[idx[2]],
                                                   verts[idx[1]],
                                                   &normal_packed,
                                                   &normal_packed,
                                                   &normal_packed,
                                                   &tangent_packed,
                                                   &tangent_packed,
                                                   &tangent_packed,
                                                   uv[0],
                                                   uv[2],
                                                   uv[1],
                                                   do_hq_normals);
        }
      }
      else if (dl->type == DL_SURF) {
        float(*tangents)[4] = NULL;
        float(*fnors)[3] = NULL;

        /* Flat shading needs per-quad face normals. */
        if (!is_smooth) {
          displist_surf_fnors_ensure(dl, &fnors);
        }

        if (vbo_tan) {
          BKE_displist_tangent_calc(dl, fnors, &tangents);
        }

        SURFACE_QUAD_ITER_BEGIN (dl) {
          if (vbo_uv) {
            surf_uv_quad(dl, quad, uv);
          }

          GPUNormal pnors_quad[4];
          GPUNormal ptans_quad[4];

          if (is_smooth) {
            /* Smooth: one vertex normal per corner. */
            for (int j = 0; j < 4; j++) {
              GPU_normal_convert_v3(&pnors_quad[j], nors[quad[j]], do_hq_normals);
            }
          }
          else {
            /* Flat: all four corners share the quad's face normal. */
            GPU_normal_convert_v3(&pnors_quad[0], fnors[quad_index], do_hq_normals);
            pnors_quad[1] = pnors_quad[2] = pnors_quad[3] = pnors_quad[0];
          }
          if (vbo_tan) {
            /* Tangents are stored 4 per quad, in quad iteration order. */
            for (int j = 0; j < 4; j++) {
              float *tan = tangents[quad_index * 4 + j];
              GPU_normal_convert_v3(&ptans_quad[j], tan, do_hq_normals);
            }
          }

          /* First triangle of the quad: corners 2, 0, 1. */
          displist_vertbuf_attr_set_tri_pos_nor_uv(&pos_step,
                                                   &nor_step,
                                                   &uv_step,
                                                   &tan_step,
                                                   verts[quad[2]],
                                                   verts[quad[0]],
                                                   verts[quad[1]],
                                                   &pnors_quad[2],
                                                   &pnors_quad[0],
                                                   &pnors_quad[1],
                                                   &ptans_quad[2],
                                                   &ptans_quad[0],
                                                   &ptans_quad[1],
                                                   uv[2],
                                                   uv[0],
                                                   uv[1],
                                                   do_hq_normals);

          /* Second triangle of the quad: corners 0, 2, 3. */
          displist_vertbuf_attr_set_tri_pos_nor_uv(&pos_step,
                                                   &nor_step,
                                                   &uv_step,
                                                   &tan_step,
                                                   verts[quad[0]],
                                                   verts[quad[2]],
                                                   verts[quad[3]],
                                                   &pnors_quad[0],
                                                   &pnors_quad[2],
                                                   &pnors_quad[3],
                                                   &ptans_quad[0],
                                                   &ptans_quad[2],
                                                   &ptans_quad[3],
                                                   uv[0],
                                                   uv[2],
                                                   uv[3],
                                                   do_hq_normals);
        }
        SURFACE_QUAD_ITER_END

        MEM_SAFE_FREE(tangents);
        MEM_SAFE_FREE(fnors);
      }
      else {
        BLI_assert(dl->type == DL_INDEX4);
        /* Fixed unit-square UVs for index-4 faces. */
        uv[0][0] = uv[0][1] = uv[1][0] = uv[3][1] = 0.0f;
        uv[1][1] = uv[2][0] = uv[2][1] = uv[3][0] = 1.0f;

        const int i_end = dl->parts;
        for (int i = 0; i < i_end; i++, idx += 4) {
          /* A face with its last two indices equal is a triangle. */
          const bool is_tri = idx[2] != idx[3];

          /* This path emits no tangents: use a neutral zero value. */
          GPUNormal ptan = {0};
          GPUNormal pnors_idx[4];
          if (is_smooth) {
            int idx_len = is_tri ? 3 : 4;
            for (int j = 0; j < idx_len; j++) {
              GPU_normal_convert_v3(&pnors_idx[j], nors[idx[j]], do_hq_normals);
            }
          }
          else {
            /* Flat: compute one face normal and share it between corners. */
            float nor_flat[3];
            if (is_tri) {
              normal_tri_v3(nor_flat, verts[idx[0]], verts[idx[1]], verts[idx[2]]);
            }
            else {
              normal_quad_v3(nor_flat, verts[idx[0]], verts[idx[1]], verts[idx[2]], verts[idx[3]]);
            }
            GPU_normal_convert_v3(&pnors_idx[0], nor_flat, do_hq_normals);
            pnors_idx[1] = pnors_idx[2] = pnors_idx[3] = pnors_idx[0];
          }

          /* First (or only) triangle: corners 0, 2, 1 (flipped winding). */
          displist_vertbuf_attr_set_tri_pos_nor_uv(&pos_step,
                                                   &nor_step,
                                                   &uv_step,
                                                   &tan_step,
                                                   verts[idx[0]],
                                                   verts[idx[2]],
                                                   verts[idx[1]],
                                                   &pnors_idx[0],
                                                   &pnors_idx[2],
                                                   &pnors_idx[1],
                                                   &ptan,
                                                   &ptan,
                                                   &ptan,
                                                   uv[0],
                                                   uv[2],
                                                   uv[1],
                                                   do_hq_normals);

          if (is_tri) {
            /* Second triangle of the quad: corners 2, 0, 3. */
            displist_vertbuf_attr_set_tri_pos_nor_uv(&pos_step,
                                                     &nor_step,
                                                     &uv_step,
                                                     &tan_step,
                                                     verts[idx[2]],
                                                     verts[idx[0]],
                                                     verts[idx[3]],
                                                     &pnors_idx[2],
                                                     &pnors_idx[0],
                                                     &pnors_idx[3],
                                                     &ptan,
                                                     &ptan,
                                                     &ptan,
                                                     uv[2],
                                                     uv[0],
                                                     uv[3],
                                                     do_hq_normals);
          }
        }
      }
    }
  }

  /* Resize and finish. */
  if (pos_step.size != 0) {
    int vbo_len_used = GPU_vertbuf_raw_used(&pos_step);
    if (vbo_len_used < vbo_len_capacity) {
      GPU_vertbuf_data_resize(vbo_pos_nor, vbo_len_used);
    }
  }
  if (uv_step.size != 0) {
    int vbo_len_used = GPU_vertbuf_raw_used(&uv_step);
    if (vbo_len_used < vbo_len_capacity) {
      GPU_vertbuf_data_resize(vbo_uv, vbo_len_used);
    }
  }
}
/* Edge detection/adjacency. */
#define NO_EDGE INT_MAX
static void set_edge_adjacency_lines_indices(

View File

@ -88,8 +88,8 @@ filter_supported_objects(Depsgraph *depsgraph, const OBJExportParams &export_par
}
switch (object->type) {
case OB_SURF:
/* Export in mesh form: vertices and polygons. */
ATTR_FALLTHROUGH;
/* Evaluated surface objects appear as mesh objects from the iterator. */
break;
case OB_MESH:
r_exportable_meshes.append(std::make_unique<OBJMesh>(depsgraph, export_params, object));
break;

View File

@ -82,7 +82,7 @@ class obj_importer_test : public BlendfileLoadingBaseTest {
EXPECT_V3_NEAR(object->rot, float3(M_PI_2, 0, 0), 0.0001f);
}
EXPECT_V3_NEAR(object->scale, float3(1, 1, 1), 0.0001f);
if (object->type == OB_MESH || object->type == OB_SURF) {
if (object->type == OB_MESH) {
Mesh *mesh = BKE_object_get_evaluated_mesh(object);
EXPECT_EQ(mesh->totvert, exp.totvert);
EXPECT_EQ(mesh->totedge, exp.mesh_totedge_or_curve_endp);