OpenSubdiv: Cleanup, Remove from legacy SubsurfCCG code

The CPU side implementation is done on a new dedicated base ground.
The GPU side must be redone anyway.
This commit is contained in:
Sergey Sharybin 2020-05-18 16:42:59 +02:00
parent 151fb62190
commit 54665233eb
13 changed files with 58 additions and 2218 deletions

View File

@ -194,6 +194,12 @@ typedef struct Subdiv {
} cache_;
} Subdiv;
/* =================----====--===== MODULE ==========================------== */
/* (De)initialize the entire subdivision surface module. */
void BKE_subdiv_init(void);
void BKE_subdiv_exit(void);
/* ========================== CONVERSION HELPERS ============================ */
/* NOTE: uv_smooth is eSubsurfUVSmooth. */

View File

@ -103,7 +103,7 @@ typedef struct CCGDerivedMesh {
struct CCGSubSurf *ss;
int freeSS;
int drawInteriorEdges, useSubsurfUv, useGpuBackend;
int drawInteriorEdges, useSubsurfUv;
struct {
int startVert;
@ -156,13 +156,6 @@ typedef struct CCGDerivedMesh {
ThreadRWMutex origindex_cache_rwlock;
} CCGDerivedMesh;
#ifdef WITH_OPENSUBDIV
/* TODO(sergey): Not really ideal place, but we don't currently have better one. */
void BKE_subsurf_osd_init(void);
void BKE_subsurf_free_unused_buffers(void);
void BKE_subsurf_osd_cleanup(void);
#endif
#ifdef __cplusplus
}
#endif

View File

@ -64,8 +64,6 @@ set(SRC
${CMAKE_SOURCE_DIR}/release/datafiles/userdef/userdef_default.c
intern/CCGSubSurf.c
intern/CCGSubSurf_legacy.c
intern/CCGSubSurf_opensubdiv.c
intern/CCGSubSurf_opensubdiv_converter.c
intern/CCGSubSurf_util.c
intern/DerivedMesh.c
intern/action.c

View File

@ -32,13 +32,6 @@
#include "CCGSubSurf.h"
#include "CCGSubSurf_intern.h"
#ifdef WITH_OPENSUBDIV
# include "opensubdiv_capi.h"
# include "opensubdiv_converter_capi.h"
# include "opensubdiv_evaluator_capi.h"
# include "opensubdiv_topology_refiner_capi.h"
#endif
#include "GPU_glew.h"
/***/
@ -305,21 +298,6 @@ CCGSubSurf *ccgSubSurf_new(CCGMeshIFC *ifc,
ss->tempVerts = NULL;
ss->tempEdges = NULL;
#ifdef WITH_OPENSUBDIV
ss->osd_evaluator = NULL;
ss->osd_mesh = NULL;
ss->osd_topology_refiner = NULL;
ss->osd_mesh_invalid = false;
ss->osd_coarse_coords_invalid = false;
ss->osd_vao = 0;
ss->skip_grids = false;
ss->osd_compute = 0;
ss->osd_next_face_ptex_index = 0;
ss->osd_coarse_coords = NULL;
ss->osd_num_coarse_coords = 0;
ss->osd_subdiv_uvs = false;
#endif
return ss;
}
}
@ -328,23 +306,6 @@ void ccgSubSurf_free(CCGSubSurf *ss)
{
CCGAllocatorIFC allocatorIFC = ss->allocatorIFC;
CCGAllocatorHDL allocator = ss->allocator;
#ifdef WITH_OPENSUBDIV
if (ss->osd_evaluator != NULL) {
openSubdiv_deleteEvaluator(ss->osd_evaluator);
}
if (ss->osd_mesh != NULL) {
ccgSubSurf__delete_osdGLMesh(ss->osd_mesh);
}
if (ss->osd_vao != 0) {
ccgSubSurf__delete_vertex_array(ss->osd_vao);
}
if (ss->osd_coarse_coords != NULL) {
MEM_freeN(ss->osd_coarse_coords);
}
if (ss->osd_topology_refiner != NULL) {
openSubdiv_deleteTopologyRefiner(ss->osd_topology_refiner);
}
#endif
if (ss->syncState) {
ccg_ehash_free(ss->oldFMap, (EHEntryFreeFP)_face_free, ss);
@ -529,9 +490,6 @@ CCGError ccgSubSurf_initFullSync(CCGSubSurf *ss)
ss->tempEdges = MEM_mallocN(sizeof(*ss->tempEdges) * ss->lenTempArrays, "CCGSubsurf tempEdges");
ss->syncState = eSyncState_Vert;
#ifdef WITH_OPENSUBDIV
ss->osd_next_face_ptex_index = 0;
#endif
return eCCGError_None;
}
@ -671,9 +629,6 @@ CCGError ccgSubSurf_syncVert(
ccg_ehash_insert(ss->vMap, (EHEntry *)v);
v->flags = 0;
}
#ifdef WITH_OPENSUBDIV
v->osd_index = ss->vMap->numEntries - 1;
#endif
}
if (v_r) {
@ -874,15 +829,6 @@ CCGError ccgSubSurf_syncFace(
}
}
}
#ifdef WITH_OPENSUBDIV
f->osd_index = ss->osd_next_face_ptex_index;
if (numVerts == 4) {
ss->osd_next_face_ptex_index++;
}
else {
ss->osd_next_face_ptex_index += numVerts;
}
#endif
}
if (f_r) {
@ -893,15 +839,7 @@ CCGError ccgSubSurf_syncFace(
static void ccgSubSurf__sync(CCGSubSurf *ss)
{
#ifdef WITH_OPENSUBDIV
if (ss->skip_grids) {
ccgSubSurf__sync_opensubdiv(ss);
}
else
#endif
{
ccgSubSurf__sync_legacy(ss);
}
ccgSubSurf__sync_legacy(ss);
}
CCGError ccgSubSurf_processSync(CCGSubSurf *ss)
@ -1615,12 +1553,6 @@ int ccgSubSurf_getNumFinalVerts(const CCGSubSurf *ss)
ss->fMap->numEntries +
ss->numGrids * ((gridSize - 2) + ((gridSize - 2) * (gridSize - 2))));
#ifdef WITH_OPENSUBDIV
if (ss->skip_grids) {
return 0;
}
#endif
return numFinalVerts;
}
int ccgSubSurf_getNumFinalEdges(const CCGSubSurf *ss)
@ -1629,22 +1561,12 @@ int ccgSubSurf_getNumFinalEdges(const CCGSubSurf *ss)
int gridSize = ccg_gridsize(ss->subdivLevels);
int numFinalEdges = (ss->eMap->numEntries * (edgeSize - 1) +
ss->numGrids * ((gridSize - 1) + 2 * ((gridSize - 2) * (gridSize - 1))));
#ifdef WITH_OPENSUBDIV
if (ss->skip_grids) {
return 0;
}
#endif
return numFinalEdges;
}
int ccgSubSurf_getNumFinalFaces(const CCGSubSurf *ss)
{
int gridSize = ccg_gridsize(ss->subdivLevels);
int numFinalFaces = ss->numGrids * ((gridSize - 1) * (gridSize - 1));
#ifdef WITH_OPENSUBDIV
if (ss->skip_grids) {
return 0;
}
#endif
return numFinalFaces;
}

View File

@ -211,57 +211,4 @@ CCGFace *ccgFaceIterator_getCurrent(CCGFaceIterator *fi);
int ccgFaceIterator_isStopped(CCGFaceIterator *fi);
void ccgFaceIterator_next(CCGFaceIterator *fi);
#ifdef WITH_OPENSUBDIV
struct DerivedMesh;
/* Check if topology changed and evaluators are to be re-created. */
void ccgSubSurf_checkTopologyChanged(CCGSubSurf *ss, struct DerivedMesh *dm);
/* Create topology refiner from give derived mesh which then later will be
* used for GL mesh creation.
*/
void ccgSubSurf_prepareTopologyRefiner(CCGSubSurf *ss, struct DerivedMesh *dm);
/* Make sure GL mesh exists, up to date and ready to draw. */
bool ccgSubSurf_prepareGLMesh(CCGSubSurf *ss, bool use_osd_glsl, int active_uv_index);
/* Draw given partitions of the GL mesh.
*
* TODO(sergey): fill_quads is actually an invariant and should be part
* of the prepare routine.
*/
void ccgSubSurf_drawGLMesh(CCGSubSurf *ss,
bool fill_quads,
int start_partition,
int num_partitions);
/* Get number of base faces in a particular GL mesh. */
int ccgSubSurf_getNumGLMeshBaseFaces(CCGSubSurf *ss);
/* Get number of vertices in base faces in a particular GL mesh. */
int ccgSubSurf_getNumGLMeshBaseFaceVerts(CCGSubSurf *ss, int face);
/* Controls whether CCG are needed (meaning CPU evaluation) or fully GPU compute
* and draw is allowed.
*/
void ccgSubSurf_setSkipGrids(CCGSubSurf *ss, bool skip_grids);
bool ccgSubSurf_needGrids(CCGSubSurf *ss);
/* Set evaluator's face varying data from UV coordinates.
* Used for CPU evaluation.
*/
void ccgSubSurf_evaluatorSetFVarUV(CCGSubSurf *ss, struct DerivedMesh *dm, int layer_index);
/* TODO(sergey): Temporary call to test things. */
void ccgSubSurf_evaluatorFVarUV(
CCGSubSurf *ss, int face_index, int S, float grid_u, float grid_v, float uv[2]);
void ccgSubSurf_free_osd_mesh(CCGSubSurf *ss);
void ccgSubSurf_getMinMax(CCGSubSurf *ss, float r_min[3], float r_max[3]);
void ccgSubSurf__sync_subdivUvs(CCGSubSurf *ss, bool subsurf_uvs);
#endif
#endif /* __CCGSUBSURF_H__ */

View File

@ -157,9 +157,6 @@ typedef enum {
eSyncState_Edge,
eSyncState_Face,
eSyncState_Partial,
#ifdef WITH_OPENSUBDIV
eSyncState_OpenSubdiv,
#endif
} SyncState;
struct CCGSubSurf {
@ -202,58 +199,6 @@ struct CCGSubSurf {
int lenTempArrays;
CCGVert **tempVerts;
CCGEdge **tempEdges;
#ifdef WITH_OPENSUBDIV
/* Skip grids means no CCG geometry is created and subsurf is possible
* to be completely done on GPU.
*/
bool skip_grids;
/* ** GPU backend. ** */
/* Compute device used by GL mesh. */
short osd_compute;
/* Coarse (base mesh) vertex coordinates.
*
* Filled in from the modifier stack and passed to OpenSubdiv compute
* on mesh display.
*/
float (*osd_coarse_coords)[3];
int osd_num_coarse_coords;
/* Denotes whether coarse positions in the GL mesh are invalid.
* Used to avoid updating GL mesh coords on every redraw.
*/
bool osd_coarse_coords_invalid;
/* GL mesh descriptor, used for refinement and draw. */
struct OpenSubdiv_GLMesh *osd_mesh;
/* Refiner which is used to create GL mesh.
*
* Refiner is created from the modifier stack and used later from the main
* thread to construct GL mesh to avoid threaded access to GL.
*/
struct OpenSubdiv_TopologyRefiner
*osd_topology_refiner; /* Only used at synchronization stage. */
/* Denotes whether osd_mesh is invalid now due to topology changes and needs
* to be reconstructed.
*
* Reconstruction happens from main thread due to OpenGL communication.
*/
bool osd_mesh_invalid;
/* Vertex array used for osd_mesh draw. */
unsigned int osd_vao;
/* ** CPU backend. ** */
/* Limit evaluator, used to evaluate CCG. */
struct OpenSubdiv_Evaluator *osd_evaluator;
/* Next PTex face index, used while CCG synchronization
* to fill in PTex index of CCGFace.
*/
int osd_next_face_ptex_index;
bool osd_subdiv_uvs;
#endif
};
/* ** Utility macros ** */
@ -322,16 +267,6 @@ void ccgSubSurf__sync_legacy(CCGSubSurf *ss);
void ccgSubSurf__sync_opensubdiv(CCGSubSurf *ss);
/* Delayed free routines. Will do actual free if called from
* main thread and schedule free for later free otherwise.
*/
#ifdef WITH_OPENSUBDIV
void ccgSubSurf__delete_osdGLMesh(struct OpenSubdiv_GLMesh *osd_mesh);
void ccgSubSurf__delete_vertex_array(unsigned int vao);
void ccgSubSurf__delete_pending(void);
#endif
/* * CCGSubSurf_opensubdiv_converter.c * */
struct OpenSubdiv_Converter;

View File

@ -1,970 +0,0 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/** \file
* \ingroup bke
*/
#ifdef WITH_OPENSUBDIV
# include "BLI_sys_types.h" // for intptr_t support
# include "MEM_guardedalloc.h"
# include "BLI_listbase.h"
# include "BLI_math.h"
# include "BLI_threads.h"
# include "BLI_utildefines.h" /* for BLI_assert */
# include "CCGSubSurf.h"
# include "CCGSubSurf_intern.h"
# include "BKE_DerivedMesh.h"
# include "BKE_subsurf.h"
# include "DNA_userdef_types.h"
# include "opensubdiv_capi.h"
# include "opensubdiv_converter_capi.h"
# include "opensubdiv_evaluator_capi.h"
# include "opensubdiv_gl_mesh_capi.h"
# include "opensubdiv_topology_refiner_capi.h"
# include "GPU_extensions.h"
# include "GPU_glew.h"
# define OSD_LOG \
if (false) \
printf
static bool compare_ccg_derivedmesh_topology(CCGSubSurf *ss, DerivedMesh *dm)
{
const int num_verts = dm->getNumVerts(dm);
const int num_edges = dm->getNumEdges(dm);
const int num_polys = dm->getNumPolys(dm);
const MEdge *medge = dm->getEdgeArray(dm);
const MLoop *mloop = dm->getLoopArray(dm);
const MPoly *mpoly = dm->getPolyArray(dm);
/* Quick preliminary tests based on the number of verts and faces. */
{
if (num_verts != ss->vMap->numEntries || num_edges != ss->eMap->numEntries ||
num_polys != ss->fMap->numEntries) {
return false;
}
}
/* Rather slow check for faces topology change. */
{
CCGFaceIterator ccg_face_iter;
for (ccgSubSurf_initFaceIterator(ss, &ccg_face_iter);
!ccgFaceIterator_isStopped(&ccg_face_iter);
ccgFaceIterator_next(&ccg_face_iter)) {
/*const*/ CCGFace *ccg_face = ccgFaceIterator_getCurrent(&ccg_face_iter);
const int poly_index = POINTER_AS_INT(ccgSubSurf_getFaceFaceHandle(ccg_face));
const MPoly *mp = &mpoly[poly_index];
int corner;
if (ccg_face->numVerts != mp->totloop) {
return false;
}
for (corner = 0; corner < ccg_face->numVerts; corner++) {
/*const*/ CCGVert *ccg_vert = FACE_getVerts(ccg_face)[corner];
const int vert_index = POINTER_AS_INT(ccgSubSurf_getVertVertHandle(ccg_vert));
if (vert_index != mloop[mp->loopstart + corner].v) {
return false;
}
}
}
}
/* Check for edge topology change. */
{
CCGEdgeIterator ccg_edge_iter;
for (ccgSubSurf_initEdgeIterator(ss, &ccg_edge_iter);
!ccgEdgeIterator_isStopped(&ccg_edge_iter);
ccgEdgeIterator_next(&ccg_edge_iter)) {
/* const */ CCGEdge *ccg_edge = ccgEdgeIterator_getCurrent(&ccg_edge_iter);
/* const */ CCGVert *ccg_vert1 = ccg_edge->v0;
/* const */ CCGVert *ccg_vert2 = ccg_edge->v1;
const int ccg_vert1_index = POINTER_AS_INT(ccgSubSurf_getVertVertHandle(ccg_vert1));
const int ccg_vert2_index = POINTER_AS_INT(ccgSubSurf_getVertVertHandle(ccg_vert2));
const int edge_index = POINTER_AS_INT(ccgSubSurf_getEdgeEdgeHandle(ccg_edge));
const MEdge *me = &medge[edge_index];
if (me->v1 != ccg_vert1_index || me->v2 != ccg_vert2_index) {
return false;
}
}
}
/* TODO(sergey): Crease topology changes detection. */
{
CCGEdgeIterator ccg_edge_iter;
for (ccgSubSurf_initEdgeIterator(ss, &ccg_edge_iter);
!ccgEdgeIterator_isStopped(&ccg_edge_iter);
ccgEdgeIterator_next(&ccg_edge_iter)) {
/* const */ CCGEdge *ccg_edge = ccgEdgeIterator_getCurrent(&ccg_edge_iter);
const int edge_index = POINTER_AS_INT(ccgSubSurf_getEdgeEdgeHandle(ccg_edge));
if (ccg_edge->crease != medge[edge_index].crease) {
return false;
}
}
}
return true;
}
static bool compare_osd_derivedmesh_topology(CCGSubSurf *ss, DerivedMesh *dm)
{
OpenSubdiv_Converter converter;
bool result;
if (ss->osd_mesh == NULL && ss->osd_topology_refiner == NULL) {
return true;
}
/* TODO(sergey): De-duplicate with topology counter at the bottom of
* the file.
*/
ccgSubSurf_converter_setup_from_derivedmesh(ss, dm, &converter);
result = openSubdiv_topologyRefinerCompareWithConverter(ss->osd_topology_refiner, &converter);
ccgSubSurf_converter_free(&converter);
return result;
}
static bool opensubdiv_is_topology_changed(CCGSubSurf *ss, DerivedMesh *dm)
{
if (ss->osd_compute != U.opensubdiv_compute_type) {
return true;
}
if (ss->osd_topology_refiner != NULL) {
const int levels = ss->osd_topology_refiner->getSubdivisionLevel(ss->osd_topology_refiner);
BLI_assert(ss->osd_mesh_invalid == true);
if (levels != ss->subdivLevels) {
return true;
}
}
if (ss->skip_grids == false) {
return compare_ccg_derivedmesh_topology(ss, dm) == false;
}
else {
return compare_osd_derivedmesh_topology(ss, dm) == false;
}
return false;
}
void ccgSubSurf_checkTopologyChanged(CCGSubSurf *ss, DerivedMesh *dm)
{
if (opensubdiv_is_topology_changed(ss, dm)) {
/* ** Make sure both GPU and CPU backends are properly reset. ** */
ss->osd_coarse_coords_invalid = true;
/* Reset GPU part. */
ss->osd_mesh_invalid = true;
if (ss->osd_topology_refiner != NULL) {
openSubdiv_deleteTopologyRefiner(ss->osd_topology_refiner);
ss->osd_topology_refiner = NULL;
}
/* Reset CPU side. */
if (ss->osd_evaluator != NULL) {
openSubdiv_deleteEvaluator(ss->osd_evaluator);
ss->osd_evaluator = NULL;
}
}
}
static void ccgSubSurf__updateGLMeshCoords(CCGSubSurf *ss)
{
BLI_assert(ss->meshIFC.numLayers == 3);
ss->osd_mesh->setCoarsePositions(
ss->osd_mesh, (float *)ss->osd_coarse_coords, 0, ss->osd_num_coarse_coords);
}
bool ccgSubSurf_prepareGLMesh(CCGSubSurf *ss, bool use_osd_glsl, int active_uv_index)
{
int compute_type;
switch (U.opensubdiv_compute_type) {
# define CHECK_COMPUTE_TYPE(type) \
case USER_OPENSUBDIV_COMPUTE_##type: \
compute_type = OPENSUBDIV_EVALUATOR_##type; \
break;
CHECK_COMPUTE_TYPE(CPU)
CHECK_COMPUTE_TYPE(OPENMP)
CHECK_COMPUTE_TYPE(OPENCL)
CHECK_COMPUTE_TYPE(CUDA)
CHECK_COMPUTE_TYPE(GLSL_TRANSFORM_FEEDBACK)
CHECK_COMPUTE_TYPE(GLSL_COMPUTE)
default:
compute_type = OPENSUBDIV_EVALUATOR_CPU;
break;
# undef CHECK_COMPUTE_TYPE
}
if (ss->osd_vao == 0) {
glGenVertexArrays(1, &ss->osd_vao);
}
if (ss->osd_mesh_invalid) {
if (ss->osd_mesh != NULL) {
ccgSubSurf__delete_osdGLMesh(ss->osd_mesh);
ss->osd_mesh = NULL;
}
ss->osd_mesh_invalid = false;
}
if (ss->osd_mesh == NULL) {
if (ss->osd_topology_refiner == NULL) {
/* Happens with empty meshes. */
/* TODO(sergey): Add assert that mesh is indeed empty. */
return false;
}
ss->osd_mesh = openSubdiv_createOsdGLMeshFromTopologyRefiner(ss->osd_topology_refiner,
compute_type);
if (UNLIKELY(ss->osd_mesh == NULL)) {
/* Most likely compute device is not available. */
return false;
}
ccgSubSurf__updateGLMeshCoords(ss);
ss->osd_mesh->refine(ss->osd_mesh);
ss->osd_mesh->synchronize(ss->osd_mesh);
ss->osd_coarse_coords_invalid = false;
glBindVertexArray(ss->osd_vao);
ss->osd_mesh->bindVertexBuffer(ss->osd_mesh);
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(GLfloat) * 6, 0);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(GLfloat) * 6, (float *)12);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
}
else if (ss->osd_coarse_coords_invalid) {
ccgSubSurf__updateGLMeshCoords(ss);
ss->osd_mesh->refine(ss->osd_mesh);
ss->osd_mesh->synchronize(ss->osd_mesh);
ss->osd_coarse_coords_invalid = false;
}
ss->osd_mesh->prepareDraw(ss->osd_mesh, use_osd_glsl, active_uv_index);
return true;
}
void ccgSubSurf_drawGLMesh(CCGSubSurf *ss,
bool fill_quads,
int start_partition,
int num_partitions)
{
if (LIKELY(ss->osd_mesh != NULL)) {
glBindVertexArray(ss->osd_vao);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ss->osd_mesh->getPatchIndexBuffer(ss->osd_mesh));
ss->osd_mesh->bindVertexBuffer(ss->osd_mesh);
glBindVertexArray(ss->osd_vao);
ss->osd_mesh->drawPatches(ss->osd_mesh, fill_quads, start_partition, num_partitions);
glBindVertexArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
}
}
int ccgSubSurf_getNumGLMeshBaseFaces(CCGSubSurf *ss)
{
if (ss->osd_topology_refiner != NULL) {
return ss->osd_topology_refiner->getNumFaces(ss->osd_topology_refiner);
}
return 0;
}
/* Get number of vertices in base faces in a particular GL mesh. */
int ccgSubSurf_getNumGLMeshBaseFaceVerts(CCGSubSurf *ss, int face)
{
if (ss->osd_topology_refiner != NULL) {
return ss->osd_topology_refiner->getNumFaceVertices(ss->osd_topology_refiner, face);
}
return 0;
}
void ccgSubSurf_setSkipGrids(CCGSubSurf *ss, bool skip_grids)
{
ss->skip_grids = skip_grids;
}
bool ccgSubSurf_needGrids(CCGSubSurf *ss)
{
return ss->skip_grids == false;
}
BLI_INLINE void ccgSubSurf__mapGridToFace(
int S, float grid_u, float grid_v, float *face_u, float *face_v)
{
float u, v;
/* - Each grid covers half of the face along the edges.
* - Grid's (0, 0) starts from the middle of the face.
*/
u = 0.5f - 0.5f * grid_u;
v = 0.5f - 0.5f * grid_v;
if (S == 0) {
*face_u = v;
*face_v = u;
}
else if (S == 1) {
*face_u = 1.0f - u;
*face_v = v;
}
else if (S == 2) {
*face_u = 1.0f - v;
*face_v = 1.0f - u;
}
else {
*face_u = u;
*face_v = 1.0f - v;
}
}
BLI_INLINE void ccgSubSurf__mapEdgeToFace(
int S, int edge_segment, bool inverse_edge, int edgeSize, float *face_u, float *face_v)
{
int t = inverse_edge ? edgeSize - edge_segment - 1 : edge_segment;
if (S == 0) {
*face_u = (float)t / (edgeSize - 1);
*face_v = 0.0f;
}
else if (S == 1) {
*face_u = 1.0f;
*face_v = (float)t / (edgeSize - 1);
}
else if (S == 2) {
*face_u = 1.0f - (float)t / (edgeSize - 1);
*face_v = 1.0f;
}
else {
*face_u = 0.0f;
*face_v = 1.0f - (float)t / (edgeSize - 1);
}
}
void ccgSubSurf_evaluatorSetFVarUV(CCGSubSurf *ss, DerivedMesh *dm, int layer_index)
{
MPoly *mpoly = dm->getPolyArray(dm);
MLoopUV *mloopuv = CustomData_get_layer_n(&dm->loopData, CD_MLOOPUV, layer_index);
int num_polys = dm->getNumPolys(dm);
int index, poly;
BLI_assert(ss->osd_evaluator != NULL);
for (poly = 0, index = 0; poly < num_polys; poly++) {
int loop;
MPoly *mp = &mpoly[poly];
for (loop = 0; loop < mp->totloop; loop++, index++) {
MLoopUV *mluv = &mloopuv[loop + mp->loopstart];
(void)mluv;
/* TODO(sergey): Send mluv->uv to the evaluator's face varying
* buffer.
*/
}
}
(void)ss;
}
void ccgSubSurf_evaluatorFVarUV(
CCGSubSurf *ss, int face_index, int S, float grid_u, float grid_v, float uv[2])
{
float face_u, face_v;
ccgSubSurf__mapGridToFace(S, grid_u, grid_v, &face_u, &face_v);
(void)ss;
(void)face_index;
/* TODO(sergey): Evaluate face varying coordinate. */
zero_v2(uv);
}
static bool opensubdiv_createEvaluator(CCGSubSurf *ss)
{
OpenSubdiv_Converter converter;
OpenSubdiv_TopologyRefiner *topology_refiner;
if (ss->fMap->numEntries == 0) {
/* OpenSubdiv doesn't support meshes without faces. */
return false;
}
ccgSubSurf_converter_setup_from_ccg(ss, &converter);
OpenSubdiv_TopologyRefinerSettings settings;
settings.level = ss->subdivLevels;
settings.is_adaptive = false;
topology_refiner = openSubdiv_createTopologyRefinerFromConverter(&converter, &settings);
ccgSubSurf_converter_free(&converter);
ss->osd_evaluator = openSubdiv_createEvaluatorFromTopologyRefiner(topology_refiner);
if (ss->osd_evaluator == NULL) {
BLI_assert(!"OpenSubdiv initialization failed, should not happen.");
return false;
}
return true;
}
static bool opensubdiv_ensureEvaluator(CCGSubSurf *ss)
{
if (ss->osd_evaluator == NULL) {
OSD_LOG("Allocating new evaluator, %d verts\n", ss->vMap->numEntries);
opensubdiv_createEvaluator(ss);
}
return ss->osd_evaluator != NULL;
}
static void opensubdiv_updateEvaluatorCoarsePositions(CCGSubSurf *ss)
{
float(*positions)[3];
int vertDataSize = ss->meshIFC.vertDataSize;
int num_basis_verts = ss->vMap->numEntries;
int i;
/* TODO(sergey): Avoid allocation on every update. We could either update
* coordinates in chunks of 1K vertices (which will only use stack memory)
* or do some callback magic for OSD evaluator can invoke it and fill in
* buffer directly.
*/
if (ss->meshIFC.numLayers == 3) {
/* If all the components are to be initialized, no need to memset the
* new memory block.
*/
positions = MEM_mallocN(3 * sizeof(float) * num_basis_verts, "OpenSubdiv coarse points");
}
else {
/* Calloc in order to have z component initialized to 0 for Uvs */
positions = MEM_callocN(3 * sizeof(float) * num_basis_verts, "OpenSubdiv coarse points");
}
# pragma omp parallel for
for (i = 0; i < ss->vMap->curSize; i++) {
CCGVert *v = (CCGVert *)ss->vMap->buckets[i];
for (; v; v = v->next) {
float *co = VERT_getCo(v, 0);
BLI_assert(v->osd_index < ss->vMap->numEntries);
VertDataCopy(positions[v->osd_index], co, ss);
OSD_LOG("Point %d has value %f %f %f\n",
v->osd_index,
positions[v->osd_index][0],
positions[v->osd_index][1],
positions[v->osd_index][2]);
}
}
ss->osd_evaluator->setCoarsePositions(ss->osd_evaluator, (float *)positions, 0, num_basis_verts);
ss->osd_evaluator->refine(ss->osd_evaluator);
MEM_freeN(positions);
}
static void opensubdiv_evaluateQuadFaceGrids(CCGSubSurf *ss,
CCGFace *face,
const int osd_face_index)
{
int normalDataOffset = ss->normalDataOffset;
int subdivLevels = ss->subdivLevels;
int gridSize = ccg_gridsize(subdivLevels);
int edgeSize = ccg_edgesize(subdivLevels);
int vertDataSize = ss->meshIFC.vertDataSize;
int S;
bool do_normals = ss->meshIFC.numLayers == 3;
# pragma omp parallel for
for (S = 0; S < face->numVerts; S++) {
int x, y, k;
CCGEdge *edge = NULL;
bool inverse_edge = false;
for (x = 0; x < gridSize; x++) {
for (y = 0; y < gridSize; y++) {
float *co = FACE_getIFCo(face, subdivLevels, S, x, y);
float *no = FACE_getIFNo(face, subdivLevels, S, x, y);
float grid_u = (float)x / (gridSize - 1), grid_v = (float)y / (gridSize - 1);
float face_u, face_v;
float P[3], dPdu[3], dPdv[3];
ccgSubSurf__mapGridToFace(S, grid_u, grid_v, &face_u, &face_v);
/* TODO(sergey): Need proper port. */
ss->osd_evaluator->evaluateLimit(ss->osd_evaluator,
osd_face_index,
face_u,
face_v,
P,
do_normals ? dPdu : NULL,
do_normals ? dPdv : NULL);
OSD_LOG("face=%d, corner=%d, grid_u=%f, grid_v=%f, face_u=%f, face_v=%f, P=(%f, %f, %f)\n",
osd_face_index,
S,
grid_u,
grid_v,
face_u,
face_v,
P[0],
P[1],
P[2]);
VertDataCopy(co, P, ss);
if (do_normals) {
cross_v3_v3v3(no, dPdu, dPdv);
normalize_v3(no);
}
if (x == gridSize - 1 && y == gridSize - 1) {
float *vert_co = VERT_getCo(FACE_getVerts(face)[S], subdivLevels);
VertDataCopy(vert_co, co, ss);
if (do_normals) {
float *vert_no = VERT_getNo(FACE_getVerts(face)[S], subdivLevels);
VertDataCopy(vert_no, no, ss);
}
}
if (S == 0 && x == 0 && y == 0) {
float *center_co = (float *)FACE_getCenterData(face);
VertDataCopy(center_co, co, ss);
if (do_normals) {
float *center_no = (float *)((byte *)FACE_getCenterData(face) + normalDataOffset);
VertDataCopy(center_no, no, ss);
}
}
}
}
for (x = 0; x < gridSize; x++) {
VertDataCopy(
FACE_getIECo(face, subdivLevels, S, x), FACE_getIFCo(face, subdivLevels, S, x, 0), ss);
if (do_normals) {
VertDataCopy(
FACE_getIENo(face, subdivLevels, S, x), FACE_getIFNo(face, subdivLevels, S, x, 0), ss);
}
}
for (k = 0; k < face->numVerts; k++) {
CCGEdge *current_edge = FACE_getEdges(face)[k];
CCGVert **face_verts = FACE_getVerts(face);
if (current_edge->v0 == face_verts[S] &&
current_edge->v1 == face_verts[(S + 1) % face->numVerts]) {
edge = current_edge;
inverse_edge = false;
break;
}
if (current_edge->v1 == face_verts[S] &&
current_edge->v0 == face_verts[(S + 1) % face->numVerts]) {
edge = current_edge;
inverse_edge = true;
break;
}
}
BLI_assert(edge != NULL);
for (x = 0; x < edgeSize; x++) {
float u = 0, v = 0;
float *co = EDGE_getCo(edge, subdivLevels, x);
float *no = EDGE_getNo(edge, subdivLevels, x);
float P[3], dPdu[3], dPdv[3];
ccgSubSurf__mapEdgeToFace(S, x, inverse_edge, edgeSize, &u, &v);
/* TODO(sergey): Ideally we will re-use grid here, but for now
* let's just re-evaluate for simplicity.
*/
/* TODO(sergey): Need proper port. */
ss->osd_evaluator->evaluateLimit(ss->osd_evaluator, osd_face_index, u, v, P, dPdu, dPdv);
VertDataCopy(co, P, ss);
if (do_normals) {
cross_v3_v3v3(no, dPdu, dPdv);
normalize_v3(no);
}
}
}
}
static void opensubdiv_evaluateNGonFaceGrids(CCGSubSurf *ss,
CCGFace *face,
const int osd_face_index)
{
CCGVert **all_verts = FACE_getVerts(face);
int normalDataOffset = ss->normalDataOffset;
int subdivLevels = ss->subdivLevels;
int gridSize = ccg_gridsize(subdivLevels);
int edgeSize = ccg_edgesize(subdivLevels);
int vertDataSize = ss->meshIFC.vertDataSize;
int S;
bool do_normals = ss->meshIFC.numLayers == 3;
/* Note about handling non-quad faces.
*
* In order to deal with non-quad faces we need to split them
* into a quads in the following way:
*
* |
* (vert_next)
* |
* |
* |
* (face_center) ------------------- (v2)
* | (o)--------------------> |
* | | v |
* | | |
* | | |
* | | |
* | | y ^ |
* | | | |
* | v u x | |
* | <---(o) |
* ---- (vert_prev) ---- (v1) -------------------- (vert)
*
* This is how grids are expected to be stored and it's how
* OpenSubdiv deals with non-quad faces using ptex face indices.
* We only need to convert ptex (x, y) to grid (u, v) by some
* simple flips and evaluate the ptex face.
*/
/* Evaluate face grids. */
# pragma omp parallel for
for (S = 0; S < face->numVerts; S++) {
int x, y;
for (x = 0; x < gridSize; x++) {
for (y = 0; y < gridSize; y++) {
float *co = FACE_getIFCo(face, subdivLevels, S, x, y);
float *no = FACE_getIFNo(face, subdivLevels, S, x, y);
float u = 1.0f - (float)y / (gridSize - 1), v = 1.0f - (float)x / (gridSize - 1);
float P[3], dPdu[3], dPdv[3];
/* TODO(sergey): Need proper port. */
ss->osd_evaluator->evaluateLimit(
ss->osd_evaluator, osd_face_index + S, u, v, P, dPdu, dPdv);
OSD_LOG("face=%d, corner=%d, u=%f, v=%f, P=(%f, %f, %f)\n",
osd_face_index + S,
S,
u,
v,
P[0],
P[1],
P[2]);
VertDataCopy(co, P, ss);
if (do_normals) {
cross_v3_v3v3(no, dPdu, dPdv);
normalize_v3(no);
}
/* TODO(sergey): De-dpuplicate with the quad case. */
if (x == gridSize - 1 && y == gridSize - 1) {
float *vert_co = VERT_getCo(FACE_getVerts(face)[S], subdivLevels);
VertDataCopy(vert_co, co, ss);
if (do_normals) {
float *vert_no = VERT_getNo(FACE_getVerts(face)[S], subdivLevels);
VertDataCopy(vert_no, no, ss);
}
}
if (S == 0 && x == 0 && y == 0) {
float *center_co = (float *)FACE_getCenterData(face);
VertDataCopy(center_co, co, ss);
if (do_normals) {
float *center_no = (float *)((byte *)FACE_getCenterData(face) + normalDataOffset);
VertDataCopy(center_no, no, ss);
}
}
}
}
for (x = 0; x < gridSize; x++) {
VertDataCopy(
FACE_getIECo(face, subdivLevels, S, x), FACE_getIFCo(face, subdivLevels, S, x, 0), ss);
if (do_normals) {
VertDataCopy(
FACE_getIENo(face, subdivLevels, S, x), FACE_getIFNo(face, subdivLevels, S, x, 0), ss);
}
}
}
/* Evaluate edges. */
for (S = 0; S < face->numVerts; S++) {
CCGEdge *edge = FACE_getEdges(face)[S];
int x, S0 = 0, S1 = 0;
bool flip;
for (x = 0; x < face->numVerts; x++) {
if (all_verts[x] == edge->v0) {
S0 = x;
}
else if (all_verts[x] == edge->v1) {
S1 = x;
}
}
if (S == face->numVerts - 1) {
flip = S0 > S1;
}
else {
flip = S0 < S1;
}
for (x = 0; x <= edgeSize / 2; x++) {
float *edge_co = EDGE_getCo(edge, subdivLevels, x);
float *edge_no = EDGE_getNo(edge, subdivLevels, x);
float *face_edge_co;
float *face_edge_no;
if (flip) {
face_edge_co = FACE_getIFCo(face, subdivLevels, S0, gridSize - 1, gridSize - 1 - x);
face_edge_no = FACE_getIFNo(face, subdivLevels, S0, gridSize - 1, gridSize - 1 - x);
}
else {
face_edge_co = FACE_getIFCo(face, subdivLevels, S0, gridSize - 1 - x, gridSize - 1);
face_edge_no = FACE_getIFNo(face, subdivLevels, S0, gridSize - 1 - x, gridSize - 1);
}
VertDataCopy(edge_co, face_edge_co, ss);
if (do_normals) {
VertDataCopy(edge_no, face_edge_no, ss);
}
}
for (x = edgeSize / 2 + 1; x < edgeSize; x++) {
float *edge_co = EDGE_getCo(edge, subdivLevels, x);
float *edge_no = EDGE_getNo(edge, subdivLevels, x);
float *face_edge_co;
float *face_edge_no;
if (flip) {
face_edge_co = FACE_getIFCo(face, subdivLevels, S1, x - edgeSize / 2, gridSize - 1);
face_edge_no = FACE_getIFNo(face, subdivLevels, S1, x - edgeSize / 2, gridSize - 1);
}
else {
face_edge_co = FACE_getIFCo(face, subdivLevels, S1, gridSize - 1, x - edgeSize / 2);
face_edge_no = FACE_getIFNo(face, subdivLevels, S1, gridSize - 1, x - edgeSize / 2);
}
VertDataCopy(edge_co, face_edge_co, ss);
if (do_normals) {
VertDataCopy(edge_no, face_edge_no, ss);
}
}
}
}
static void opensubdiv_evaluateGrids(CCGSubSurf *ss)
{
int i;
for (i = 0; i < ss->fMap->curSize; i++) {
CCGFace *face = (CCGFace *)ss->fMap->buckets[i];
for (; face; face = face->next) {
if (face->numVerts == 4) {
/* For quads we do special magic with converting face coords
* into corner coords and interpolating grids from it.
*/
opensubdiv_evaluateQuadFaceGrids(ss, face, face->osd_index);
}
else {
/* NGons and tris are split into separate osd faces which
* evaluates onto grids directly.
*/
opensubdiv_evaluateNGonFaceGrids(ss, face, face->osd_index);
}
}
}
}
CCGError ccgSubSurf_initOpenSubdivSync(CCGSubSurf *ss)
{
if (ss->syncState != eSyncState_None) {
return eCCGError_InvalidSyncState;
}
ss->syncState = eSyncState_OpenSubdiv;
return eCCGError_None;
}
void ccgSubSurf_prepareTopologyRefiner(CCGSubSurf *ss, DerivedMesh *dm)
{
if (ss->osd_mesh == NULL || ss->osd_mesh_invalid) {
if (dm->getNumPolys(dm) != 0) {
OpenSubdiv_Converter converter;
ccgSubSurf_converter_setup_from_derivedmesh(ss, dm, &converter);
/* TODO(sergey): Remove possibly previously allocated refiner. */
OpenSubdiv_TopologyRefinerSettings settings;
settings.level = ss->subdivLevels;
settings.is_adaptive = false;
ss->osd_topology_refiner = openSubdiv_createTopologyRefinerFromConverter(&converter,
&settings);
ccgSubSurf_converter_free(&converter);
}
}
/* Update number of grids, needed for things like final faces
* counter, used by display drawing.
*/
{
const int num_polys = dm->getNumPolys(dm);
const MPoly *mpoly = dm->getPolyArray(dm);
int poly;
ss->numGrids = 0;
for (poly = 0; poly < num_polys; poly++) {
ss->numGrids += mpoly[poly].totloop;
}
}
{
const int num_verts = dm->getNumVerts(dm);
const MVert *mvert = dm->getVertArray(dm);
int vert;
if (ss->osd_coarse_coords != NULL && num_verts != ss->osd_num_coarse_coords) {
MEM_freeN(ss->osd_coarse_coords);
ss->osd_coarse_coords = NULL;
}
if (ss->osd_coarse_coords == NULL) {
ss->osd_coarse_coords = MEM_mallocN(sizeof(float) * 6 * num_verts, "osd coarse positions");
}
for (vert = 0; vert < num_verts; vert++) {
copy_v3_v3(ss->osd_coarse_coords[vert * 2 + 0], mvert[vert].co);
normal_short_to_float_v3(ss->osd_coarse_coords[vert * 2 + 1], mvert[vert].no);
}
ss->osd_num_coarse_coords = num_verts;
ss->osd_coarse_coords_invalid = true;
}
}
/* Synchronize the CCG structure with OpenSubdiv: refresh the evaluator and,
 * unless grids are skipped (GPU path), evaluate the limit surface into the
 * CCG grids. */
void ccgSubSurf__sync_opensubdiv(CCGSubSurf *ss)
{
  BLI_assert(ss->meshIFC.numLayers == 2 || ss->meshIFC.numLayers == 3);
  /* Common synchronization steps */
  ss->osd_compute = U.opensubdiv_compute_type;
  if (ss->skip_grids == false) {
    /* Make sure OSD evaluator is up-to-date. */
    if (opensubdiv_ensureEvaluator(ss)) {
      /* Update coarse points in the OpenSubdiv evaluator. */
      opensubdiv_updateEvaluatorCoarsePositions(ss);
      /* Evaluate opensubdiv mesh into the CCG grids. */
      opensubdiv_evaluateGrids(ss);
    }
  }
  else {
    /* GPU back-end: grids are not evaluated on CPU; only layer layout is checked. */
    BLI_assert(ss->meshIFC.numLayers == 3);
  }
# ifdef DUMP_RESULT_GRIDS
  ccgSubSurf__dumpCoords(ss);
# endif
}
/* Release the OpenSubdiv GL mesh together with its vertex array object. */
void ccgSubSurf_free_osd_mesh(CCGSubSurf *ss)
{
  if (ss->osd_mesh) {
    ccgSubSurf__delete_osdGLMesh(ss->osd_mesh);
    ss->osd_mesh = NULL;
  }
  if (ss->osd_vao) {
    glDeleteVertexArrays(1, &ss->osd_vao);
    ss->osd_vao = 0;
  }
}
/* Expand r_min/r_max by the coarse vertex positions (GPU back-end only).
 *
 * NOTE(review): when coordinates exist, r_min/r_max are only expanded, never
 * initialized -- callers are expected to INIT_MINMAX first; confirm. */
void ccgSubSurf_getMinMax(CCGSubSurf *ss, float r_min[3], float r_max[3])
{
  int i;
  BLI_assert(ss->skip_grids == true);
  if (ss->osd_num_coarse_coords == 0) {
    /* No coordinates at all: return a degenerate zero bounding box and be
     * explicit about it instead of falling through into a no-op loop. */
    zero_v3(r_min);
    zero_v3(r_max);
    return;
  }
  for (i = 0; i < ss->osd_num_coarse_coords; i++) {
    /* Coarse coordinates have normals interleaved into the array:
     * [co0, no0, co1, no1, ...], so positions live at even indices. */
    DO_MINMAX(ss->osd_coarse_coords[2 * i], r_min, r_max);
  }
}
/* ** Delayed delete routines ** */
/* GL resources may only be destroyed from the main thread, so deletions
 * requested from other threads are queued here and flushed later by
 * ccgSubSurf__delete_pending(). */
typedef struct OsdDeletePendingItem {
  struct OsdDeletePendingItem *next, *prev;
  /* OSD mesh to delete, or NULL when only a VAO is queued. */
  OpenSubdiv_GLMesh *osd_mesh;
  /* Vertex array object to delete, or 0 when unused. */
  unsigned int vao;
} OsdDeletePendingItem;
/* Protects delete_pool against concurrent push/flush. */
static SpinLock delete_spin;
static ListBase delete_pool = {NULL, NULL};
/* Queue an OSD mesh and/or VAO for deferred deletion on the main thread. */
static void delete_pending_push(OpenSubdiv_GLMesh *osd_mesh, unsigned int vao)
{
  OsdDeletePendingItem *new_entry = MEM_mallocN(sizeof(OsdDeletePendingItem),
                                                "opensubdiv delete entry");
  new_entry->osd_mesh = osd_mesh;
  new_entry->vao = vao;
  BLI_spin_lock(&delete_spin);
  BLI_addtail(&delete_pool, new_entry);
  BLI_spin_unlock(&delete_spin);
}
/* Delete an OSD GL mesh immediately when on the main thread, otherwise
 * queue it for deferred deletion. */
void ccgSubSurf__delete_osdGLMesh(OpenSubdiv_GLMesh *osd_mesh)
{
  if (!BLI_thread_is_main()) {
    delete_pending_push(osd_mesh, 0);
    return;
  }
  openSubdiv_deleteOsdGLMesh(osd_mesh);
}
/* Same main-thread restriction applies to vertex array objects. */
void ccgSubSurf__delete_vertex_array(unsigned int vao)
{
  if (!BLI_thread_is_main()) {
    delete_pending_push(NULL, vao);
    return;
  }
  glDeleteVertexArrays(1, &vao);
}
/* Flush the deferred-deletion queue. Must run on the main thread since it
 * destroys GL resources. */
void ccgSubSurf__delete_pending(void)
{
  OsdDeletePendingItem *entry;
  BLI_assert(BLI_thread_is_main());
  BLI_spin_lock(&delete_spin);
  for (entry = delete_pool.first; entry != NULL; entry = entry->next) {
    if (entry->osd_mesh != NULL) {
      openSubdiv_deleteOsdGLMesh(entry->osd_mesh);
    }
    if (entry->vao != 0) {
      glDeleteVertexArrays(1, &entry->vao);
    }
  }
  /* Entries themselves are freed in one pass after their payloads. */
  BLI_freelistN(&delete_pool);
  BLI_spin_unlock(&delete_spin);
}
/* Record whether UVs should be subdivided on the next OpenSubdiv sync. */
void ccgSubSurf__sync_subdivUvs(CCGSubSurf *ss, bool subdiv_uvs)
{
  ss->osd_subdiv_uvs = subdiv_uvs;
}
/* ** Public API ** */
/* Initialize the OpenSubdiv subsurf module; call once at startup. */
void BKE_subsurf_osd_init(void)
{
  openSubdiv_init();
  BLI_spin_init(&delete_spin);
}
/* Flush GL resources queued for deletion from non-main threads. */
void BKE_subsurf_free_unused_buffers(void)
{
  ccgSubSurf__delete_pending();
}
/* Tear down the module; flushes pending deletions before ending the lock. */
void BKE_subsurf_osd_cleanup(void)
{
  openSubdiv_cleanup();
  ccgSubSurf__delete_pending();
  BLI_spin_end(&delete_spin);
}
#endif /* WITH_OPENSUBDIV */

View File

@ -1,777 +0,0 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/** \file
* \ingroup bke
*/
#ifdef WITH_OPENSUBDIV
# include <stdlib.h>
# include "BLI_sys_types.h" // for intptr_t support
# include "MEM_guardedalloc.h"
# include "BLI_math.h"
# include "BLI_utildefines.h" /* for BLI_assert */
# include "CCGSubSurf.h"
# include "CCGSubSurf_intern.h"
# include "BKE_DerivedMesh.h"
# include "BKE_mesh_mapping.h"
# include "opensubdiv_capi.h"
# include "opensubdiv_converter_capi.h"
/* Use mesh element mapping structures during conversion.
* Uses more memory but is much faster than naive algorithm.
*/
# define USE_MESH_ELEMENT_MAPPING
/**
* Converter from DerivedMesh.
*/
/* Per-converter state for feeding a DerivedMesh's topology to OpenSubdiv. */
typedef struct ConvDMStorage {
  CCGSubSurf *ss;
  DerivedMesh *dm;
# ifdef USE_MESH_ELEMENT_MAPPING
  /* Pre-computed adjacency maps; trade memory for O(1) neighbor queries. */
  MeshElemMap *vert_edge_map, *vert_poly_map, *edge_poly_map;
  int *vert_edge_mem, *vert_poly_mem, *edge_poly_mem;
# endif
  /* Borrowed element arrays from `dm` (not owned). */
  MVert *mvert;
  MEdge *medge;
  MLoop *mloop;
  MPoly *mpoly;
  /* UV island connectivity, filled in by conv_dm_precalc_uv_layer(). */
  MeshIslandStore island_store;
  /* Welded UV coordinates and the per-loop index into them. */
  int num_uvs;
  float *uvs;
  int *face_uvs;
} ConvDMStorage;
/* Simple subdivision maps to the bilinear scheme, regular subdivision to
 * Catmull-Clark. */
static OpenSubdiv_SchemeType conv_dm_get_type(const OpenSubdiv_Converter *converter)
{
  ConvDMStorage *storage = converter->user_data;
  return storage->ss->meshIFC.simpleSubdiv ? OSD_SCHEME_BILINEAR : OSD_SCHEME_CATMARK;
}
static OpenSubdiv_VtxBoundaryInterpolation conv_dm_get_vtx_boundary_interpolation(
    const OpenSubdiv_Converter *UNUSED(converter))
{
  return OSD_VTX_BOUNDARY_EDGE_ONLY;
}
/* UV interpolation follows the "smooth UVs" setting of the subsurf. */
static OpenSubdiv_FVarLinearInterpolation conv_dm_get_fvar_linear_interpolation(
    const OpenSubdiv_Converter *converter)
{
  ConvDMStorage *storage = converter->user_data;
  return storage->ss->osd_subdiv_uvs ? OSD_FVAR_LINEAR_INTERPOLATION_CORNERS_ONLY :
                                       OSD_FVAR_LINEAR_INTERPOLATION_ALL;
}
/* This converter provides full adjacency information. */
static bool conv_dm_specifies_full_topology(const OpenSubdiv_Converter *UNUSED(converter))
{
  return true;
}
/* Element-count callbacks, forwarded straight to the DerivedMesh. */
static int conv_dm_get_num_faces(const OpenSubdiv_Converter *converter)
{
  ConvDMStorage *storage = converter->user_data;
  DerivedMesh *dm = storage->dm;
  return dm->getNumPolys(dm);
}
static int conv_dm_get_num_edges(const OpenSubdiv_Converter *converter)
{
  ConvDMStorage *storage = converter->user_data;
  DerivedMesh *dm = storage->dm;
  return dm->getNumEdges(dm);
}
static int conv_dm_get_num_verts(const OpenSubdiv_Converter *converter)
{
  ConvDMStorage *storage = converter->user_data;
  DerivedMesh *dm = storage->dm;
  return dm->getNumVerts(dm);
}
/* Number of corners of a single polygon. */
static int conv_dm_get_num_face_verts(const OpenSubdiv_Converter *converter, int face)
{
  ConvDMStorage *storage = converter->user_data;
  const MPoly *mpoly = &storage->mpoly[face];
  return mpoly->totloop;
}
static void conv_dm_get_face_verts(const OpenSubdiv_Converter *converter,
int face,
int *face_verts)
{
ConvDMStorage *storage = converter->user_data;
const MPoly *mpoly = &storage->mpoly[face];
int loop;
for (loop = 0; loop < mpoly->totloop; loop++) {
face_verts[loop] = storage->mloop[mpoly->loopstart + loop].v;
}
}
static void conv_dm_get_face_edges(const OpenSubdiv_Converter *converter,
int face,
int *face_edges)
{
ConvDMStorage *storage = converter->user_data;
const MPoly *mpoly = &storage->mpoly[face];
int loop;
for (loop = 0; loop < mpoly->totloop; loop++) {
face_edges[loop] = storage->mloop[mpoly->loopstart + loop].e;
}
}
static void conv_dm_get_edge_verts(const OpenSubdiv_Converter *converter,
int edge,
int *edge_verts)
{
ConvDMStorage *storage = converter->user_data;
const MEdge *medge = &storage->medge[edge];
edge_verts[0] = medge->v1;
edge_verts[1] = medge->v2;
}
static int conv_dm_get_num_edge_faces(const OpenSubdiv_Converter *converter, int edge)
{
ConvDMStorage *storage = converter->user_data;
# ifndef USE_MESH_ELEMENT_MAPPING
DerivedMesh *dm = storage->dm;
int num = 0, poly;
for (poly = 0; poly < dm->getNumPolys(dm); poly++) {
const MPoly *mpoly = &user_data->mpoly[poly];
int loop;
for (loop = 0; loop < mpoly->totloop; loop++) {
const MLoop *mloop = &storage->mloop[mpoly->loopstart + loop];
if (mloop->e == edge) {
num++;
break;
}
}
}
return num;
# else
return storage->edge_poly_map[edge].count;
# endif
}
static void conv_dm_get_edge_faces(const OpenSubdiv_Converter *converter,
int edge,
int *edge_faces)
{
ConvDMStorage *storage = converter->user_data;
# ifndef USE_MESH_ELEMENT_MAPPING
DerivedMesh *dm = storage->dm;
int num = 0, poly;
for (poly = 0; poly < dm->getNumPolys(dm); poly++) {
const MPoly *mpoly = &user_data->mpoly[poly];
int loop;
for (loop = 0; loop < mpoly->totloop; loop++) {
const MLoop *mloop = &storage->mloop[mpoly->loopstart + loop];
if (mloop->e == edge) {
edge_faces[num++] = poly;
break;
}
}
}
# else
memcpy(edge_faces,
storage->edge_poly_map[edge].indices,
sizeof(int) * storage->edge_poly_map[edge].count);
# endif
}
/* Map the edge crease (stored as a byte, 0..255) to an OpenSubdiv sharpness
 * value scaled by the subdivision level. */
static float conv_dm_get_edge_sharpness(const OpenSubdiv_Converter *converter, int edge)
{
  ConvDMStorage *storage = converter->user_data;
  CCGSubSurf *ss = storage->ss;
  const MEdge *medge = storage->medge;
  return (float)medge[edge].crease / 255.0f * ss->subdivLevels;
}
static int conv_dm_get_num_vert_edges(const OpenSubdiv_Converter *converter, int vert)
{
ConvDMStorage *storage = converter->user_data;
# ifndef USE_MESH_ELEMENT_MAPPING
DerivedMesh *dm = storage->dm;
int num = 0, edge;
for (edge = 0; edge < dm->getNumEdges(dm); edge++) {
const MEdge *medge = &user_data->medge[edge];
if (medge->v1 == vert || medge->v2 == vert) {
num++;
}
}
return num;
# else
return storage->vert_edge_map[vert].count;
# endif
}
static void conv_dm_get_vert_edges(const OpenSubdiv_Converter *converter,
int vert,
int *vert_edges)
{
ConvDMStorage *storage = converter->user_data;
# ifndef USE_MESH_ELEMENT_MAPPING
DerivedMesh *dm = storage->dm;
int num = 0, edge;
for (edge = 0; edge < dm->getNumEdges(dm); edge++) {
const MEdge *medge = &user_data->medge[edge];
if (medge->v1 == vert || medge->v2 == vert) {
vert_edges[num++] = edge;
}
}
# else
memcpy(vert_edges,
storage->vert_edge_map[vert].indices,
sizeof(int) * storage->vert_edge_map[vert].count);
# endif
}
static int conv_dm_get_num_vert_faces(const OpenSubdiv_Converter *converter, int vert)
{
ConvDMStorage *storage = converter->user_data;
# ifndef USE_MESH_ELEMENT_MAPPING
DerivedMesh *dm = storage->dm;
int num = 0, poly;
for (poly = 0; poly < dm->getNumPolys(dm); poly++) {
const MPoly *mpoly = &user_data->mpoly[poly];
int loop;
for (loop = 0; loop < mpoly->totloop; loop++) {
const MLoop *mloop = &storage->mloop[mpoly->loopstart + loop];
if (mloop->v == vert) {
num++;
break;
}
}
}
return num;
# else
return storage->vert_poly_map[vert].count;
# endif
}
/* Fill vert_faces with indices of polygons using the given vertex. */
static void conv_dm_get_vert_faces(const OpenSubdiv_Converter *converter,
                                   int vert,
                                   int *vert_faces)
{
  ConvDMStorage *storage = converter->user_data;
# ifndef USE_MESH_ELEMENT_MAPPING
  /* Naive O(polys * loops) fallback scan. */
  DerivedMesh *dm = storage->dm;
  int num = 0, poly;
  for (poly = 0; poly < dm->getNumPolys(dm); poly++) {
    const MPoly *mpoly = &storage->mpoly[poly];
    int loop;
    for (loop = 0; loop < mpoly->totloop; loop++) {
      const MLoop *mloop = &storage->mloop[mpoly->loopstart + loop];
      if (mloop->v == vert) {
        vert_faces[num++] = poly;
        break;
      }
    }
  }
# else
  memcpy(vert_faces,
         storage->vert_poly_map[vert].indices,
         sizeof(int) * storage->vert_poly_map[vert].count);
# endif
}
/* DerivedMesh vertices never carry infinite-sharp or sharpness data. */
static bool conv_dm_is_infinite_sharp_vertex(const OpenSubdiv_Converter *UNUSED(converter),
                                             int UNUSED(manifold_vertex_index))
{
  return false;
}
static float conv_dm_get_vertex_sharpness(const OpenSubdiv_Converter *UNUSED(converter),
                                          int UNUSED(manifold_vertex_index))
{
  return 0.0f;
}
/* Number of UV (MLoopUV) layers on the source mesh. */
static int conv_dm_get_num_uv_layers(const OpenSubdiv_Converter *converter)
{
  ConvDMStorage *storage = converter->user_data;
  DerivedMesh *dm = storage->dm;
  int num_uv_layers = CustomData_number_of_layers(&dm->loopData, CD_MLOOPUV);
  return num_uv_layers;
}
/* Prepare a UV layer for OpenSubdiv: compute UV island connectivity, then
 * weld loops with identical UVs inside each island so that islands are
 * passed to OpenSubdiv as connected patches.
 * Results go into storage->uvs (welded coords) and storage->face_uvs
 * (per-loop index into the welded array). */
static void conv_dm_precalc_uv_layer(const OpenSubdiv_Converter *converter, int layer)
{
  ConvDMStorage *storage = converter->user_data;
  DerivedMesh *dm = storage->dm;
  const MLoopUV *mloopuv = CustomData_get_layer_n(&dm->loopData, CD_MLOOPUV, layer);
  const int num_loops = dm->getNumLoops(dm);
  /* Initialize memory required for the operations. */
  if (storage->uvs == NULL) {
    storage->uvs = MEM_mallocN(sizeof(float) * 2 * num_loops, "osd uvs");
  }
  if (storage->face_uvs == NULL) {
    storage->face_uvs = MEM_mallocN(sizeof(int) * num_loops, "osd face uvs");
  }
  /* Calculate islands connectivity of the UVs. */
  BKE_mesh_calc_islands_loop_poly_uvmap(storage->mvert,
                                        dm->getNumVerts(dm),
                                        storage->medge,
                                        dm->getNumEdges(dm),
                                        storage->mpoly,
                                        dm->getNumPolys(dm),
                                        storage->mloop,
                                        dm->getNumLoops(dm),
                                        mloopuv,
                                        &storage->island_store);
  /* Here we "weld" duplicated vertices from island to the same UV value.
   * The idea here is that we need to pass individual islands to OpenSubdiv.
   */
  storage->num_uvs = 0;
  for (int island = 0; island < storage->island_store.islands_num; island++) {
    MeshElemMap *island_poly_map = storage->island_store.islands[island];
    for (int poly = 0; poly < island_poly_map->count; poly++) {
      int poly_index = island_poly_map->indices[poly];
      /* Within the same UV island we should share UV points across
       * loops. Otherwise each poly will be subdivided individually
       * which we don't really want.
       */
      const MPoly *mpoly = &storage->mpoly[poly_index];
      for (int loop = 0; loop < mpoly->totloop; loop++) {
        const MLoopUV *luv = &mloopuv[mpoly->loopstart + loop];
        bool found = false;
        /* TODO(sergey): Quite bad loop, which gives us O(N^2)
         * complexity here. But how can we do it smarter, hopefully
         * without requiring lots of additional memory.
         */
        for (int i = 0; i < storage->num_uvs; i++) {
          if (equals_v2v2(luv->uv, &storage->uvs[2 * i])) {
            storage->face_uvs[mpoly->loopstart + loop] = i;
            found = true;
            break;
          }
        }
        if (!found) {
          /* First occurrence of this UV: append it to the welded array. */
          copy_v2_v2(&storage->uvs[2 * storage->num_uvs], luv->uv);
          storage->face_uvs[mpoly->loopstart + loop] = storage->num_uvs;
          ++storage->num_uvs;
        }
      }
    }
  }
}
/* Release the island connectivity once the layer has been consumed. */
static void conv_dm_finish_uv_layer(const OpenSubdiv_Converter *converter)
{
  ConvDMStorage *storage = converter->user_data;
  BKE_mesh_loop_islands_free(&storage->island_store);
}
/* Number of welded UV coordinates computed by conv_dm_precalc_uv_layer(). */
static int conv_dm_get_num_uvs(const OpenSubdiv_Converter *converter)
{
  ConvDMStorage *storage = converter->user_data;
  return storage->num_uvs;
}
/* Index into the welded UV array for a specific face corner. */
static int conv_dm_get_face_corner_uv_index(const OpenSubdiv_Converter *converter,
                                            int face,
                                            int corner)
{
  ConvDMStorage *storage = converter->user_data;
  const MPoly *mpoly = &storage->mpoly[face];
  return storage->face_uvs[mpoly->loopstart + corner];
}
/* Destructor for ConvDMStorage: frees owned UV buffers, adjacency maps and
 * the storage itself. Borrowed mesh arrays (mvert/medge/...) are not freed. */
static void conv_dm_free_user_data(const OpenSubdiv_Converter *converter)
{
  ConvDMStorage *user_data = converter->user_data;
  if (user_data->uvs != NULL) {
    MEM_freeN(user_data->uvs);
  }
  if (user_data->face_uvs != NULL) {
    MEM_freeN(user_data->face_uvs);
  }
# ifdef USE_MESH_ELEMENT_MAPPING
  MEM_freeN(user_data->vert_edge_map);
  MEM_freeN(user_data->vert_edge_mem);
  MEM_freeN(user_data->vert_poly_map);
  MEM_freeN(user_data->vert_poly_mem);
  MEM_freeN(user_data->edge_poly_map);
  MEM_freeN(user_data->edge_poly_mem);
# endif
  MEM_freeN(user_data);
}
/* Initialize an OpenSubdiv_Converter which reads its topology from the given
 * DerivedMesh. Allocates a ConvDMStorage as user data (freed via the
 * converter's freeUserData callback) and pre-computes adjacency maps when
 * USE_MESH_ELEMENT_MAPPING is enabled. */
void ccgSubSurf_converter_setup_from_derivedmesh(CCGSubSurf *ss,
                                                 DerivedMesh *dm,
                                                 OpenSubdiv_Converter *converter)
{
  ConvDMStorage *user_data;
  /* Wire up all topology/UV query callbacks. */
  converter->getSchemeType = conv_dm_get_type;
  converter->getVtxBoundaryInterpolation = conv_dm_get_vtx_boundary_interpolation;
  converter->getFVarLinearInterpolation = conv_dm_get_fvar_linear_interpolation;
  converter->specifiesFullTopology = conv_dm_specifies_full_topology;
  converter->getNumFaces = conv_dm_get_num_faces;
  converter->getNumEdges = conv_dm_get_num_edges;
  converter->getNumVertices = conv_dm_get_num_verts;
  converter->getNumFaceVertices = conv_dm_get_num_face_verts;
  converter->getFaceVertices = conv_dm_get_face_verts;
  converter->getFaceEdges = conv_dm_get_face_edges;
  converter->getEdgeVertices = conv_dm_get_edge_verts;
  converter->getNumEdgeFaces = conv_dm_get_num_edge_faces;
  converter->getEdgeFaces = conv_dm_get_edge_faces;
  converter->getEdgeSharpness = conv_dm_get_edge_sharpness;
  converter->getNumVertexEdges = conv_dm_get_num_vert_edges;
  converter->getVertexEdges = conv_dm_get_vert_edges;
  converter->getNumVertexFaces = conv_dm_get_num_vert_faces;
  converter->getVertexFaces = conv_dm_get_vert_faces;
  converter->isInfiniteSharpVertex = conv_dm_is_infinite_sharp_vertex;
  converter->getVertexSharpness = conv_dm_get_vertex_sharpness;
  converter->getNumUVLayers = conv_dm_get_num_uv_layers;
  converter->precalcUVLayer = conv_dm_precalc_uv_layer;
  converter->finishUVLayer = conv_dm_finish_uv_layer;
  converter->getNumUVCoordinates = conv_dm_get_num_uvs;
  converter->getFaceCornerUVIndex = conv_dm_get_face_corner_uv_index;
  /* Allocate and populate the per-converter storage. */
  user_data = MEM_mallocN(sizeof(ConvDMStorage), __func__);
  user_data->ss = ss;
  user_data->dm = dm;
  /* Borrow element arrays from the DerivedMesh (not owned). */
  user_data->mvert = dm->getVertArray(dm);
  user_data->medge = dm->getEdgeArray(dm);
  user_data->mloop = dm->getLoopArray(dm);
  user_data->mpoly = dm->getPolyArray(dm);
  memset(&user_data->island_store, 0, sizeof(user_data->island_store));
  user_data->uvs = NULL;
  user_data->face_uvs = NULL;
  converter->freeUserData = conv_dm_free_user_data;
  converter->user_data = user_data;
# ifdef USE_MESH_ELEMENT_MAPPING
  /* Pre-compute adjacency maps for fast neighbor queries. */
  {
    const MEdge *medge = dm->getEdgeArray(dm);
    const MLoop *mloop = dm->getLoopArray(dm);
    const MPoly *mpoly = dm->getPolyArray(dm);
    const int num_vert = dm->getNumVerts(dm), num_edge = dm->getNumEdges(dm),
              num_loop = dm->getNumLoops(dm), num_poly = dm->getNumPolys(dm);
    BKE_mesh_vert_edge_map_create(
        &user_data->vert_edge_map, &user_data->vert_edge_mem, medge, num_vert, num_edge);
    BKE_mesh_vert_poly_map_create(&user_data->vert_poly_map,
                                  &user_data->vert_poly_mem,
                                  mpoly,
                                  mloop,
                                  num_vert,
                                  num_poly,
                                  num_loop);
    BKE_mesh_edge_poly_map_create(&user_data->edge_poly_map,
                                  &user_data->edge_poly_mem,
                                  medge,
                                  num_edge,
                                  mpoly,
                                  num_poly,
                                  mloop,
                                  num_loop);
  }
# endif /* USE_MESH_ELEMENT_MAPPING */
}
/**
* Converter from CCGSubSurf
*/
/* CCG-based converter callbacks: topology is queried directly from an
 * existing CCGSubSurf structure stored as user data. */
static OpenSubdiv_SchemeType conv_ccg_get_bilinear_type(const OpenSubdiv_Converter *converter)
{
  CCGSubSurf *ss = converter->user_data;
  return ss->meshIFC.simpleSubdiv ? OSD_SCHEME_BILINEAR : OSD_SCHEME_CATMARK;
}
static OpenSubdiv_VtxBoundaryInterpolation conv_ccg_get_vtx_boundary_interpolation(
    const OpenSubdiv_Converter *UNUSED(converter))
{
  return OSD_VTX_BOUNDARY_EDGE_ONLY;
}
/* UV interpolation follows the "smooth UVs" setting of the subsurf. */
static OpenSubdiv_FVarLinearInterpolation conv_ccg_get_fvar_linear_interpolation(
    const OpenSubdiv_Converter *converter)
{
  CCGSubSurf *ss = converter->user_data;
  return ss->osd_subdiv_uvs ? OSD_FVAR_LINEAR_INTERPOLATION_CORNERS_ONLY :
                              OSD_FVAR_LINEAR_INTERPOLATION_ALL;
}
static bool conv_ccg_specifies_full_topology(const OpenSubdiv_Converter *UNUSED(converter))
{
  return true;
}
/* Element counts come straight from the CCG hash maps. */
static int conv_ccg_get_num_faces(const OpenSubdiv_Converter *converter)
{
  CCGSubSurf *ss = converter->user_data;
  return ss->fMap->numEntries;
}
static int conv_ccg_get_num_edges(const OpenSubdiv_Converter *converter)
{
  CCGSubSurf *ss = converter->user_data;
  return ss->eMap->numEntries;
}
static int conv_ccg_get_num_verts(const OpenSubdiv_Converter *converter)
{
  CCGSubSurf *ss = converter->user_data;
  return ss->vMap->numEntries;
}
static int conv_ccg_get_num_face_verts(const OpenSubdiv_Converter *converter, int face)
{
  CCGSubSurf *ss = converter->user_data;
  return ccgSubSurf_getFaceNumVerts(ccgSubSurf_getFace(ss, POINTER_FROM_INT(face)));
}
/* Fill face_verts with the vertex handles (as ints) of the given CCG face. */
static void conv_ccg_get_face_verts(const OpenSubdiv_Converter *converter,
                                    int face,
                                    int *face_verts)
{
  CCGSubSurf *ss = converter->user_data;
  CCGFace *ccg_face = ccgSubSurf_getFace(ss, POINTER_FROM_INT(face));
  int num_face_verts = ccgSubSurf_getFaceNumVerts(ccg_face);
  int loop;
  for (loop = 0; loop < num_face_verts; loop++) {
    CCGVert *ccg_vert = ccgSubSurf_getFaceVert(ccg_face, loop);
    face_verts[loop] = POINTER_AS_INT(ccgSubSurf_getVertVertHandle(ccg_vert));
  }
}
/* Fill face_edges with the edge handles (as ints) of the given CCG face. */
static void conv_ccg_get_face_edges(const OpenSubdiv_Converter *converter,
                                    int face,
                                    int *face_edges)
{
  CCGSubSurf *ss = converter->user_data;
  CCGFace *ccg_face = ccgSubSurf_getFace(ss, POINTER_FROM_INT(face));
  int num_face_verts = ccgSubSurf_getFaceNumVerts(ccg_face);
  int loop;
  for (loop = 0; loop < num_face_verts; loop++) {
    CCGEdge *ccg_edge = ccgSubSurf_getFaceEdge(ccg_face, loop);
    face_edges[loop] = POINTER_AS_INT(ccgSubSurf_getEdgeEdgeHandle(ccg_edge));
  }
}
/* Fill edge_verts with the two endpoint vertex handles of the given edge. */
static void conv_ccg_get_edge_verts(const OpenSubdiv_Converter *converter,
                                    int edge,
                                    int *edge_verts)
{
  CCGSubSurf *ss = converter->user_data;
  CCGEdge *ccg_edge = ccgSubSurf_getEdge(ss, POINTER_FROM_INT(edge));
  CCGVert *ccg_vert0 = ccgSubSurf_getEdgeVert0(ccg_edge);
  CCGVert *ccg_vert1 = ccgSubSurf_getEdgeVert1(ccg_edge);
  edge_verts[0] = POINTER_AS_INT(ccgSubSurf_getVertVertHandle(ccg_vert0));
  edge_verts[1] = POINTER_AS_INT(ccgSubSurf_getVertVertHandle(ccg_vert1));
}
/* Number of faces incident to the given edge. */
static int conv_ccg_get_num_edge_faces(const OpenSubdiv_Converter *converter, int edge)
{
  CCGSubSurf *ss = converter->user_data;
  CCGEdge *ccg_edge = ccgSubSurf_getEdge(ss, POINTER_FROM_INT(edge));
  return ccgSubSurf_getEdgeNumFaces(ccg_edge);
}
/* Fill edge_faces with handles of faces incident to the given edge. */
static void conv_ccg_get_edge_faces(const OpenSubdiv_Converter *converter,
                                    int edge,
                                    int *edge_faces)
{
  CCGSubSurf *ss = converter->user_data;
  CCGEdge *ccg_edge = ccgSubSurf_getEdge(ss, POINTER_FROM_INT(edge));
  int num_edge_faces = ccgSubSurf_getEdgeNumFaces(ccg_edge);
  int face;
  for (face = 0; face < num_edge_faces; face++) {
    CCGFace *ccg_face = ccgSubSurf_getEdgeFace(ccg_edge, face);
    edge_faces[face] = POINTER_AS_INT(ccgSubSurf_getFaceFaceHandle(ccg_face));
  }
}
/* Edge sharpness taken directly from the CCG edge crease value. */
static float conv_ccg_get_edge_sharpness(const OpenSubdiv_Converter *converter, int edge)
{
  CCGSubSurf *ss = converter->user_data;
  CCGEdge *ccg_edge = ccgSubSurf_getEdge(ss, POINTER_FROM_INT(edge));
  /* TODO(sergey): Multiply by subdivision level once CPU evaluator
   * is switched to uniform subdivision type.
   */
  return ccg_edge->crease;
}
/* Number of edges incident to the given vertex. */
static int conv_ccg_get_num_vert_edges(const OpenSubdiv_Converter *converter, int vert)
{
  CCGSubSurf *ss = converter->user_data;
  CCGVert *ccg_vert = ccgSubSurf_getVert(ss, POINTER_FROM_INT(vert));
  return ccgSubSurf_getVertNumEdges(ccg_vert);
}
/* Fill vert_edges with handles of edges incident to the given vertex. */
static void conv_ccg_get_vert_edges(const OpenSubdiv_Converter *converter,
                                    int vert,
                                    int *vert_edges)
{
  CCGSubSurf *ss = converter->user_data;
  CCGVert *ccg_vert = ccgSubSurf_getVert(ss, POINTER_FROM_INT(vert));
  int num_vert_edges = ccgSubSurf_getVertNumEdges(ccg_vert);
  int edge;
  for (edge = 0; edge < num_vert_edges; edge++) {
    CCGEdge *ccg_edge = ccgSubSurf_getVertEdge(ccg_vert, edge);
    vert_edges[edge] = POINTER_AS_INT(ccgSubSurf_getEdgeEdgeHandle(ccg_edge));
  }
}
/* Number of faces incident to the given vertex. */
static int conv_ccg_get_num_vert_faces(const OpenSubdiv_Converter *converter, int vert)
{
  CCGSubSurf *ss = converter->user_data;
  CCGVert *ccg_vert = ccgSubSurf_getVert(ss, POINTER_FROM_INT(vert));
  return ccgSubSurf_getVertNumFaces(ccg_vert);
}
/* Fill vert_faces with handles of faces incident to the given vertex. */
static void conv_ccg_get_vert_faces(const OpenSubdiv_Converter *converter,
                                    int vert,
                                    int *vert_faces)
{
  CCGSubSurf *ss = converter->user_data;
  CCGVert *ccg_vert = ccgSubSurf_getVert(ss, POINTER_FROM_INT(vert));
  int num_vert_faces = ccgSubSurf_getVertNumFaces(ccg_vert);
  int face;
  for (face = 0; face < num_vert_faces; face++) {
    CCGFace *ccg_face = ccgSubSurf_getVertFace(ccg_vert, face);
    vert_faces[face] = POINTER_AS_INT(ccgSubSurf_getFaceFaceHandle(ccg_face));
  }
}
/* The CCG converter exposes neither vertex sharpness nor UV layers; the
 * callbacks below are no-op stubs returning empty/zero values. */
static bool conv_ccg_is_infinite_sharp_vertex(const OpenSubdiv_Converter *UNUSED(converter),
                                              int UNUSED(manifold_vertex_index))
{
  return false;
}
static float conv_ccg_get_vertex_sharpness(const OpenSubdiv_Converter *UNUSED(converter),
                                           int UNUSED(manifold_vertex_index))
{
  return 0.0f;
}
static int conv_ccg_get_num_uv_layers(const OpenSubdiv_Converter *UNUSED(converter))
{
  return 0;
}
static void conv_ccg_precalc_uv_layer(const OpenSubdiv_Converter *UNUSED(converter),
                                      int UNUSED(layer))
{
}
static void conv_ccg_finish_uv_layer(const OpenSubdiv_Converter *UNUSED(converter))
{
}
static int conv_ccg_get_num_uvs(const OpenSubdiv_Converter *UNUSED(converter))
{
  return 0;
}
static int conv_ccg_get_face_corner_uv_index(const OpenSubdiv_Converter *UNUSED(converter),
                                             int UNUSED(face),
                                             int UNUSED(corner_))
{
  return 0;
}
/* Initialize an OpenSubdiv_Converter which reads its topology from an
 * existing CCGSubSurf. The subsurf itself is the user data; nothing is
 * allocated, so freeUserData is left NULL. */
void ccgSubSurf_converter_setup_from_ccg(CCGSubSurf *ss, OpenSubdiv_Converter *converter)
{
  converter->getSchemeType = conv_ccg_get_bilinear_type;
  converter->getVtxBoundaryInterpolation = conv_ccg_get_vtx_boundary_interpolation;
  converter->getFVarLinearInterpolation = conv_ccg_get_fvar_linear_interpolation;
  converter->specifiesFullTopology = conv_ccg_specifies_full_topology;
  converter->getNumFaces = conv_ccg_get_num_faces;
  converter->getNumEdges = conv_ccg_get_num_edges;
  converter->getNumVertices = conv_ccg_get_num_verts;
  converter->getNumFaceVertices = conv_ccg_get_num_face_verts;
  converter->getFaceVertices = conv_ccg_get_face_verts;
  converter->getFaceEdges = conv_ccg_get_face_edges;
  converter->getEdgeVertices = conv_ccg_get_edge_verts;
  converter->getNumEdgeFaces = conv_ccg_get_num_edge_faces;
  converter->getEdgeFaces = conv_ccg_get_edge_faces;
  converter->getEdgeSharpness = conv_ccg_get_edge_sharpness;
  converter->getNumVertexEdges = conv_ccg_get_num_vert_edges;
  converter->getVertexEdges = conv_ccg_get_vert_edges;
  converter->getNumVertexFaces = conv_ccg_get_num_vert_faces;
  converter->getVertexFaces = conv_ccg_get_vert_faces;
  converter->isInfiniteSharpVertex = conv_ccg_is_infinite_sharp_vertex;
  converter->getVertexSharpness = conv_ccg_get_vertex_sharpness;
  converter->getNumUVLayers = conv_ccg_get_num_uv_layers;
  converter->precalcUVLayer = conv_ccg_precalc_uv_layer;
  converter->finishUVLayer = conv_ccg_finish_uv_layer;
  converter->getNumUVCoordinates = conv_ccg_get_num_uvs;
  converter->getFaceCornerUVIndex = conv_ccg_get_face_corner_uv_index;
  /* No owned user data: the CCGSubSurf outlives the converter. */
  converter->freeUserData = NULL;
  converter->user_data = ss;
}
void ccgSubSurf_converter_free(struct OpenSubdiv_Converter *converter)
{
if (converter->freeUserData) {
converter->freeUserData(converter);
}
}
#endif /* WITH_OPENSUBDIV */

View File

@ -38,6 +38,18 @@
#include "opensubdiv_evaluator_capi.h"
#include "opensubdiv_topology_refiner_capi.h"
/* =================----====--===== MODULE ==========================------== */
void BKE_subdiv_init()
{
openSubdiv_init();
}
void BKE_subdiv_exit()
{
openSubdiv_cleanup();
}
/* ========================== CONVERSION HELPERS ============================ */
eSubdivFVarLinearInterpolation BKE_subdiv_fvar_interpolation_from_uv_smooth(int uv_smooth)

View File

@ -71,18 +71,13 @@
#include "CCGSubSurf.h"
#ifdef WITH_OPENSUBDIV
# include "opensubdiv_capi.h"
#endif
/* assumes MLoop's are laid out 4 for each poly, in order */
#define USE_LOOP_LAYOUT_FAST
static CCGDerivedMesh *getCCGDerivedMesh(CCGSubSurf *ss,
int drawInteriorEdges,
int useSubsurfUv,
DerivedMesh *dm,
bool use_gpu_backend);
DerivedMesh *dm);
///
static void *arena_alloc(CCGAllocatorHDL a, int numBytes)
@ -404,82 +399,6 @@ static int ss_sync_from_uv(CCGSubSurf *ss, CCGSubSurf *origss, DerivedMesh *dm,
return 1;
}
#ifdef WITH_OPENSUBDIV
static void UNUSED_FUNCTION(set_subsurf_osd_ccg_uv)(CCGSubSurf *ss,
DerivedMesh *dm,
DerivedMesh *result,
int layer_index)
{
CCGFace **faceMap;
MTFace *tf;
MLoopUV *mluv;
CCGFaceIterator fi;
int index, gridSize, gridFaces, totface, x, y, S;
MLoopUV *dmloopuv = CustomData_get_layer_n(&dm->loopData, CD_MLOOPUV, layer_index);
/* need to update both CD_MTFACE & CD_MLOOPUV, hrmf, we could get away with
* just tface except applying the modifier then looses subsurf UV */
MTFace *tface = CustomData_get_layer_n(&result->faceData, CD_MTFACE, layer_index);
MLoopUV *mloopuv = CustomData_get_layer_n(&result->loopData, CD_MLOOPUV, layer_index);
if (dmloopuv == NULL || (tface == NULL && mloopuv == NULL)) {
return;
}
ccgSubSurf_evaluatorSetFVarUV(ss, dm, layer_index);
/* get some info from CCGSubSurf */
totface = ccgSubSurf_getNumFaces(ss);
gridSize = ccgSubSurf_getGridSize(ss);
gridFaces = gridSize - 1;
/* make a map from original faces to CCGFaces */
faceMap = MEM_mallocN(totface * sizeof(*faceMap), "facemapuv");
for (ccgSubSurf_initFaceIterator(ss, &fi); !ccgFaceIterator_isStopped(&fi);
ccgFaceIterator_next(&fi)) {
CCGFace *f = ccgFaceIterator_getCurrent(&fi);
faceMap[POINTER_AS_INT(ccgSubSurf_getFaceFaceHandle(f))] = f;
}
/* load coordinates from uvss into tface */
tf = tface;
mluv = mloopuv;
for (index = 0; index < totface; index++) {
CCGFace *f = faceMap[index];
int numVerts = ccgSubSurf_getFaceNumVerts(f);
for (S = 0; S < numVerts; S++) {
for (y = 0; y < gridFaces; y++) {
for (x = 0; x < gridFaces; x++) {
const int delta[4][2] = {{0, 0}, {0, 1}, {1, 1}, {1, 0}};
float uv[4][2];
int i;
for (i = 0; i < 4; i++) {
const int dx = delta[i][0], dy = delta[i][1];
const float grid_u = ((float)(x + dx)) / (gridSize - 1),
grid_v = ((float)(y + dy)) / (gridSize - 1);
ccgSubSurf_evaluatorFVarUV(ss, index, S, grid_u, grid_v, uv[i]);
}
if (tf) {
copy_v2_v2(tf->uv[0], uv[0]);
copy_v2_v2(tf->uv[1], uv[1]);
copy_v2_v2(tf->uv[2], uv[2]);
copy_v2_v2(tf->uv[3], uv[3]);
tf++;
}
if (mluv) {
copy_v2_v2(mluv[0].uv, uv[0]);
copy_v2_v2(mluv[1].uv, uv[1]);
copy_v2_v2(mluv[2].uv, uv[2]);
copy_v2_v2(mluv[3].uv, uv[3]);
mluv += 4;
}
}
}
}
}
MEM_freeN(faceMap);
}
#endif /* WITH_OPENSUBDIV */
static void set_subsurf_legacy_uv(CCGSubSurf *ss, DerivedMesh *dm, DerivedMesh *result, int n)
{
CCGSubSurf *uvss;
@ -564,16 +483,7 @@ static void set_subsurf_legacy_uv(CCGSubSurf *ss, DerivedMesh *dm, DerivedMesh *
static void set_subsurf_uv(CCGSubSurf *ss, DerivedMesh *dm, DerivedMesh *result, int layer_index)
{
#ifdef WITH_OPENSUBDIV
if (!ccgSubSurf_needGrids(ss)) {
/* GPU backend is used, no need to evaluate UVs on CPU. */
/* TODO(sergey): Think of how to support edit mode of UVs. */
}
else
#endif
{
set_subsurf_legacy_uv(ss, dm, result, layer_index);
}
set_subsurf_legacy_uv(ss, dm, result, layer_index);
}
/* face weighting */
@ -763,40 +673,13 @@ static void ss_sync_ccg_from_derivedmesh(CCGSubSurf *ss,
#endif
}
#ifdef WITH_OPENSUBDIV
static void ss_sync_osd_from_derivedmesh(CCGSubSurf *ss, DerivedMesh *dm)
{
ccgSubSurf_initFullSync(ss);
ccgSubSurf_prepareTopologyRefiner(ss, dm);
ccgSubSurf_processSync(ss);
}
#endif /* WITH_OPENSUBDIV */
static void ss_sync_from_derivedmesh(CCGSubSurf *ss,
DerivedMesh *dm,
float (*vertexCos)[3],
int use_flat_subdiv,
bool use_subdiv_uvs)
bool UNUSED(use_subdiv_uvs))
{
#ifndef WITH_OPENSUBDIV
UNUSED_VARS(use_subdiv_uvs);
#endif
#ifdef WITH_OPENSUBDIV
/* Reset all related descriptors if actual mesh topology changed or if
* other evaluation-related settings changed.
*/
if (!ccgSubSurf_needGrids(ss)) {
/* TODO(sergey): Use vertex coordinates and flat subdiv flag. */
ccgSubSurf__sync_subdivUvs(ss, use_subdiv_uvs);
ccgSubSurf_checkTopologyChanged(ss, dm);
ss_sync_osd_from_derivedmesh(ss, dm);
}
else
#endif
{
ss_sync_ccg_from_derivedmesh(ss, dm, vertexCos, use_flat_subdiv);
}
ss_sync_ccg_from_derivedmesh(ss, dm, vertexCos, use_flat_subdiv);
}
/***/
@ -850,13 +733,6 @@ static void UNUSED_FUNCTION(ccgDM_getMinMax)(DerivedMesh *dm, float r_min[3], fl
int i, edgeSize = ccgSubSurf_getEdgeSize(ss);
int gridSize = ccgSubSurf_getGridSize(ss);
#ifdef WITH_OPENSUBDIV
if (ccgdm->useGpuBackend) {
ccgSubSurf_getMinMax(ccgdm->ss, r_min, r_max);
return;
}
#endif
CCG_key_top_level(&key, ss);
if (!ccgSubSurf_getNumVerts(ss)) {
@ -1642,11 +1518,9 @@ static void ccgDM_release(DerivedMesh *dm)
}
MEM_freeN(ccgdm->edgeFlags);
MEM_freeN(ccgdm->faceFlags);
if (ccgdm->useGpuBackend == false) {
MEM_freeN(ccgdm->vertMap);
MEM_freeN(ccgdm->edgeMap);
MEM_freeN(ccgdm->faceMap);
}
MEM_freeN(ccgdm->vertMap);
MEM_freeN(ccgdm->edgeMap);
MEM_freeN(ccgdm->faceMap);
BLI_mutex_end(&ccgdm->loops_cache_lock);
BLI_rw_mutex_end(&ccgdm->origindex_cache_rwlock);
@ -2417,76 +2291,44 @@ static void set_ccgdm_all_geometry(CCGDerivedMesh *ccgdm,
BLI_assert(faceNum == ccgSubSurf_getNumFinalFaces(ss));
}
/* Fill in only geometry arrays needed for the GPU tessellation. */
static void set_ccgdm_gpu_geometry(CCGDerivedMesh *ccgdm, DerivedMesh *dm)
static CCGDerivedMesh *getCCGDerivedMesh(CCGSubSurf *ss,
int drawInteriorEdges,
int useSubsurfUv,
DerivedMesh *dm)
{
const int totface = dm->getNumPolys(dm);
MPoly *mpoly = CustomData_get_layer(&dm->polyData, CD_MPOLY);
int index;
DMFlagMat *faceFlags = ccgdm->faceFlags;
for (index = 0; index < totface; index++) {
faceFlags->flag = mpoly ? mpoly[index].flag : 0;
faceFlags->mat_nr = mpoly ? mpoly[index].mat_nr : 0;
faceFlags++;
}
/* TODO(sergey): Fill in edge flags. */
}
static CCGDerivedMesh *getCCGDerivedMesh(
CCGSubSurf *ss, int drawInteriorEdges, int useSubsurfUv, DerivedMesh *dm, bool use_gpu_backend)
{
#ifdef WITH_OPENSUBDIV
const int totedge = dm->getNumEdges(dm);
const int totface = dm->getNumPolys(dm);
#else
const int totedge = ccgSubSurf_getNumEdges(ss);
const int totface = ccgSubSurf_getNumFaces(ss);
#endif
CCGDerivedMesh *ccgdm = MEM_callocN(sizeof(*ccgdm), "ccgdm");
if (use_gpu_backend == false) {
BLI_assert(totedge == ccgSubSurf_getNumEdges(ss));
BLI_assert(totface == ccgSubSurf_getNumFaces(ss));
DM_from_template(&ccgdm->dm,
dm,
DM_TYPE_CCGDM,
ccgSubSurf_getNumFinalVerts(ss),
ccgSubSurf_getNumFinalEdges(ss),
0,
ccgSubSurf_getNumFinalFaces(ss) * 4,
ccgSubSurf_getNumFinalFaces(ss));
BLI_assert(totedge == ccgSubSurf_getNumEdges(ss));
BLI_assert(totface == ccgSubSurf_getNumFaces(ss));
DM_from_template(&ccgdm->dm,
dm,
DM_TYPE_CCGDM,
ccgSubSurf_getNumFinalVerts(ss),
ccgSubSurf_getNumFinalEdges(ss),
0,
ccgSubSurf_getNumFinalFaces(ss) * 4,
ccgSubSurf_getNumFinalFaces(ss));
CustomData_free_layer_active(&ccgdm->dm.polyData, CD_NORMAL, ccgdm->dm.numPolyData);
CustomData_free_layer_active(&ccgdm->dm.polyData, CD_NORMAL, ccgdm->dm.numPolyData);
ccgdm->reverseFaceMap = MEM_callocN(sizeof(int) * ccgSubSurf_getNumFinalFaces(ss),
"reverseFaceMap");
ccgdm->reverseFaceMap = MEM_callocN(sizeof(int) * ccgSubSurf_getNumFinalFaces(ss),
"reverseFaceMap");
create_ccgdm_maps(ccgdm, ss);
}
else {
DM_from_template(&ccgdm->dm, dm, DM_TYPE_CCGDM, 0, 0, 0, 0, dm->getNumPolys(dm));
CustomData_copy_data(&dm->polyData, &ccgdm->dm.polyData, 0, 0, dm->getNumPolys(dm));
}
create_ccgdm_maps(ccgdm, ss);
set_default_ccgdm_callbacks(ccgdm);
ccgdm->ss = ss;
ccgdm->drawInteriorEdges = drawInteriorEdges;
ccgdm->useSubsurfUv = useSubsurfUv;
ccgdm->useGpuBackend = use_gpu_backend;
/* CDDM hack. */
ccgdm->edgeFlags = MEM_callocN(sizeof(short) * totedge, "edgeFlags");
ccgdm->faceFlags = MEM_callocN(sizeof(DMFlagMat) * totface, "faceFlags");
if (use_gpu_backend == false) {
set_ccgdm_all_geometry(ccgdm, ss, dm, useSubsurfUv != 0);
}
else {
set_ccgdm_gpu_geometry(ccgdm, dm);
}
set_ccgdm_all_geometry(ccgdm, ss, dm, useSubsurfUv != 0);
ccgdm->dm.numVertData = ccgSubSurf_getNumFinalVerts(ss);
ccgdm->dm.numEdgeData = ccgSubSurf_getNumFinalEdges(ss);
@ -2502,21 +2344,6 @@ static CCGDerivedMesh *getCCGDerivedMesh(
/***/
static bool subsurf_use_gpu_backend(SubsurfFlags flags)
{
#ifdef WITH_OPENSUBDIV
  /* Use the GPU backend if it's the last modifier in the stack
   * and the user chose to use any of the OSD compute devices,
   * but also check if the GPU has all needed features.
   */
return (flags & SUBSURF_USE_GPU_BACKEND) != 0 &&
(U.opensubdiv_compute_type != USER_OPENSUBDIV_COMPUTE_NONE);
#else
(void)flags;
return false;
#endif
}
struct DerivedMesh *subsurf_make_derived_from_derived(struct DerivedMesh *dm,
struct SubsurfModifierData *smd,
const struct Scene *scene,
@ -2527,7 +2354,6 @@ struct DerivedMesh *subsurf_make_derived_from_derived(struct DerivedMesh *dm,
const CCGFlags useAging = (smd->flags & eSubsurfModifierFlag_DebugIncr) ? CCG_USE_AGING : 0;
const int useSubsurfUv = (smd->uv_smooth != SUBSURF_UV_SMOOTH_NONE);
const int drawInteriorEdges = !(smd->flags & eSubsurfModifierFlag_ControlEdges);
const bool use_gpu_backend = subsurf_use_gpu_backend(flags);
const bool ignore_simplify = (flags & SUBSURF_IGNORE_SIMPLIFY);
CCGDerivedMesh *result;
@ -2546,11 +2372,8 @@ struct DerivedMesh *subsurf_make_derived_from_derived(struct DerivedMesh *dm,
smd->emCache = _getSubSurf(smd->emCache, levels, 3, useSimple | useAging | CCG_CALC_NORMALS);
#ifdef WITH_OPENSUBDIV
ccgSubSurf_setSkipGrids(smd->emCache, use_gpu_backend);
#endif
ss_sync_from_derivedmesh(smd->emCache, dm, vertCos, useSimple, useSubsurfUv);
result = getCCGDerivedMesh(smd->emCache, drawInteriorEdges, useSubsurfUv, dm, use_gpu_backend);
result = getCCGDerivedMesh(smd->emCache, drawInteriorEdges, useSubsurfUv, dm);
}
else if (flags & SUBSURF_USE_RENDER_PARAMS) {
/* Do not use cache in render mode. */
@ -2567,7 +2390,7 @@ struct DerivedMesh *subsurf_make_derived_from_derived(struct DerivedMesh *dm,
ss_sync_from_derivedmesh(ss, dm, vertCos, useSimple, useSubsurfUv);
result = getCCGDerivedMesh(ss, drawInteriorEdges, useSubsurfUv, dm, false);
result = getCCGDerivedMesh(ss, drawInteriorEdges, useSubsurfUv, dm);
result->freeSS = 1;
}
@ -2600,32 +2423,15 @@ struct DerivedMesh *subsurf_make_derived_from_derived(struct DerivedMesh *dm,
ss_sync_from_derivedmesh(ss, dm, vertCos, useSimple, useSubsurfUv);
result = getCCGDerivedMesh(smd->mCache, drawInteriorEdges, useSubsurfUv, dm, false);
result = getCCGDerivedMesh(smd->mCache, drawInteriorEdges, useSubsurfUv, dm);
}
else {
CCGFlags ccg_flags = useSimple | CCG_USE_ARENA | CCG_CALC_NORMALS;
CCGSubSurf *prevSS = NULL;
if (smd->mCache && (flags & SUBSURF_IS_FINAL_CALC)) {
#ifdef WITH_OPENSUBDIV
      /* With OpenSubdiv enabled we always try to re-use the previous
       * subsurf structure in order to save computation time, since
       * re-creation is rather a complicated business.
       *
       * TODO(sergey): There was a good reason why the final calculation
       * used to free the entire cached subsurf structure. The reason
       * for this is still to be investigated, to be sure we don't have
       * regressions here.
       */
if (use_gpu_backend) {
prevSS = smd->mCache;
}
else
#endif
{
ccgSubSurf_free(smd->mCache);
smd->mCache = NULL;
}
ccgSubSurf_free(smd->mCache);
smd->mCache = NULL;
}
if (flags & SUBSURF_ALLOC_PAINT_MASK) {
@ -2633,12 +2439,9 @@ struct DerivedMesh *subsurf_make_derived_from_derived(struct DerivedMesh *dm,
}
ss = _getSubSurf(prevSS, levels, 3, ccg_flags);
#ifdef WITH_OPENSUBDIV
ccgSubSurf_setSkipGrids(ss, use_gpu_backend);
#endif
ss_sync_from_derivedmesh(ss, dm, vertCos, useSimple, useSubsurfUv);
result = getCCGDerivedMesh(ss, drawInteriorEdges, useSubsurfUv, dm, use_gpu_backend);
result = getCCGDerivedMesh(ss, drawInteriorEdges, useSubsurfUv, dm);
if (flags & SUBSURF_IS_FINAL_CALC) {
smd->mCache = ss;
@ -2710,26 +2513,10 @@ void subsurf_calculate_limit_positions(Mesh *me, float (*r_positions)[3])
bool subsurf_has_edges(DerivedMesh *dm)
{
CCGDerivedMesh *ccgdm = (CCGDerivedMesh *)dm;
#ifdef WITH_OPENSUBDIV
if (ccgdm->useGpuBackend) {
return true;
}
#else
(void)ccgdm;
#endif
return dm->getNumEdges(dm) != 0;
}
bool subsurf_has_faces(DerivedMesh *dm)
{
CCGDerivedMesh *ccgdm = (CCGDerivedMesh *)dm;
#ifdef WITH_OPENSUBDIV
if (ccgdm->useGpuBackend) {
return true;
}
#else
(void)ccgdm;
#endif
return dm->getNumPolys(dm) != 0;
}

View File

@ -168,10 +168,6 @@ if(WITH_BUILDINFO)
add_definitions(-DWITH_BUILDINFO)
endif()
if(WITH_OPENSUBDIV)
add_definitions(-DWITH_OPENSUBDIV)
endif()
if(WITH_INPUT_NDOF)
add_definitions(-DWITH_INPUT_NDOF)
endif()

View File

@ -982,10 +982,6 @@ void wm_draw_update(bContext *C)
wmWindowManager *wm = CTX_wm_manager(C);
wmWindow *win;
#ifdef WITH_OPENSUBDIV
BKE_subsurf_free_unused_buffers();
#endif
GPU_free_unused_buffers(bmain);
for (win = wm->windows.first; win; win = win->next) {

View File

@ -125,6 +125,8 @@
#include "GPU_material.h"
#include "BKE_sound.h"
#include "BKE_subdiv.h"
#include "COM_compositor.h"
#include "DEG_depsgraph.h"
@ -132,10 +134,6 @@
#include "DRW_engine.h"
#ifdef WITH_OPENSUBDIV
# include "BKE_subsurf.h"
#endif
CLG_LOGREF_DECLARE_GLOBAL(WM_LOG_OPERATORS, "wm.operator");
CLG_LOGREF_DECLARE_GLOBAL(WM_LOG_HANDLERS, "wm.handler");
CLG_LOGREF_DECLARE_GLOBAL(WM_LOG_EVENTS, "wm.event");
@ -193,9 +191,8 @@ void WM_init_opengl(Main *bmain)
GPU_pass_cache_init();
#ifdef WITH_OPENSUBDIV
BKE_subsurf_osd_init();
#endif
BKE_subdiv_init();
opengl_is_init = true;
}
@ -576,11 +573,9 @@ void WM_exit_ex(bContext *C, const bool do_python)
COM_deinitialize();
#endif
if (opengl_is_init) {
#ifdef WITH_OPENSUBDIV
BKE_subsurf_osd_cleanup();
#endif
BKE_subdiv_exit();
if (opengl_is_init) {
GPU_free_unused_buffers(G_MAIN);
}