Added clang-cl support to BLI_strict_flags.h

Joseph Eagar 2021-05-23 20:39:52 -07:00
parent a97c5d7daa
commit d4292bbd28
8 changed files with 173 additions and 44 deletions

intern/atomic/intern/atomic_ops_msvc.h

@@ -49,27 +49,27 @@
 /* Unsigned */
 ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x)
 {
-  return InterlockedExchangeAdd64((int64_t *)p, (int64_t)x) + x;
+  return (uint64_t)(InterlockedExchangeAdd64((int64_t *)p, (int64_t)x) + (int64_t)x);
 }

 ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x)
 {
-  return InterlockedExchangeAdd64((int64_t *)p, -((int64_t)x)) - x;
+  return (uint64_t)(InterlockedExchangeAdd64((int64_t *)p, -((int64_t)x)) - (int64_t)x);
 }

 ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _new)
 {
-  return InterlockedCompareExchange64((int64_t *)v, _new, old);
+  return (uint64_t)(InterlockedCompareExchange64((int64_t *)v, _new, old));
 }

 ATOMIC_INLINE uint64_t atomic_fetch_and_add_uint64(uint64_t *p, uint64_t x)
 {
-  return InterlockedExchangeAdd64((int64_t *)p, (int64_t)x);
+  return (uint64_t)InterlockedExchangeAdd64((int64_t *)p, (int64_t)x);
 }

 ATOMIC_INLINE uint64_t atomic_fetch_and_sub_uint64(uint64_t *p, uint64_t x)
 {
-  return InterlockedExchangeAdd64((int64_t *)p, -((int64_t)x));
+  return (uint64_t)InterlockedExchangeAdd64((int64_t *)p, -((int64_t)x));
 }

 /* Signed */
@@ -103,32 +103,32 @@ ATOMIC_INLINE int64_t atomic_fetch_and_sub_int64(int64_t *p, int64_t x)

 /* Unsigned */
 ATOMIC_INLINE uint32_t atomic_add_and_fetch_uint32(uint32_t *p, uint32_t x)
 {
-  return InterlockedExchangeAdd(p, x) + x;
+  return (uint32_t)InterlockedExchangeAdd(p, x) + x;
 }

 ATOMIC_INLINE uint32_t atomic_sub_and_fetch_uint32(uint32_t *p, uint32_t x)
 {
-  return InterlockedExchangeAdd(p, -((int32_t)x)) - x;
+  return (uint32_t)InterlockedExchangeAdd(p, -((int32_t)x)) - x;
 }

 ATOMIC_INLINE uint32_t atomic_cas_uint32(uint32_t *v, uint32_t old, uint32_t _new)
 {
-  return InterlockedCompareExchange((long *)v, _new, old);
+  return (uint32_t)InterlockedCompareExchange((long *)v, _new, old);
 }

 ATOMIC_INLINE uint32_t atomic_fetch_and_add_uint32(uint32_t *p, uint32_t x)
 {
-  return InterlockedExchangeAdd(p, x);
+  return (uint32_t)InterlockedExchangeAdd(p, x);
 }

 ATOMIC_INLINE uint32_t atomic_fetch_and_or_uint32(uint32_t *p, uint32_t x)
 {
-  return InterlockedOr((long *)p, x);
+  return (uint32_t)InterlockedOr((long *)p, x);
 }

 ATOMIC_INLINE uint32_t atomic_fetch_and_and_uint32(uint32_t *p, uint32_t x)
 {
-  return InterlockedAnd((long *)p, x);
+  return (uint32_t)InterlockedAnd((long *)p, x);
 }

 /* Signed */
@@ -205,9 +205,9 @@ ATOMIC_INLINE uint8_t atomic_fetch_and_or_uint8(uint8_t *p, uint8_t b)

 ATOMIC_INLINE int8_t atomic_fetch_and_and_int8(int8_t *p, int8_t b)
 {
 #if (LG_SIZEOF_PTR == 8 || LG_SIZEOF_INT == 8)
-  return InterlockedAnd8((char *)p, (char)b);
+  return (int8_t)InterlockedAnd8((char *)p, (char)b);
 #else
-  return _InterlockedAnd8((char *)p, (char)b);
+  return (int8_t)_InterlockedAnd8((char *)p, (char)b);
 #endif
 }
@@ -215,9 +215,9 @@ ATOMIC_INLINE int8_t atomic_fetch_and_and_int8(int8_t *p, int8_t b)

 ATOMIC_INLINE int8_t atomic_fetch_and_or_int8(int8_t *p, int8_t b)
 {
 #if (LG_SIZEOF_PTR == 8 || LG_SIZEOF_INT == 8)
-  return InterlockedOr8((char *)p, (char)b);
+  return (int8_t)InterlockedOr8((char *)p, (char)b);
 #else
-  return _InterlockedOr8((char *)p, (char)b);
+  return (int8_t)_InterlockedOr8((char *)p, (char)b);
 #endif
 }
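
Why the casts are needed: the Windows Interlocked* intrinsics are declared on signed types, so returning their result from a function whose return type is unsigned is an implicit sign conversion, which BLI_strict_flags.h (below) now promotes to an error under clang-cl. A minimal sketch of the failure mode, with a hypothetical stand-in for the intrinsic:

#include <stdint.h>

/* Hypothetical stand-in with the same signedness as InterlockedExchangeAdd64. */
static int64_t fake_exchange_add64(int64_t *p, int64_t x)
{
  int64_t old = *p;
  *p += x;
  return old;
}

uint64_t add_and_fetch_u64(uint64_t *p, uint64_t x)
{
  /* Without casts this is an implicit int64_t -> uint64_t conversion, an
   * error once -Wsign-conversion is promoted:
   *   return fake_exchange_add64((int64_t *)p, (int64_t)x) + x;
   * Explicit casts, matching the pattern used in the commit: */
  return (uint64_t)(fake_exchange_add64((int64_t *)p, (int64_t)x) + (int64_t)x);
}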

source/blender/blenkernel/intern/subdiv_ccg.c

@@ -28,6 +28,7 @@
 #include "MEM_guardedalloc.h"
+#include "BLI_ghash.h"
 #include "BLI_math_bits.h"
 #include "BLI_math_vector.h"
 #include "BLI_task.h"
@@ -50,6 +51,11 @@ static void subdiv_ccg_average_inner_face_grids(SubdivCCG *subdiv_ccg,
                                                 CCGKey *key,
                                                 SubdivCCGFace *face);
+void subdiv_ccg_average_faces_boundaries_and_corners(SubdivCCG *subdiv_ccg,
+                                                     CCGKey *key,
+                                                     struct CCGFace **effected_faces,
+                                                     int num_effected_faces);

 /** \} */

 /* -------------------------------------------------------------------- */
@@ -889,11 +895,12 @@ void BKE_subdiv_ccg_update_normals(SubdivCCG *subdiv_ccg,
     return;
   }

   subdiv_ccg_recalc_modified_inner_grid_normals(subdiv_ccg, effected_faces, num_effected_faces);
   /* TODO(sergey): Only average elements which are adjacent to modified
    * faces. */
   CCGKey key;
   BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
-  subdiv_ccg_average_all_boundaries_and_corners(subdiv_ccg, &key);
+  subdiv_ccg_average_faces_boundaries_and_corners(
+      subdiv_ccg, &key, effected_faces, num_effected_faces);
 }

 /** \} */
@@ -1032,6 +1039,9 @@ static void subdiv_ccg_average_inner_grids_task(void *__restrict userdata_v,
 typedef struct AverageGridsBoundariesData {
   SubdivCCG *subdiv_ccg;
   CCGKey *key;
+
+  /* Optional lookup table. Maps task range index to index in subdiv_ccg->adjacent_edges. */
+  int *idxmap;
 } AverageGridsBoundariesData;

 typedef struct AverageGridsBoundariesTLSData {
@@ -1079,10 +1089,12 @@ static void subdiv_ccg_average_grids_boundary(SubdivCCG *subdiv_ccg,
 }

 static void subdiv_ccg_average_grids_boundaries_task(void *__restrict userdata_v,
-                                                     const int adjacent_edge_index,
+                                                     const int n,
                                                      const TaskParallelTLS *__restrict tls_v)
 {
   AverageGridsBoundariesData *data = userdata_v;
+  const int adjacent_edge_index = data->idxmap ? data->idxmap[n] : n;
+
   AverageGridsBoundariesTLSData *tls = tls_v->userdata_chunk;
   SubdivCCG *subdiv_ccg = data->subdiv_ccg;
   CCGKey *key = data->key;
@@ -1100,6 +1112,9 @@ static void subdiv_ccg_average_grids_boundaries_free(
 typedef struct AverageGridsCornerData {
   SubdivCCG *subdiv_ccg;
   CCGKey *key;
+
+  /* Optional lookup table. Maps task range index to index in subdiv_ccg->adjacent_vertices. */
+  int *idxmap;
 } AverageGridsCornerData;

 static void subdiv_ccg_average_grids_corners(SubdivCCG *subdiv_ccg,
@@ -1128,10 +1143,11 @@
 }

 static void subdiv_ccg_average_grids_corners_task(void *__restrict userdata_v,
-                                                  const int adjacent_vertex_index,
+                                                  const int n,
                                                   const TaskParallelTLS *__restrict UNUSED(tls_v))
 {
   AverageGridsCornerData *data = userdata_v;
+  const int adjacent_vertex_index = data->idxmap ? data->idxmap[n] : n;
   SubdivCCG *subdiv_ccg = data->subdiv_ccg;
   CCGKey *key = data->key;
   SubdivCCGAdjacentVertex *adjacent_vertex = &subdiv_ccg->adjacent_vertices[adjacent_vertex_index];
@@ -1143,9 +1159,7 @@ static void subdiv_ccg_average_all_boundaries(SubdivCCG *subdiv_ccg, CCGKey *key
   TaskParallelSettings parallel_range_settings;
   BLI_parallel_range_settings_defaults(&parallel_range_settings);
   AverageGridsBoundariesData boundaries_data = {
-      .subdiv_ccg = subdiv_ccg,
-      .key = key,
-  };
+      .subdiv_ccg = subdiv_ccg, .key = key, .idxmap = NULL};
   AverageGridsBoundariesTLSData tls_data = {NULL};
   parallel_range_settings.userdata_chunk = &tls_data;
   parallel_range_settings.userdata_chunk_size = sizeof(tls_data);
@@ -1161,10 +1175,7 @@ static void subdiv_ccg_average_all_corners(SubdivCCG *subdiv_ccg, CCGKey *key)
 {
   TaskParallelSettings parallel_range_settings;
   BLI_parallel_range_settings_defaults(&parallel_range_settings);
-  AverageGridsCornerData corner_data = {
-      .subdiv_ccg = subdiv_ccg,
-      .key = key,
-  };
+  AverageGridsCornerData corner_data = {.subdiv_ccg = subdiv_ccg, .key = key, .idxmap = NULL};
   BLI_task_parallel_range(0,
                           subdiv_ccg->num_adjacent_vertices,
                           &corner_data,
@@ -1198,6 +1209,108 @@ void BKE_subdiv_ccg_average_grids(SubdivCCG *subdiv_ccg)
   subdiv_ccg_average_all_boundaries_and_corners(subdiv_ccg, &key);
 }

+void subdiv_ccg_average_faces_boundaries_and_corners(SubdivCCG *subdiv_ccg,
+                                                     CCGKey *key,
+                                                     struct CCGFace **effected_faces,
+                                                     int num_effected_faces)
+{
+  Subdiv *subdiv = subdiv_ccg->subdiv;
+  GSet *adjacent_verts = BLI_gset_ptr_new(__func__);
+  GSet *adjacent_edges = BLI_gset_ptr_new(__func__);
+  OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner;
+  GSetIterator gi;
+
+  StaticOrHeapIntStorage face_vertices_storage;
+  StaticOrHeapIntStorage face_edges_storage;
+
+  static_or_heap_storage_init(&face_vertices_storage);
+  static_or_heap_storage_init(&face_edges_storage);
+
+  for (int i = 0; i < num_effected_faces; i++) {
+    SubdivCCGFace *face = (SubdivCCGFace *)effected_faces[i];
+    int face_index = face - subdiv_ccg->faces;
+    const int num_face_grids = face->num_grids;
+    const int num_face_edges = num_face_grids;
+    int *face_vertices = static_or_heap_storage_get(&face_vertices_storage, num_face_edges);
+    topology_refiner->getFaceVertices(topology_refiner, face_index, face_vertices);
+
+    /* Note that order of edges is same as order of MLoops, which also
+     * means it's the same as order of grids. */
+    int *face_edges = static_or_heap_storage_get(&face_edges_storage, num_face_edges);
+    topology_refiner->getFaceEdges(topology_refiner, face_index, face_edges);
+
+    for (int corner = 0; corner < num_face_edges; corner++) {
+      const int vertex_index = face_vertices[corner];
+      const int edge_index = face_edges[corner];
+
+      int edge_vertices[2];
+      topology_refiner->getEdgeVertices(topology_refiner, edge_index, edge_vertices);
+      const bool is_edge_flipped = (edge_vertices[0] != vertex_index);
+
+      /* Grid which is adjacent to the current corner. */
+      const int current_grid_index = face->start_grid_index + corner;
+      /* Grid which is adjacent to the next corner. */
+      const int next_grid_index = face->start_grid_index + (corner + 1) % num_face_grids;
+
+      SubdivCCGAdjacentEdge *adjacent_edge = &subdiv_ccg->adjacent_edges[edge_index];
+      BLI_gset_add(adjacent_edges, adjacent_edge);
+
+      /* Grid which is adjacent to the current corner. */
+      const int grid_index = face->start_grid_index + corner;
+
+      SubdivCCGAdjacentVertex *adjacent_vertex = &subdiv_ccg->adjacent_vertices[vertex_index];
+      BLI_gset_add(adjacent_verts, adjacent_vertex);
+    }
+  }
+
+  static_or_heap_storage_free(&face_vertices_storage);
+  static_or_heap_storage_free(&face_edges_storage);
+
+  /* First do boundaries. */
+  int *idxmap = MEM_mallocN(sizeof(*idxmap) * BLI_gset_len(adjacent_edges), "idxmap");
+  int i = 0;
+
+  GSET_ITER_INDEX (gi, adjacent_edges, i) {
+    SubdivCCGAdjacentEdge *adjacent_edge = BLI_gsetIterator_getKey(&gi);
+    idxmap[i] = adjacent_edge - subdiv_ccg->adjacent_edges;
+  }
+
+  TaskParallelSettings parallel_range_settings;
+  BLI_parallel_range_settings_defaults(&parallel_range_settings);
+  AverageGridsBoundariesData boundaries_data = {
+      .subdiv_ccg = subdiv_ccg, .key = key, .idxmap = idxmap};
+  AverageGridsBoundariesTLSData tls_data = {NULL};
+  parallel_range_settings.userdata_chunk = &tls_data;
+  parallel_range_settings.userdata_chunk_size = sizeof(tls_data);
+  parallel_range_settings.func_free = subdiv_ccg_average_grids_boundaries_free;
+  BLI_task_parallel_range(0,
+                          BLI_gset_len(adjacent_edges),
+                          &boundaries_data,
+                          subdiv_ccg_average_grids_boundaries_task,
+                          &parallel_range_settings);
+
+  /* Now do corners. */
+  MEM_SAFE_FREE(idxmap);
+  idxmap = MEM_mallocN(sizeof(*idxmap) * BLI_gset_len(adjacent_verts), "idxmap");
+
+  GSET_ITER_INDEX (gi, adjacent_verts, i) {
+    SubdivCCGAdjacentVertex *adjacent_vertex = BLI_gsetIterator_getKey(&gi);
+    idxmap[i] = adjacent_vertex - subdiv_ccg->adjacent_vertices;
+  }
+
+  AverageGridsCornerData corner_data = {.subdiv_ccg = subdiv_ccg, .key = key, .idxmap = idxmap};
+  BLI_parallel_range_settings_defaults(&parallel_range_settings);
+  BLI_task_parallel_range(0,
+                          BLI_gset_len(adjacent_verts),
+                          &corner_data,
+                          subdiv_ccg_average_grids_corners_task,
+                          &parallel_range_settings);
+
+  BLI_gset_free(adjacent_verts, NULL);
+  BLI_gset_free(adjacent_edges, NULL);
+  MEM_SAFE_FREE(idxmap);
+}
+
 typedef struct StitchFacesInnerGridsData {
   SubdivCCG *subdiv_ccg;
   CCGKey *key;
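
The detail worth calling out in the new function: BLI_task_parallel_range only walks a dense range 0..n-1, so the sparse set of touched edges and vertices is flattened into idxmap, and each task redirects through it (NULL meaning the old identity behavior). A stripped-down sketch of that indirection, with hypothetical names:

typedef struct SparseTaskData {
  const int *idxmap; /* NULL means identity: task index == element index. */
} SparseTaskData;

static void sparse_task(void *__restrict userdata, const int n)
{
  SparseTaskData *data = userdata;
  /* Redirect the dense task index to the sparse element index. */
  const int element_index = data->idxmap ? data->idxmap[n] : n;
  process_element(element_index); /* hypothetical per-element work */
}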

source/blender/blenlib/BLI_compiler_attrs.h

@@ -99,12 +99,12 @@
 # define ATTR_ALIGN(x) __attribute__((aligned(x)))
 #endif

-/* Disable optimization for a function (for debugging use only)*/
+/* Disable optimization for a function (for debugging use only!)*/
 #ifdef __clang__
 #  define ATTR_NO_OPT __attribute__((optnone))
-#elif __MSC_VER
+#elif defined(__MSC_VER)
 #  define ATTR_NO_OPT __pragma(optimize("", off))
-#elif __GNUC__
+#elif defined(__GNUC__)
 #  define ATTR_NO_OPT __attribute__((optimize("O0")))
 #else
 #  define ATTR_NO_OPT
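
Two notes here. First, ATTR_NO_OPT goes on a function definition so the optimizer leaves the body intact while stepping through it in a debugger; a usage sketch with a hypothetical function:

ATTR_NO_OPT static int accumulate_for_debugging(const int *values, int n)
{
  /* With optimization disabled, locals stay live and statements keep source order. */
  int sum = 0;
  for (int i = 0; i < n; i++) {
    sum += values[i];
  }
  return sum;
}

Second, the macro tested here is __MSC_VER, but MSVC actually predefines _MSC_VER (one leading underscore), so the MSVC branch as committed is never taken.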

source/blender/blenlib/BLI_strict_flags.h

@@ -45,10 +45,19 @@
 #endif

 #ifdef _MSC_VER
-#  pragma warning(error : 4018) /* signed/unsigned mismatch */
-#  pragma warning(error : 4244) /* conversion from 'type1' to 'type2', possible loss of data */
-#  pragma warning(error : 4245) /* conversion from 'int' to 'unsigned int' */
-#  pragma warning(error : 4267) /* conversion from 'size_t' to 'type', possible loss of data */
-#  pragma warning(error : 4305) /* truncation from 'type1' to 'type2' */
-#  pragma warning(error : 4389) /* signed/unsigned mismatch */
+#  ifdef __clang__
+#    pragma clang diagnostic error "-Wsign-conversion"
+#    pragma clang diagnostic error "-Wsign-compare"
+#    pragma clang diagnostic error "-Wimplicit-float-conversion"
+#    pragma clang diagnostic error "-Wimplicit-int-conversion"
+#    pragma clang diagnostic error "-Wimplicit-int"
+#    pragma clang diagnostic error "-Wimplicit-function-declaration"
+#  else
+#    pragma warning(error : 4018) /* signed/unsigned mismatch */
+#    pragma warning(error : 4244) /* conversion from 'type1' to 'type2', possible loss of data */
+#    pragma warning(error : 4245) /* conversion from 'int' to 'unsigned int' */
+#    pragma warning(error : 4267) /* conversion from 'size_t' to 'type', possible loss of data */
+#    pragma warning(error : 4305) /* truncation from 'type1' to 'type2' */
+#    pragma warning(error : 4389) /* signed/unsigned mismatch */
+#  endif
 #endif
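
What this buys under clang-cl, sketched with a hypothetical snippet: once the header is included (conventionally last, so system headers are unaffected), implicit sign and precision changes become hard errors and have to be written out:

#include "BLI_strict_flags.h" /* Keep last. */

static unsigned int clamp_index(int value, size_t limit)
{
  /* Both of these would now fail to compile:
   *   unsigned int v = value;  (changes signedness)
   *   int l = limit;           (loses precision)
   * so the conversions are made explicit: */
  unsigned int v = (unsigned int)(value < 0 ? 0 : value);
  unsigned int l = (unsigned int)limit;
  return v < l ? v : l;
}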

source/blender/blenlib/intern/BLI_table_gset.c

@@ -342,8 +342,8 @@ void BLI_table_gset_insert(TableGSet *ts, void *elem)
 {
   if (ts->cur >= ts->size) {
     uint newsize = (uint)(ts->cur + 1);
-    newsize = (newsize << 1) - (newsize >> 1);
-    newsize = MAX2(newsize, 8);
+    newsize = (newsize << 1U) - (newsize >> 1U);
+    newsize = MAX2(newsize, 8U);

     if (!ts->elems) {
       ts->elems = (void *)MEM_mallocN(sizeof(void *) * newsize, "ts->elems");
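
The growth step is a ~1.5x factor, since (n << 1) - (n >> 1) is 2n - n/2, and the U suffixes keep the shifts and the MAX2 comparison purely unsigned so the strict flags stay quiet. A quick worked example:

uint newsize = 10;
newsize = (newsize << 1U) - (newsize >> 1U); /* 20 - 5 = 15: roughly 1.5x growth */
newsize = MAX2(newsize, 8U);                 /* keep at least the 8-element floor */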

source/blender/blenlib/intern/BLI_mempool.c

@@ -281,7 +281,7 @@ BLI_mempool *BLI_mempool_create_for_tasks(const unsigned int esize,
   BLI_freenode *last_tail = NULL;

   /* Allocate the actual chunks. */
-  for (int i = 0; i < pool->maxchunks; i++) {
+  for (uint i = 0; i < pool->maxchunks; i++) {
     BLI_mempool_chunk *mpchunk = mempool_chunk_alloc(pool);
     last_tail = mempool_chunk_add(pool, mpchunk, last_tail);
   }
@@ -304,7 +304,7 @@
   pool->totused = totalloc;
   pool->free = NULL;

-  int i = pool->pchunk - 1;
+  int i = (int)pool->pchunk - 1;

   while (lastchunk && totalloc > totelem) {
     if (i < 0) {

source/blender/bmesh/intern/bmesh_log.c

@@ -587,7 +587,7 @@ static void bm_log_vert_values_swap(
   if (lv->customdata) {
     if (v->head.data) {
       old_cdata = scratch;
-      memcpy(old_cdata, v->head.data, bm->vdata.totsize);
+      memcpy(old_cdata, v->head.data, (size_t)bm->vdata.totsize);
     }
     CustomData_bmesh_swap_data(&entry->vdata, &bm->vdata, lv->customdata, &v->head.data);
   }
@@ -625,7 +625,7 @@ static void bm_log_face_values_swap(BMLog *log,
 #ifdef CUSTOMDATA
   if (f->head.data) {
     old_cdata = scratch;
-    memcpy(old_cdata, f->head.data, log->bm->pdata.totsize);
+    memcpy(old_cdata, f->head.data, (size_t)log->bm->pdata.totsize);
   }

   if (lf->customdata_f) {
@@ -678,7 +678,8 @@ static void bm_log_full_mesh_intern(BMesh *bm, BMLog *log, BMLogEntry *entry)
 {
   CustomData_MeshMasks cd_mask_extra = {CD_MASK_DYNTOPO_VERT, 0, 0, 0, 0};

-  entry->full_copy_idmap = BLI_ghash_ptr_new_ex("bmlog", bm->totvert + bm->totface);
+  entry->full_copy_idmap = BLI_ghash_ptr_new_ex("bmlog",
+                                                (unsigned int)(bm->totvert + bm->totface));

   BM_mesh_elem_index_ensure(bm, BM_VERT | BM_EDGE | BM_FACE);
@@ -693,7 +694,7 @@
   }

   uint id = POINTER_AS_UINT(*val);
-  uintptr_t key = elem->index;
+  uintptr_t key = (size_t)elem->index;
   key |= ((uintptr_t)elem->htype) << 31L;

   BLI_ghash_insert(entry->full_copy_idmap, POINTER_FROM_UINT(id), (void *)key);
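
The last hunk shows what the key encodes: the element index sits in the low bits of a pointer-sized integer with the element type shifted in above bit 31, and the explicit cast keeps the widening from tripping the new conversion errors. A standalone sketch with assumed names:

#include <stdint.h>

static uintptr_t pack_elem_key(int index, char htype)
{
  uintptr_t key = (size_t)index;   /* explicit widening of the signed index */
  key |= ((uintptr_t)htype) << 31; /* element-type tag above the index bits */
  return key;
}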

source/blender/render/intern/bake.c

@@ -933,6 +933,12 @@ void RE_bake_normal_world_to_tangent(const BakePixel pixel_array[],
     /* converts from world space to local space */
     mul_transposed_mat3_m4_v3(mat, nor);
     normalize_v3(nor);
+    if (dot_v3v3(nor, normal) < 0.0f) {
+      negate_v3(nor);
+    }
+    invert_m3_m3(itsm, tsm);
+    mul_m3_v3(itsm, nor);
+    normalize_v3(nor);
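
The added block forces the recovered normal into the same hemisphere as the reference normal before the follow-up inverse tangent-space transform; reduced to a standalone helper (hypothetical, mirroring the dot_v3v3/negate_v3 check above):

static void align_hemisphere_v3(float n[3], const float ref[3])
{
  const float d = n[0] * ref[0] + n[1] * ref[1] + n[2] * ref[2];
  if (d < 0.0f) { /* n points away from ref: flip it. */
    n[0] = -n[0];
    n[1] = -n[1];
    n[2] = -n[2];
  }
}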