Cleanup: Clang-Tidy modernize-redundant-void-arg

Aaron Carlisle 2021-12-08 00:31:20 -05:00
parent cbcd74de22
commit c1279768a7
40 changed files with 131 additions and 134 deletions
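This pass applies the clang-tidy check modernize-redundant-void-arg, which drops the C-style `(void)` parameter list from function declarations and definitions in C++ code, where an empty parameter list already means "takes no arguments". In C, an empty list has historically meant "unspecified parameters", so `(void)` stays meaningful there; a cleanup like this is therefore limited to sources compiled as C++. A minimal before/after sketch of what the check does (illustrative function names, not taken from this commit):

// Before: C-style empty parameter lists, redundant in C++.
int get_version_hex(void);
void system_init(void)
{
}

// After modernize-redundant-void-arg:
int get_version_hex();
void system_init()
{
}

For a tree of this size the fix-its are typically applied in bulk; assuming a compile database in the build directory, an invocation along these lines would produce edits like the ones below:

run-clang-tidy -p build -checks='-*,modernize-redundant-void-arg' -fix source/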

View File

@ -65,7 +65,7 @@ class MemLeakPrinter {
};
} // namespace
void MEM_init_memleak_detection(void)
void MEM_init_memleak_detection()
{
/**
* This variable is constructed when this function is first called. This should happen as soon as
@ -84,7 +84,7 @@ void MEM_use_memleak_detection(bool enabled)
ignore_memleak = !enabled;
}
void MEM_enable_fail_on_memleak(void)
void MEM_enable_fail_on_memleak()
{
fail_on_memleak = true;
}

View File

@ -411,7 +411,7 @@ void FundamentalToEssential(const Mat3& F, Mat3* E) {
// Default settings for fundamental estimation which should be suitable
// for a wide range of use cases.
EstimateFundamentalOptions::EstimateFundamentalOptions(void)
EstimateFundamentalOptions::EstimateFundamentalOptions()
: max_num_iterations(50), expected_average_symmetric_distance(1e-16) {
}

View File

@ -157,7 +157,7 @@ bool Homography2DFromCorrespondencesLinear(const Mat& x1,
// Default settings for homography estimation which should be suitable
// for a wide range of use cases.
EstimateHomographyOptions::EstimateHomographyOptions(void)
EstimateHomographyOptions::EstimateHomographyOptions()
: use_normalization(true),
max_num_iterations(50),
expected_average_symmetric_distance(1e-16) {

View File

@ -123,7 +123,7 @@ struct FallbackProcessor {
MEM_CXX_CLASS_ALLOC_FUNCS("FallbackProcessor");
};
OCIO_ConstConfigRcPtr *FallbackImpl::getCurrentConfig(void)
OCIO_ConstConfigRcPtr *FallbackImpl::getCurrentConfig()
{
return CONFIG_DEFAULT;
}
@ -132,7 +132,7 @@ void FallbackImpl::setCurrentConfig(const OCIO_ConstConfigRcPtr * /*config*/)
{
}
OCIO_ConstConfigRcPtr *FallbackImpl::configCreateFromEnv(void)
OCIO_ConstConfigRcPtr *FallbackImpl::configCreateFromEnv()
{
return NULL;
}
@ -502,12 +502,12 @@ void FallbackImpl::OCIO_PackedImageDescRelease(OCIO_PackedImageDesc *id)
MEM_freeN(id);
}
const char *FallbackImpl::getVersionString(void)
const char *FallbackImpl::getVersionString()
{
return "fallback";
}
int FallbackImpl::getVersionHex(void)
int FallbackImpl::getVersionHex()
{
return 0;
}

View File

@ -23,7 +23,7 @@
static IOCIOImpl *impl = NULL;
void OCIO_init(void)
void OCIO_init()
{
#ifdef WITH_OCIO
impl = new OCIOImpl();
@ -32,18 +32,18 @@ void OCIO_init(void)
#endif
}
void OCIO_exit(void)
void OCIO_exit()
{
delete impl;
impl = NULL;
}
OCIO_ConstConfigRcPtr *OCIO_getCurrentConfig(void)
OCIO_ConstConfigRcPtr *OCIO_getCurrentConfig()
{
return impl->getCurrentConfig();
}
OCIO_ConstConfigRcPtr *OCIO_configCreateFallback(void)
OCIO_ConstConfigRcPtr *OCIO_configCreateFallback()
{
delete impl;
impl = new FallbackImpl();
@ -56,7 +56,7 @@ void OCIO_setCurrentConfig(const OCIO_ConstConfigRcPtr *config)
impl->setCurrentConfig(config);
}
OCIO_ConstConfigRcPtr *OCIO_configCreateFromEnv(void)
OCIO_ConstConfigRcPtr *OCIO_configCreateFromEnv()
{
return impl->configCreateFromEnv();
}
@ -308,22 +308,22 @@ bool OCIO_gpuDisplayShaderBind(OCIO_ConstConfigRcPtr *config,
use_overlay);
}
void OCIO_gpuDisplayShaderUnbind(void)
void OCIO_gpuDisplayShaderUnbind()
{
impl->gpuDisplayShaderUnbind();
}
void OCIO_gpuCacheFree(void)
void OCIO_gpuCacheFree()
{
impl->gpuCacheFree();
}
const char *OCIO_getVersionString(void)
const char *OCIO_getVersionString()
{
return impl->getVersionString();
}
int OCIO_getVersionHex(void)
int OCIO_getVersionHex()
{
return impl->getVersionHex();
}

View File

@ -33,17 +33,17 @@ using blender::opensubdiv::GLSLTransformFeedbackDeviceContext;
using blender::opensubdiv::OpenCLDeviceContext;
using blender::opensubdiv::OpenMPDeviceContext;
void openSubdiv_init(void)
void openSubdiv_init()
{
// Ensure all OpenGL strings are cached.
openSubdiv_getAvailableEvaluators();
}
void openSubdiv_cleanup(void)
void openSubdiv_cleanup()
{
}
int openSubdiv_getAvailableEvaluators(void)
int openSubdiv_getAvailableEvaluators()
{
int flags = OPENSUBDIV_EVALUATOR_CPU;
@ -70,7 +70,7 @@ int openSubdiv_getAvailableEvaluators(void)
return flags;
}
int openSubdiv_getVersionHex(void)
int openSubdiv_getVersionHex()
{
#if defined(OPENSUBDIV_VERSION_NUMBER)
return OPENSUBDIV_VERSION_NUMBER;

View File

@ -20,20 +20,20 @@
#include <cstddef>
void openSubdiv_init(void)
void openSubdiv_init()
{
}
void openSubdiv_cleanup(void)
void openSubdiv_cleanup()
{
}
int openSubdiv_getAvailableEvaluators(void)
int openSubdiv_getAvailableEvaluators()
{
return 0;
}
int openSubdiv_getVersionHex(void)
int openSubdiv_getVersionHex()
{
return 0;
}

View File

@ -39,7 +39,7 @@
using namespace blender;
AssetMetaData *BKE_asset_metadata_create(void)
AssetMetaData *BKE_asset_metadata_create()
{
AssetMetaData *asset_data = (AssetMetaData *)MEM_callocN(sizeof(*asset_data), __func__);
memcpy(asset_data, DNA_struct_default_get(AssetMetaData), sizeof(*asset_data));

View File

@ -125,7 +125,7 @@ bool bvhcache_has_tree(const BVHCache *bvh_cache, const BVHTree *tree)
return false;
}
BVHCache *bvhcache_init(void)
BVHCache *bvhcache_init()
{
BVHCache *cache = (BVHCache *)MEM_callocN(sizeof(BVHCache), __func__);
BLI_mutex_init(&cache->mutex);

View File

@ -139,7 +139,7 @@ std::optional<std::string> CryptomatteSession::operator[](float encoded_hash) co
return std::nullopt;
}
CryptomatteSession *BKE_cryptomatte_init(void)
CryptomatteSession *BKE_cryptomatte_init()
{
CryptomatteSession *session = new CryptomatteSession();
return session;

View File

@ -210,7 +210,7 @@ void BKE_icons_init(int first_dyn_id)
}
}
void BKE_icons_free(void)
void BKE_icons_free()
{
BLI_assert(BLI_thread_is_main());
@ -227,7 +227,7 @@ void BKE_icons_free(void)
BLI_linklist_lockfree_free(&g_icon_delete_queue, MEM_freeN);
}
void BKE_icons_deferred_free(void)
void BKE_icons_deferred_free()
{
std::scoped_lock lock(gIconMutex);
@ -271,7 +271,7 @@ static PreviewImage *previewimg_deferred_create(const char *path, int source)
return prv;
}
PreviewImage *BKE_previewimg_create(void)
PreviewImage *BKE_previewimg_create()
{
return previewimg_create_ex(0);
}

View File

@ -1365,7 +1365,7 @@ bool ntreeIsRegistered(bNodeTree *ntree)
return (ntree->typeinfo != &NodeTreeTypeUndefined);
}
GHashIterator *ntreeTypeGetIterator(void)
GHashIterator *ntreeTypeGetIterator()
{
return BLI_ghashIterator_new(nodetreetypes_hash);
}
@ -1450,7 +1450,7 @@ bool nodeTypeUndefined(const bNode *node)
ID_IS_LINKED(node->id) && (node->id->tag & LIB_TAG_MISSING));
}
GHashIterator *nodeTypeGetIterator(void)
GHashIterator *nodeTypeGetIterator()
{
return BLI_ghashIterator_new(nodetypes_hash);
}
@ -1498,7 +1498,7 @@ bool nodeSocketIsRegistered(bNodeSocket *sock)
return (sock->typeinfo != &NodeSocketTypeUndefined);
}
GHashIterator *nodeSocketTypeGetIterator(void)
GHashIterator *nodeSocketTypeGetIterator()
{
return BLI_ghashIterator_new(nodesockettypes_hash);
}
@ -4057,7 +4057,7 @@ void BKE_node_clipboard_init(const struct bNodeTree *ntree)
node_clipboard.type = ntree->type;
}
void BKE_node_clipboard_clear(void)
void BKE_node_clipboard_clear()
{
LISTBASE_FOREACH_MUTABLE (bNodeLink *, link, &node_clipboard.links) {
nodeRemLink(nullptr, link);
@ -4074,7 +4074,7 @@ void BKE_node_clipboard_clear(void)
#endif
}
bool BKE_node_clipboard_validate(void)
bool BKE_node_clipboard_validate()
{
bool ok = true;
@ -4152,22 +4152,22 @@ void BKE_node_clipboard_add_link(bNodeLink *link)
BLI_addtail(&node_clipboard.links, link);
}
const ListBase *BKE_node_clipboard_get_nodes(void)
const ListBase *BKE_node_clipboard_get_nodes()
{
return &node_clipboard.nodes;
}
const ListBase *BKE_node_clipboard_get_links(void)
const ListBase *BKE_node_clipboard_get_links()
{
return &node_clipboard.links;
}
int BKE_node_clipboard_get_type(void)
int BKE_node_clipboard_get_type()
{
return node_clipboard.type;
}
void BKE_node_clipboard_free(void)
void BKE_node_clipboard_free()
{
BKE_node_clipboard_validate();
BKE_node_clipboard_clear();
@ -5899,7 +5899,7 @@ static void registerFunctionNodes()
register_node_type_fn_value_to_string();
}
void BKE_node_system_init(void)
void BKE_node_system_init()
{
nodetreetypes_hash = BLI_ghash_str_new("nodetreetypes_hash gh");
nodetypes_hash = BLI_ghash_str_new("nodetypes_hash gh");
@ -5926,7 +5926,7 @@ void BKE_node_system_init(void)
registerFunctionNodes();
}
void BKE_node_system_exit(void)
void BKE_node_system_exit()
{
if (nodetypes_hash) {
NODE_TYPES_BEGIN (nt) {

View File

@ -3792,7 +3792,7 @@ void BKE_object_apply_mat4(Object *ob,
/** \name Object Bounding Box API
* \{ */
BoundBox *BKE_boundbox_alloc_unit(void)
BoundBox *BKE_boundbox_alloc_unit()
{
const float min[3] = {-1.0f, -1.0f, -1.0f}, max[3] = {1.0f, 1.0f, 1.0f};

View File

@ -3195,7 +3195,7 @@ struct PerfCounts {
static PerfCounts *perfdata = nullptr;
static void perfdata_init(void)
static void perfdata_init()
{
perfdata = new PerfCounts;
@ -3247,7 +3247,7 @@ static void doperfmax(int maxnum, int val)
perfdata->max[maxnum] = max_ii(perfdata->max[maxnum], val);
}
static void dump_perfdata(void)
static void dump_perfdata()
{
std::cout << "\nPERFDATA\n";
for (int i : perfdata->count.index_range()) {

View File

@ -272,7 +272,7 @@ struct RNG_THREAD_ARRAY {
RNG rng_tab[BLENDER_MAX_THREADS];
};
RNG_THREAD_ARRAY *BLI_rng_threaded_new(void)
RNG_THREAD_ARRAY *BLI_rng_threaded_new()
{
unsigned int i;
RNG_THREAD_ARRAY *rngarr = (RNG_THREAD_ARRAY *)MEM_mallocN(sizeof(RNG_THREAD_ARRAY),

View File

@ -140,7 +140,7 @@ struct ThreadSlot {
int avail;
};
void BLI_threadapi_init(void)
void BLI_threadapi_init()
{
mainid = pthread_self();
if (numaAPI_Initialize() == NUMAAPI_SUCCESS) {
@ -148,7 +148,7 @@ void BLI_threadapi_init(void)
}
}
void BLI_threadapi_exit(void)
void BLI_threadapi_exit()
{
}
@ -231,7 +231,7 @@ static void *tslot_thread_start(void *tslot_p)
return tslot->do_thread(tslot->callerdata);
}
int BLI_thread_is_main(void)
int BLI_thread_is_main()
{
return pthread_equal(pthread_self(), mainid);
}
@ -306,7 +306,7 @@ void BLI_threadpool_end(ListBase *threadbase)
/* System Information */
/* how many threads are native on this system? */
int BLI_system_thread_count(void)
int BLI_system_thread_count()
{
static int t = -1;
@ -347,7 +347,7 @@ void BLI_system_num_threads_override_set(int num)
num_threads_override = num;
}
int BLI_system_num_threads_override_get(void)
int BLI_system_num_threads_override_get()
{
return num_threads_override;
}
@ -418,7 +418,7 @@ void BLI_mutex_end(ThreadMutex *mutex)
pthread_mutex_destroy(mutex);
}
ThreadMutex *BLI_mutex_alloc(void)
ThreadMutex *BLI_mutex_alloc()
{
ThreadMutex *mutex = static_cast<ThreadMutex *>(MEM_callocN(sizeof(ThreadMutex), "ThreadMutex"));
BLI_mutex_init(mutex);
@ -533,7 +533,7 @@ void BLI_rw_mutex_end(ThreadRWMutex *mutex)
pthread_rwlock_destroy(mutex);
}
ThreadRWMutex *BLI_rw_mutex_alloc(void)
ThreadRWMutex *BLI_rw_mutex_alloc()
{
ThreadRWMutex *mutex = static_cast<ThreadRWMutex *>(
MEM_callocN(sizeof(ThreadRWMutex), "ThreadRWMutex"));
@ -555,7 +555,7 @@ struct TicketMutex {
unsigned int queue_head, queue_tail;
};
TicketMutex *BLI_ticket_mutex_alloc(void)
TicketMutex *BLI_ticket_mutex_alloc()
{
TicketMutex *ticket = static_cast<TicketMutex *>(
MEM_callocN(sizeof(TicketMutex), "TicketMutex"));
@ -640,7 +640,7 @@ struct ThreadQueue {
volatile int canceled;
};
ThreadQueue *BLI_thread_queue_init(void)
ThreadQueue *BLI_thread_queue_init()
{
ThreadQueue *queue;
@ -818,8 +818,7 @@ void BLI_thread_queue_wait_finish(ThreadQueue *queue)
/* **** Special functions to help performance on crazy NUMA setups. **** */
#if 0 /* UNUSED */
static bool check_is_threadripper2_alike_topology(void)
{
static bool check_is_threadripper2_alike_topology(){
/* NOTE: We hope operating system does not support CPU hot-swap to
* a different brand. And that SMP of different types is also not
* encouraged by the system. */
@ -860,8 +859,7 @@ static bool check_is_threadripper2_alike_topology(void)
return is_threadripper2;
}
static void threadripper_put_process_on_fast_node(void)
{
static void threadripper_put_process_on_fast_node(){
if (!is_numa_available) {
return;
}
@ -880,8 +878,7 @@ static void threadripper_put_process_on_fast_node(void)
numaAPI_RunProcessOnNode(0);
}
static void threadripper_put_thread_on_fast_node(void)
{
static void threadripper_put_thread_on_fast_node(){
if (!is_numa_available) {
return;
}
@ -899,7 +896,7 @@ static void threadripper_put_thread_on_fast_node(void)
}
#endif /* UNUSED */
void BLI_thread_put_process_on_fast_node(void)
void BLI_thread_put_process_on_fast_node()
{
/* Disabled for now since this causes only 16 threads to be used on a
* thread-ripper for computations like sculpting and fluid sim. The problem
@ -915,7 +912,7 @@ void BLI_thread_put_process_on_fast_node(void)
#endif
}
void BLI_thread_put_thread_on_fast_node(void)
void BLI_thread_put_thread_on_fast_node()
{
/* Disabled for now, see comment above. */
#if 0

View File

@ -81,7 +81,7 @@ bUUID BLI_uuid_generate_random()
return uuid;
}
bUUID BLI_uuid_nil(void)
bUUID BLI_uuid_nil()
{
const bUUID nil = {0, 0, 0, 0, 0, {0}};
return nil;

View File

@ -40,7 +40,7 @@
namespace deg = blender::deg;
/* Register all node types */
void DEG_register_node_types(void)
void DEG_register_node_types()
{
/* register node types */
deg::deg_register_base_depsnodes();
@ -49,7 +49,7 @@ void DEG_register_node_types(void)
}
/* Free registry on exit */
void DEG_free_node_types(void)
void DEG_free_node_types()
{
}

View File

@ -40,7 +40,7 @@ struct DRWTexturePool {
int last_user_id = -1;
};
DRWTexturePool *DRW_texture_pool_create(void)
DRWTexturePool *DRW_texture_pool_create()
{
return new DRWTexturePool();
}

View File

@ -109,7 +109,7 @@ struct AnimKeylist {
#endif
};
AnimKeylist *ED_keylist_create(void)
AnimKeylist *ED_keylist_create()
{
AnimKeylist *keylist = new AnimKeylist();
return keylist;

View File

@ -946,7 +946,7 @@ static bool has_external_files(Main *bmain, struct ReportList *reports)
/* -------------------------------------------------------------------- */
void ED_operatortypes_asset(void)
void ED_operatortypes_asset()
{
WM_operatortype_append(ASSET_OT_mark);
WM_operatortype_append(ASSET_OT_clear);

View File

@ -186,12 +186,12 @@ bool UI_search_item_add(uiSearchItems *items,
return true;
}
int UI_searchbox_size_y(void)
int UI_searchbox_size_y()
{
return SEARCH_ITEMS * UI_UNIT_Y + 2 * UI_POPUP_MENU_TOP;
}
int UI_searchbox_size_x(void)
int UI_searchbox_size_x()
{
return 12 * UI_UNIT_X;
}

View File

@ -1325,7 +1325,7 @@ PointerRNA *UI_list_custom_drag_operator_set(uiList *ui_list,
/** \name List-types Registration
* \{ */
void ED_uilisttypes_ui(void)
void ED_uilisttypes_ui()
{
WM_uilisttype_add(UI_UL_asset_view());
}

View File

@ -3073,7 +3073,7 @@ static void node_socket_undefined_interface_draw_color(bContext *UNUSED(C),
/** \} */
void ED_node_init_butfuncs(void)
void ED_node_init_butfuncs()
{
/* Fallback types for undefined tree, nodes, sockets
* Defined in blenkernel, but not registered in type hashes.

View File

@ -106,7 +106,7 @@ extern void ui_draw_dropshadow(
const rctf *rct, float radius, float aspect, float alpha, int select);
}
float ED_node_grid_size(void)
float ED_node_grid_size()
{
return U.widget_unit;
}

View File

@ -127,7 +127,7 @@ void node_operatortypes()
WM_operatortype_append(NODE_OT_cryptomatte_layer_remove);
}
void ED_operatormacros_node(void)
void ED_operatormacros_node()
{
wmOperatorType *ot;
wmOperatorTypeMacro *mot;

View File

@ -996,7 +996,7 @@ static void node_space_subtype_item_extend(bContext *C, EnumPropertyItem **item,
}
/* only called once, from space/spacetypes.c */
void ED_spacetype_node(void)
void ED_spacetype_node()
{
SpaceType *st = (SpaceType *)MEM_callocN(sizeof(SpaceType), "spacetype node");
ARegionType *art;

View File

@ -617,7 +617,7 @@ static void spreadsheet_right_region_listener(const wmRegionListenerParams *UNUS
{
}
void ED_spacetype_spreadsheet(void)
void ED_spacetype_spreadsheet()
{
SpaceType *st = (SpaceType *)MEM_callocN(sizeof(SpaceType), "spacetype spreadsheet");
ARegionType *art;

View File

@ -276,7 +276,7 @@ static void ED_OT_flush_edits(wmOperatorType *ot)
/** \} */
void ED_operatortypes_edutils(void)
void ED_operatortypes_edutils()
{
WM_operatortype_append(ED_OT_lib_id_load_custom_preview);
WM_operatortype_append(ED_OT_lib_id_generate_preview);

View File

@ -50,7 +50,7 @@ using namespace blender::gpu;
/** \name Creation & Deletion
* \{ */
GPUBatch *GPU_batch_calloc(void)
GPUBatch *GPU_batch_calloc()
{
GPUBatch *batch = GPUBackend::get()->batch_alloc();
memset(batch, 0, sizeof(*batch));
@ -315,12 +315,12 @@ void GPU_batch_program_set_imm_shader(GPUBatch *batch)
/** \name Init/Exit
* \{ */
void gpu_batch_init(void)
void gpu_batch_init()
{
gpu_batch_presets_init();
}
void gpu_batch_exit(void)
void gpu_batch_exit()
{
gpu_batch_presets_exit();
}

View File

@ -44,7 +44,7 @@ using namespace blender::gpu;
/** \name Capabilities
* \{ */
int GPU_max_texture_size(void)
int GPU_max_texture_size()
{
return GCaps.max_texture_size;
}
@ -57,27 +57,27 @@ int GPU_texture_size_with_limit(int res, bool limit_gl_texture_size)
return min_ii(reslimit, res);
}
int GPU_max_texture_layers(void)
int GPU_max_texture_layers()
{
return GCaps.max_texture_layers;
}
int GPU_max_textures_vert(void)
int GPU_max_textures_vert()
{
return GCaps.max_textures_vert;
}
int GPU_max_textures_geom(void)
int GPU_max_textures_geom()
{
return GCaps.max_textures_geom;
}
int GPU_max_textures_frag(void)
int GPU_max_textures_frag()
{
return GCaps.max_textures_frag;
}
int GPU_max_textures(void)
int GPU_max_textures()
{
return GCaps.max_textures;
}
@ -92,37 +92,37 @@ int GPU_max_work_group_size(int index)
return GCaps.max_work_group_size[index];
}
int GPU_max_uniforms_vert(void)
int GPU_max_uniforms_vert()
{
return GCaps.max_uniforms_vert;
}
int GPU_max_uniforms_frag(void)
int GPU_max_uniforms_frag()
{
return GCaps.max_uniforms_frag;
}
int GPU_max_batch_indices(void)
int GPU_max_batch_indices()
{
return GCaps.max_batch_indices;
}
int GPU_max_batch_vertices(void)
int GPU_max_batch_vertices()
{
return GCaps.max_batch_vertices;
}
int GPU_max_vertex_attribs(void)
int GPU_max_vertex_attribs()
{
return GCaps.max_vertex_attribs;
}
int GPU_max_varying_floats(void)
int GPU_max_varying_floats()
{
return GCaps.max_varying_floats;
}
int GPU_extensions_len(void)
int GPU_extensions_len()
{
return GCaps.extensions_len;
}
@ -132,43 +132,43 @@ const char *GPU_extension_get(int i)
return GCaps.extension_get ? GCaps.extension_get(i) : "\0";
}
bool GPU_mip_render_workaround(void)
bool GPU_mip_render_workaround()
{
return GCaps.mip_render_workaround;
}
bool GPU_depth_blitting_workaround(void)
bool GPU_depth_blitting_workaround()
{
return GCaps.depth_blitting_workaround;
}
bool GPU_use_main_context_workaround(void)
bool GPU_use_main_context_workaround()
{
return GCaps.use_main_context_workaround;
}
bool GPU_crappy_amd_driver(void)
bool GPU_crappy_amd_driver()
{
/* Currently are the same drivers with the `unused_fb_slot` problem. */
return GCaps.broken_amd_driver;
}
bool GPU_use_hq_normals_workaround(void)
bool GPU_use_hq_normals_workaround()
{
return GCaps.use_hq_normals_workaround;
}
bool GPU_compute_shader_support(void)
bool GPU_compute_shader_support()
{
return GCaps.compute_shader_support;
}
bool GPU_shader_storage_buffer_objects_support(void)
bool GPU_shader_storage_buffer_objects_support()
{
return GCaps.shader_storage_buffer_objects_support;
}
bool GPU_shader_image_load_store_support(void)
bool GPU_shader_image_load_store_support()
{
return GCaps.shader_image_load_store_support;
}
@ -179,7 +179,7 @@ bool GPU_shader_image_load_store_support(void)
/** \name Memory statistics
* \{ */
bool GPU_mem_stats_supported(void)
bool GPU_mem_stats_supported()
{
return GCaps.mem_stats_support;
}
@ -190,7 +190,7 @@ void GPU_mem_stats_get(int *totalmem, int *freemem)
}
/* Return support for the active context + window. */
bool GPU_stereo_quadbuffer_support(void)
bool GPU_stereo_quadbuffer_support()
{
return Context::get()->front_right != nullptr;
}

View File

@ -133,7 +133,7 @@ void GPU_context_active_set(GPUContext *ctx_)
}
}
GPUContext *GPU_context_active_get(void)
GPUContext *GPU_context_active_get()
{
return wrap(Context::get());
}
@ -146,12 +146,12 @@ GPUContext *GPU_context_active_get(void)
static std::mutex main_context_mutex;
void GPU_context_main_lock(void)
void GPU_context_main_lock()
{
main_context_mutex.lock();
}
void GPU_context_main_unlock(void)
void GPU_context_main_unlock()
{
main_context_mutex.unlock();
}
@ -180,7 +180,7 @@ void GPU_backend_init(eGPUBackendType backend_type)
}
}
void GPU_backend_exit(void)
void GPU_backend_exit()
{
/* TODO: assert no resource left. Currently UI textures are still not freed in their context
* correctly. */

View File

@ -45,7 +45,7 @@ void GPU_debug_group_begin(const char *name)
ctx->debug_group_begin(name, stack.size());
}
void GPU_debug_group_end(void)
void GPU_debug_group_end()
{
if (!(G.debug & G_DEBUG_GPU)) {
return;

View File

@ -240,19 +240,19 @@ void GPU_backbuffer_bind(eGPUBackBuffer buffer)
}
}
void GPU_framebuffer_restore(void)
void GPU_framebuffer_restore()
{
Context::get()->back_left->bind(false);
}
GPUFrameBuffer *GPU_framebuffer_active_get(void)
GPUFrameBuffer *GPU_framebuffer_active_get()
{
Context *ctx = Context::get();
return wrap(ctx ? ctx->active_fb : nullptr);
}
/* Returns the default frame-buffer. Will always exists even if it's just a dummy. */
GPUFrameBuffer *GPU_framebuffer_back_get(void)
GPUFrameBuffer *GPU_framebuffer_back_get()
{
Context *ctx = Context::get();
return wrap(ctx ? ctx->back_left : nullptr);
@ -514,14 +514,14 @@ void GPU_framebuffer_push(GPUFrameBuffer *fb)
FrameBufferStack.top++;
}
GPUFrameBuffer *GPU_framebuffer_pop(void)
GPUFrameBuffer *GPU_framebuffer_pop()
{
BLI_assert(FrameBufferStack.top > 0);
FrameBufferStack.top--;
return FrameBufferStack.framebuffers[FrameBufferStack.top];
}
uint GPU_framebuffer_stack_level_get(void)
uint GPU_framebuffer_stack_level_get()
{
return FrameBufferStack.top;
}

View File

@ -349,7 +349,7 @@ uint32_t *IndexBuf::unmap(const uint32_t *mapped_memory) const
/** \name C-API
* \{ */
GPUIndexBuf *GPU_indexbuf_calloc(void)
GPUIndexBuf *GPU_indexbuf_calloc()
{
return wrap(GPUBackend::get()->indexbuf_alloc());
}

View File

@ -67,7 +67,7 @@ struct GPUMatrixState {
#define ProjectionStack Context::get()->matrix_state->projection_stack
#define Projection ProjectionStack.stack[ProjectionStack.top]
GPUMatrixState *GPU_matrix_state_create(void)
GPUMatrixState *GPU_matrix_state_create()
{
#define MATRIX_4X4_IDENTITY \
{ \
@ -99,7 +99,7 @@ static void gpu_matrix_state_active_set_dirty(bool value)
state->dirty = value;
}
void GPU_matrix_reset(void)
void GPU_matrix_reset()
{
GPUMatrixState *state = Context::get()->matrix_state;
state->model_view_stack.top = 0;
@ -132,28 +132,28 @@ static void checkmat(cosnt float *m)
#endif
void GPU_matrix_push(void)
void GPU_matrix_push()
{
BLI_assert(ModelViewStack.top + 1 < MATRIX_STACK_DEPTH);
ModelViewStack.top++;
copy_m4_m4(ModelView, ModelViewStack.stack[ModelViewStack.top - 1]);
}
void GPU_matrix_pop(void)
void GPU_matrix_pop()
{
BLI_assert(ModelViewStack.top > 0);
ModelViewStack.top--;
gpu_matrix_state_active_set_dirty(true);
}
void GPU_matrix_push_projection(void)
void GPU_matrix_push_projection()
{
BLI_assert(ProjectionStack.top + 1 < MATRIX_STACK_DEPTH);
ProjectionStack.top++;
copy_m4_m4(Projection, ProjectionStack.stack[ProjectionStack.top - 1]);
}
void GPU_matrix_pop_projection(void)
void GPU_matrix_pop_projection()
{
BLI_assert(ProjectionStack.top > 0);
ProjectionStack.top--;
@ -167,7 +167,7 @@ void GPU_matrix_set(const float m[4][4])
gpu_matrix_state_active_set_dirty(true);
}
void GPU_matrix_identity_projection_set(void)
void GPU_matrix_identity_projection_set()
{
unit_m4(Projection);
CHECKMAT(Projection3D);
@ -181,7 +181,7 @@ void GPU_matrix_projection_set(const float m[4][4])
gpu_matrix_state_active_set_dirty(true);
}
void GPU_matrix_identity_set(void)
void GPU_matrix_identity_set()
{
unit_m4(ModelView);
gpu_matrix_state_active_set_dirty(true);
@ -668,7 +668,7 @@ void GPU_matrix_bind(GPUShader *shader)
gpu_matrix_state_active_set_dirty(false);
}
bool GPU_matrix_dirty_get(void)
bool GPU_matrix_dirty_get()
{
GPUMatrixState *state = Context::get()->matrix_state;
return state->dirty;
@ -681,13 +681,13 @@ BLI_STATIC_ASSERT(GPU_PY_MATRIX_STACK_LEN + 1 == MATRIX_STACK_DEPTH, "define mis
/* Return int since caller is may subtract. */
int GPU_matrix_stack_level_get_model_view(void)
int GPU_matrix_stack_level_get_model_view()
{
GPUMatrixState *state = Context::get()->matrix_state;
return (int)state->model_view_stack.top;
}
int GPU_matrix_stack_level_get_projection(void)
int GPU_matrix_stack_level_get_projection()
{
GPUMatrixState *state = Context::get()->matrix_state;
return (int)state->projection_stack.top;

View File

@ -127,31 +127,31 @@ eGPUSupportLevel GPU_platform_support_level()
return GPG.support_level;
}
const char *GPU_platform_vendor(void)
const char *GPU_platform_vendor()
{
BLI_assert(GPG.initialized);
return GPG.vendor;
}
const char *GPU_platform_renderer(void)
const char *GPU_platform_renderer()
{
BLI_assert(GPG.initialized);
return GPG.renderer;
}
const char *GPU_platform_version(void)
const char *GPU_platform_version()
{
BLI_assert(GPG.initialized);
return GPG.version;
}
const char *GPU_platform_support_level_key(void)
const char *GPU_platform_support_level_key()
{
BLI_assert(GPG.initialized);
return GPG.support_key;
}
const char *GPU_platform_gpu_name(void)
const char *GPU_platform_gpu_name()
{
BLI_assert(GPG.initialized);
return GPG.gpu_name;

View File

@ -151,7 +151,7 @@ bool gpu_select_query_load_id(uint id)
return true;
}
uint gpu_select_query_end(void)
uint gpu_select_query_end()
{
uint hits = 0;
const uint maxhits = g_query_state.bufsize;

View File

@ -245,7 +245,7 @@ void GPU_uniformbuf_unbind(GPUUniformBuf *ubo)
unwrap(ubo)->unbind();
}
void GPU_uniformbuf_unbind_all(void)
void GPU_uniformbuf_unbind_all()
{
/* FIXME */
}

View File

@ -223,7 +223,7 @@ bool USD_export(bContext *C,
return export_ok;
}
int USD_get_version(void)
int USD_get_version()
{
/* USD 19.11 defines:
*