Merge branch 'master' into blender2.8

This commit is contained in:
Campbell Barton 2018-02-18 22:33:05 +11:00
commit 2bc952fdb6
81 changed files with 683 additions and 972 deletions

View File

@ -415,7 +415,7 @@ option(WITH_CYCLES_OSL "Build Cycles with OSL support" ${_init_CYCLES_OSL})
option(WITH_CYCLES_OPENSUBDIV "Build Cycles with OpenSubdiv support" ${_init_CYCLES_OPENSUBDIV})
option(WITH_CYCLES_CUDA_BINARIES "Build Cycles CUDA binaries" OFF)
option(WITH_CYCLES_CUBIN_COMPILER "Build cubins with nvrtc based compiler instead of nvcc" OFF)
set(CYCLES_CUDA_BINARIES_ARCH sm_20 sm_21 sm_30 sm_35 sm_37 sm_50 sm_52 sm_60 sm_61 CACHE STRING "CUDA architectures to build binaries for")
set(CYCLES_CUDA_BINARIES_ARCH sm_30 sm_35 sm_37 sm_50 sm_52 sm_60 sm_61 CACHE STRING "CUDA architectures to build binaries for")
mark_as_advanced(CYCLES_CUDA_BINARIES_ARCH)
unset(PLATFORM_DEFAULT)
option(WITH_CYCLES_LOGGING "Build Cycles with logging support" ON)

View File

@ -107,25 +107,24 @@ if 'cmake' in builder:
elif builder.endswith('i686_cmake'):
bits = 32
chroot_name = 'buildbot_' + deb_name + '_i686'
cuda_chroot_name = 'buildbot_' + deb_name + '_x86_64'
targets = ['player', 'blender', 'cuda']
targets = ['player', 'blender']
cmake_extra_options.extend(["-DCMAKE_C_COMPILER=/usr/bin/gcc-7",
"-DCMAKE_CXX_COMPILER=/usr/bin/g++-7"])
cmake_options.append("-C" + os.path.join(blender_dir, cmake_config_file))
# Prepare CMake options needed to configure cuda binaries compilation.
cuda_cmake_options.append("-DWITH_CYCLES_CUDA_BINARIES=%s" % ('ON' if build_cubins else 'OFF'))
cuda_cmake_options.append("-DCYCLES_CUDA_BINARIES_ARCH=sm_20;sm_21;sm_30;sm_35;sm_37;sm_50;sm_52;sm_60;sm_61")
if build_cubins or 'cuda' in targets:
if bits == 32:
cuda_cmake_options.append("-DCUDA_64_BIT_DEVICE_CODE=OFF")
else:
# Prepare CMake options needed to configure cuda binaries compilation, 64bit only.
if bits == 64:
cuda_cmake_options.append("-DWITH_CYCLES_CUDA_BINARIES=%s" % ('ON' if build_cubins else 'OFF'))
cuda_cmake_options.append("-DCYCLES_CUDA_BINARIES_ARCH=sm_30;sm_35;sm_37;sm_50;sm_52;sm_60;sm_61")
if build_cubins or 'cuda' in targets:
cuda_cmake_options.append("-DCUDA_64_BIT_DEVICE_CODE=ON")
# Only modify common cmake options if cuda doesn't require separate target.
if 'cuda' not in targets:
cmake_options += cuda_cmake_options
# Only modify common cmake options if cuda doesn't require separate target.
if 'cuda' not in targets:
cmake_options += cuda_cmake_options
else:
cuda_cmake_options.append("-DWITH_CYCLES_CUDA_BINARIES=OFF")
cmake_options.append("-DCMAKE_INSTALL_PREFIX=%s" % (install_dir))

View File

@ -54,7 +54,7 @@ set(WITH_X11_XF86VMODE ON CACHE BOOL "" FORCE)
set(WITH_PLAYER ON CACHE BOOL "" FORCE)
set(WITH_MEM_JEMALLOC ON CACHE BOOL "" FORCE)
set(WITH_CYCLES_CUDA_BINARIES ON CACHE BOOL "" FORCE)
set(CYCLES_CUDA_BINARIES_ARCH sm_20;sm_21;sm_30;sm_35;sm_37;sm_50;sm_52;sm_60;sm_61 CACHE STRING "" FORCE)
set(CYCLES_CUDA_BINARIES_ARCH sm_30;sm_35;sm_37;sm_50;sm_52;sm_60;sm_61 CACHE STRING "" FORCE)
# platform dependent options
if(UNIX AND NOT APPLE)

View File

@ -126,7 +126,8 @@ if(WITH_CYCLES_CUBIN_COMPILER)
# though we are building 32 bit blender a 64 bit cubin_cc will have
# to be built to compile the cubins.
if(MSVC AND NOT CMAKE_CL_64)
Message("cycles_cubin_cc not supported on x86")
message("Building with CUDA not supported on 32 bit, skipped")
set(WITH_CYCLES_CUDA_BINARIES OFF)
else()
set(SRC
cycles_cubin_cc.cpp

View File

@ -502,7 +502,6 @@ DeviceInfo Device::get_multi_device(const vector<DeviceInfo>& subdevices, int th
info.description = "Multi Device";
info.num = 0;
info.has_fermi_limits = false;
info.has_half_images = true;
info.has_volume_decoupled = true;
info.bvh_layout_mask = BVH_LAYOUT_ALL;
@ -538,8 +537,6 @@ DeviceInfo Device::get_multi_device(const vector<DeviceInfo>& subdevices, int th
}
/* Accumulate device info. */
info.has_fermi_limits = info.has_fermi_limits ||
device.has_fermi_limits;
info.has_half_images &= device.has_half_images;
info.has_volume_decoupled &= device.has_volume_decoupled;
info.bvh_layout_mask = device.bvh_layout_mask & info.bvh_layout_mask;

View File

@ -56,7 +56,6 @@ public:
int num;
bool display_device; /* GPU is used as a display device. */
bool advanced_shading; /* Supports full shading system. */
bool has_fermi_limits; /* Fixed number of textures limit. */
bool has_half_images; /* Support half-float textures. */
bool has_volume_decoupled; /* Decoupled volume shading. */
BVHLayoutMask bvh_layout_mask; /* Bitmask of supported BVH layouts. */
@ -73,7 +72,6 @@ public:
cpu_threads = 0;
display_device = false;
advanced_shading = true;
has_fermi_limits = false;
has_half_images = false;
has_volume_decoupled = false;
bvh_layout_mask = BVH_LAYOUT_NONE;

View File

@ -309,9 +309,7 @@ public:
delete split_kernel;
if(!info.has_fermi_limits) {
texture_info.free();
}
texture_info.free();
cuda_assert(cuCtxDestroy(cuContext));
}
@ -322,9 +320,9 @@ public:
cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, cuDevId);
cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, cuDevId);
/* We only support sm_20 and above */
if(major < 2) {
cuda_error_message(string_printf("CUDA device supported only with compute capability 2.0 or up, found %d.%d.", major, minor));
/* We only support sm_30 and above */
if(major < 3) {
cuda_error_message(string_printf("CUDA device supported only with compute capability 3.0 or up, found %d.%d.", major, minor));
return false;
}
@ -462,9 +460,9 @@ public:
#ifdef _WIN32
if(have_precompiled_kernels()) {
if(major < 2) {
if(major < 3) {
cuda_error_message(string_printf(
"CUDA device requires compute capability 2.0 or up, "
"CUDA device requires compute capability 3.0 or up, "
"found %d.%d. Your GPU is not supported.",
major, minor));
}
@ -680,7 +678,7 @@ public:
void load_texture_info()
{
if(!info.has_fermi_limits && need_texture_info) {
if(need_texture_info) {
texture_info.copy_to_device();
need_texture_info = false;
}
@ -1018,9 +1016,6 @@ public:
{
CUDAContextScope scope(this);
/* Check if we are on sm_30 or above, for bindless textures. */
bool has_fermi_limits = info.has_fermi_limits;
/* General variables for both architectures */
string bind_name = mem.name;
size_t dsize = datatype_size(mem.data_type);
@ -1074,27 +1069,6 @@ public:
}
/* Image Texture Storage */
CUtexref texref = NULL;
if(has_fermi_limits) {
if(mem.data_depth > 1) {
/* Kernel uses different bind names for 2d and 3d float textures,
* so we have to adjust couple of things here.
*/
vector<string> tokens;
string_split(tokens, mem.name, "_");
bind_name = string_printf("__tex_image_%s_3d_%s",
tokens[2].c_str(),
tokens[3].c_str());
}
cuda_assert(cuModuleGetTexRef(&texref, cuModule, bind_name.c_str()));
if(!texref) {
return;
}
}
CUarray_format_enum format;
switch(mem.data_type) {
case TYPE_UCHAR: format = CU_AD_FORMAT_UNSIGNED_INT8; break;
@ -1187,97 +1161,68 @@ public:
cuda_assert(cuMemcpyHtoD(mem.device_pointer, mem.host_pointer, size));
}
if(!has_fermi_limits) {
/* Kepler+, bindless textures. */
int flat_slot = 0;
if(string_startswith(mem.name, "__tex_image")) {
int pos = string(mem.name).rfind("_");
flat_slot = atoi(mem.name + pos + 1);
}
else {
assert(0);
}
CUDA_RESOURCE_DESC resDesc;
memset(&resDesc, 0, sizeof(resDesc));
if(array_3d) {
resDesc.resType = CU_RESOURCE_TYPE_ARRAY;
resDesc.res.array.hArray = array_3d;
resDesc.flags = 0;
}
else if(mem.data_height > 0) {
resDesc.resType = CU_RESOURCE_TYPE_PITCH2D;
resDesc.res.pitch2D.devPtr = mem.device_pointer;
resDesc.res.pitch2D.format = format;
resDesc.res.pitch2D.numChannels = mem.data_elements;
resDesc.res.pitch2D.height = mem.data_height;
resDesc.res.pitch2D.width = mem.data_width;
resDesc.res.pitch2D.pitchInBytes = dst_pitch;
}
else {
resDesc.resType = CU_RESOURCE_TYPE_LINEAR;
resDesc.res.linear.devPtr = mem.device_pointer;
resDesc.res.linear.format = format;
resDesc.res.linear.numChannels = mem.data_elements;
resDesc.res.linear.sizeInBytes = mem.device_size;
}
CUDA_TEXTURE_DESC texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = address_mode;
texDesc.addressMode[1] = address_mode;
texDesc.addressMode[2] = address_mode;
texDesc.filterMode = filter_mode;
texDesc.flags = CU_TRSF_NORMALIZED_COORDINATES;
cuda_assert(cuTexObjectCreate(&cmem->texobject, &resDesc, &texDesc, NULL));
/* Resize once */
if(flat_slot >= texture_info.size()) {
/* Allocate some slots in advance, to reduce amount
* of re-allocations. */
texture_info.resize(flat_slot + 128);
}
/* Set Mapping and tag that we need to (re-)upload to device */
TextureInfo& info = texture_info[flat_slot];
info.data = (uint64_t)cmem->texobject;
info.cl_buffer = 0;
info.interpolation = mem.interpolation;
info.extension = mem.extension;
info.width = mem.data_width;
info.height = mem.data_height;
info.depth = mem.data_depth;
need_texture_info = true;
/* Kepler+, bindless textures. */
int flat_slot = 0;
if(string_startswith(mem.name, "__tex_image")) {
int pos = string(mem.name).rfind("_");
flat_slot = atoi(mem.name + pos + 1);
}
else {
/* Fermi, fixed texture slots. */
if(array_3d) {
cuda_assert(cuTexRefSetArray(texref, array_3d, CU_TRSA_OVERRIDE_FORMAT));
}
else if(mem.data_height > 0) {
CUDA_ARRAY_DESCRIPTOR array_desc;
array_desc.Format = format;
array_desc.Height = mem.data_height;
array_desc.Width = mem.data_width;
array_desc.NumChannels = mem.data_elements;
cuda_assert(cuTexRefSetAddress2D_v3(texref, &array_desc, mem.device_pointer, dst_pitch));
}
else {
cuda_assert(cuTexRefSetAddress(NULL, texref, cuda_device_ptr(mem.device_pointer), size));
}
/* Attach to texture reference. */
cuda_assert(cuTexRefSetFilterMode(texref, filter_mode));
cuda_assert(cuTexRefSetFlags(texref, CU_TRSF_NORMALIZED_COORDINATES));
cuda_assert(cuTexRefSetFormat(texref, format, mem.data_elements));
cuda_assert(cuTexRefSetAddressMode(texref, 0, address_mode));
cuda_assert(cuTexRefSetAddressMode(texref, 1, address_mode));
if(mem.data_depth > 1) {
cuda_assert(cuTexRefSetAddressMode(texref, 2, address_mode));
}
assert(0);
}
CUDA_RESOURCE_DESC resDesc;
memset(&resDesc, 0, sizeof(resDesc));
if(array_3d) {
resDesc.resType = CU_RESOURCE_TYPE_ARRAY;
resDesc.res.array.hArray = array_3d;
resDesc.flags = 0;
}
else if(mem.data_height > 0) {
resDesc.resType = CU_RESOURCE_TYPE_PITCH2D;
resDesc.res.pitch2D.devPtr = mem.device_pointer;
resDesc.res.pitch2D.format = format;
resDesc.res.pitch2D.numChannels = mem.data_elements;
resDesc.res.pitch2D.height = mem.data_height;
resDesc.res.pitch2D.width = mem.data_width;
resDesc.res.pitch2D.pitchInBytes = dst_pitch;
}
else {
resDesc.resType = CU_RESOURCE_TYPE_LINEAR;
resDesc.res.linear.devPtr = mem.device_pointer;
resDesc.res.linear.format = format;
resDesc.res.linear.numChannels = mem.data_elements;
resDesc.res.linear.sizeInBytes = mem.device_size;
}
CUDA_TEXTURE_DESC texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = address_mode;
texDesc.addressMode[1] = address_mode;
texDesc.addressMode[2] = address_mode;
texDesc.filterMode = filter_mode;
texDesc.flags = CU_TRSF_NORMALIZED_COORDINATES;
cuda_assert(cuTexObjectCreate(&cmem->texobject, &resDesc, &texDesc, NULL));
/* Resize once */
if(flat_slot >= texture_info.size()) {
/* Allocate some slots in advance, to reduce the amount
* of re-allocations. */
texture_info.resize(flat_slot + 128);
}
/* Set Mapping and tag that we need to (re-)upload to device */
TextureInfo& info = texture_info[flat_slot];
info.data = (uint64_t)cmem->texobject;
info.cl_buffer = 0;
info.interpolation = mem.interpolation;
info.extension = mem.extension;
info.width = mem.data_width;
info.height = mem.data_height;
info.depth = mem.data_depth;
need_texture_info = true;
}
void tex_free(device_memory& mem)
@ -2550,9 +2495,9 @@ void device_cuda_info(vector<DeviceInfo>& devices)
int major;
cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, num);
if(major < 2) {
if(major < 3) {
VLOG(1) << "Ignoring device \"" << name
<< "\", compute capability is too low.";
<< "\", this graphics card is no longer supported.";
continue;
}
@ -2562,8 +2507,7 @@ void device_cuda_info(vector<DeviceInfo>& devices)
info.description = string(name);
info.num = num;
info.advanced_shading = (major >= 2);
info.has_fermi_limits = !(major >= 3);
info.advanced_shading = (major >= 3);
info.has_half_images = (major >= 3);
info.has_volume_decoupled = false;
info.bvh_layout_mask = BVH_LAYOUT_BVH2;
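The hunks above drop the Fermi path that bound images through fixed texture references (cuTexRefSetArray / cuTexRefSetAddress) and keep only the bindless path: every image now gets a CUDA texture object whose handle is written into the texture_info slot encoded in its name (e.g. __tex_image_byte4_065 lands in flat slot 65). For reference, a minimal stand-alone C sketch of that driver-API pattern, not taken from the patch, with error checking omitted and hypothetical sizes:

#include <stdio.h>
#include <string.h>
#include <cuda.h>

int main(void)
{
	CUdevice dev;
	CUcontext ctx;
	CUdeviceptr buf;
	CUtexObject texobj;
	const size_t n = 1024;  /* hypothetical element count */

	cuInit(0);
	cuDeviceGet(&dev, 0);
	cuCtxCreate(&ctx, 0, dev);

	/* Linear device buffer standing in for a 1D float image. */
	cuMemAlloc(&buf, n * sizeof(float));

	/* Describe the resource, then the sampling state. */
	CUDA_RESOURCE_DESC resDesc;
	memset(&resDesc, 0, sizeof(resDesc));
	resDesc.resType = CU_RESOURCE_TYPE_LINEAR;
	resDesc.res.linear.devPtr = buf;
	resDesc.res.linear.format = CU_AD_FORMAT_FLOAT;
	resDesc.res.linear.numChannels = 1;
	resDesc.res.linear.sizeInBytes = n * sizeof(float);

	CUDA_TEXTURE_DESC texDesc;
	memset(&texDesc, 0, sizeof(texDesc));
	texDesc.addressMode[0] = CU_TR_ADDRESS_MODE_WRAP;
	texDesc.filterMode = CU_TR_FILTER_MODE_POINT;
	texDesc.flags = CU_TRSF_NORMALIZED_COORDINATES;

	/* The resulting handle is what the code above stores in TextureInfo::data. */
	cuTexObjectCreate(&texobj, &resDesc, &texDesc, NULL);
	printf("texture object handle: %llu\n", (unsigned long long)texobj);

	cuTexObjectDestroy(texobj);
	cuMemFree(buf);
	cuCtxDestroy(ctx);
	return 0;
}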

View File

@ -316,12 +316,8 @@ set(SRC_SPLIT_HEADERS
# CUDA module
if(WITH_CYCLES_CUDA_BINARIES)
# 32 bit or 64 bit
if(CUDA_64_BIT_DEVICE_CODE)
set(CUDA_BITS 64)
else()
set(CUDA_BITS 32)
endif()
# 64 bit only
set(CUDA_BITS 64)
# CUDA version
execute_process(COMMAND ${CUDA_NVCC_EXECUTABLE} "--version" OUTPUT_VARIABLE NVCC_OUT)
@ -420,17 +416,17 @@ if(WITH_CYCLES_CUDA_BINARIES)
endmacro()
foreach(arch ${CYCLES_CUDA_BINARIES_ARCH})
if(CUDA_VERSION GREATER "89" AND ${arch} MATCHES "sm_2.")
message(STATUS "CUDA binaries for ${arch} disabled, not supported by CUDA 9.")
if(${arch} MATCHES "sm_2.")
message(STATUS "CUDA binaries for ${arch} are no longer supported, skipped.")
else()
# Compile regular kernel
CYCLES_CUDA_KERNEL_ADD(${arch} filter "" "${cuda_filter_sources}" FALSE)
CYCLES_CUDA_KERNEL_ADD(${arch} kernel "" "${cuda_sources}" FALSE)
endif()
if(WITH_CYCLES_CUDA_SPLIT_KERNEL_BINARIES)
# Compile split kernel
CYCLES_CUDA_KERNEL_ADD(${arch} kernel_split "-D __SPLIT__" ${cuda_sources} FALSE)
endif()
if(WITH_CYCLES_CUDA_SPLIT_KERNEL_BINARIES)
# Compile split kernel
CYCLES_CUDA_KERNEL_ADD(${arch} kernel_split "-D __SPLIT__" ${cuda_sources} FALSE)
endif()
endforeach()

View File

@ -18,12 +18,6 @@ CCL_NAMESPACE_BEGIN
#ifdef __HAIR__
#if defined(__KERNEL_CUDA__) && (__CUDA_ARCH__ < 300)
# define ccl_device_curveintersect ccl_device
#else
# define ccl_device_curveintersect ccl_device_forceinline
#endif
#ifdef __KERNEL_SSE2__
ccl_device_inline ssef transform_point_T3(const ssef t[3], const ssef &a)
{
@ -32,7 +26,7 @@ ccl_device_inline ssef transform_point_T3(const ssef t[3], const ssef &a)
#endif
/* On CPU pass P and dir by reference to aligned vector. */
ccl_device_curveintersect bool cardinal_curve_intersect(
ccl_device_forceinline bool cardinal_curve_intersect(
KernelGlobals *kg,
Intersection *isect,
const float3 ccl_ref P,
@ -505,18 +499,18 @@ ccl_device_curveintersect bool cardinal_curve_intersect(
return hit;
}
ccl_device_curveintersect bool curve_intersect(KernelGlobals *kg,
Intersection *isect,
float3 P,
float3 direction,
uint visibility,
int object,
int curveAddr,
float time,
int type,
uint *lcg_state,
float difl,
float extmax)
ccl_device_forceinline bool curve_intersect(KernelGlobals *kg,
Intersection *isect,
float3 P,
float3 direction,
uint visibility,
int object,
int curveAddr,
float time,
int type,
uint *lcg_state,
float difl,
float extmax)
{
/* define a few macros to minimize code duplication for SSE */
#ifndef __KERNEL_SSE2__

View File

@ -50,10 +50,7 @@ __device__ half __float2half(const float f)
/* Qualifier wrappers for different names on different devices */
#define ccl_device __device__ __inline__
#if __CUDA_ARCH__ < 300
# define ccl_device_inline __device__ __inline__
# define ccl_device_forceinline __device__ __forceinline__
#elif __CUDA_ARCH__ < 500
#if __CUDA_ARCH__ < 500
# define ccl_device_inline __device__ __forceinline__
# define ccl_device_forceinline __device__ __forceinline__
#else
@ -138,18 +135,9 @@ ccl_device_inline uint ccl_num_groups(uint d)
/* Textures */
/* Use arrays for regular data. This is a little slower than textures on Fermi,
* but allows for cleaner code and we will stop supporting Fermi soon. */
/* Use arrays for regular data. */
#define kernel_tex_fetch(t, index) t[(index)]
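(For illustration: with this define, a hypothetical fetch such as kernel_tex_fetch(__bvh_nodes, i) expands to the plain array access __bvh_nodes[(i)].)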
/* On Kepler (6xx) and above, we use Bindless Textures for images.
* On Fermi cards (4xx and 5xx), we have to use regular textures. */
#if __CUDA_ARCH__ < 300
typedef texture<float4, 2> texture_image_float4;
typedef texture<float4, 3> texture_image3d_float4;
typedef texture<uchar4, 2, cudaReadModeNormalizedFloat> texture_image_uchar4;
#endif
#define kernel_data __data
/* Use fast math functions */

View File

@ -47,7 +47,6 @@ struct VolumeStep;
typedef struct KernelGlobals {
# define KERNEL_TEX(type, name) texture<type> name;
# define KERNEL_IMAGE_TEX(type, ttype, name)
# include "kernel/kernel_textures.h"
KernelData __data;
@ -93,7 +92,6 @@ typedef struct KernelGlobals {
} KernelGlobals;
# define KERNEL_TEX(type, name) const __constant__ __device__ type *name;
# define KERNEL_IMAGE_TEX(type, ttype, name) ttype name;
# include "kernel/kernel_textures.h"
#endif /* __KERNEL_CUDA__ */

View File

@ -18,10 +18,6 @@
# define KERNEL_TEX(type, name)
#endif
#ifndef KERNEL_IMAGE_TEX
# define KERNEL_IMAGE_TEX(type, ttype, name)
#endif
/* bvh */
KERNEL_TEX(float4, __bvh_nodes)
KERNEL_TEX(float4, __bvh_leaf_nodes)
@ -78,113 +74,8 @@ KERNEL_TEX(float, __lookup_table)
/* sobol */
KERNEL_TEX(uint, __sobol_directions)
#if !defined(__KERNEL_CUDA__) || __CUDA_ARCH__ >= 300
/* image textures */
KERNEL_TEX(TextureInfo, __texture_info)
#else
/* full-float image */
KERNEL_IMAGE_TEX(float4, texture_image_float4, __tex_image_float4_000)
KERNEL_IMAGE_TEX(float4, texture_image_float4, __tex_image_float4_008)
KERNEL_IMAGE_TEX(float4, texture_image_float4, __tex_image_float4_016)
KERNEL_IMAGE_TEX(float4, texture_image_float4, __tex_image_float4_024)
KERNEL_IMAGE_TEX(float4, texture_image_float4, __tex_image_float4_032)
KERNEL_IMAGE_TEX(float4, texture_image3d_float4, __tex_image_float4_3d_000)
KERNEL_IMAGE_TEX(float4, texture_image3d_float4, __tex_image_float4_3d_008)
KERNEL_IMAGE_TEX(float4, texture_image3d_float4, __tex_image_float4_3d_016)
KERNEL_IMAGE_TEX(float4, texture_image3d_float4, __tex_image_float4_3d_024)
KERNEL_IMAGE_TEX(float4, texture_image3d_float4, __tex_image_float4_3d_032)
/* image
* These texture names are encoded to their flattened slots as
* ImageManager::type_index_to_flattened_slot() returns them. */
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_001)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_009)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_017)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_025)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_033)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_041)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_049)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_057)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_065)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_073)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_081)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_089)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_097)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_105)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_113)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_121)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_129)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_137)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_145)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_153)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_161)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_169)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_177)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_185)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_193)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_201)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_209)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_217)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_225)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_233)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_241)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_249)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_257)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_265)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_273)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_281)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_289)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_297)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_305)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_313)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_321)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_329)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_337)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_345)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_353)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_361)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_369)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_377)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_385)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_393)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_401)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_409)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_417)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_425)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_433)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_441)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_449)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_457)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_465)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_473)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_481)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_489)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_497)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_505)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_513)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_521)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_529)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_537)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_545)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_553)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_561)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_569)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_577)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_585)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_593)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_601)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_609)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_617)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_625)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_633)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_641)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_649)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_657)
KERNEL_IMAGE_TEX(uchar4, texture_image_uchar4, __tex_image_byte4_665)
#endif /* defined(__KERNEL_CUDA__) && __CUDA_ARCH__ < 300 */
#undef KERNEL_TEX
#undef KERNEL_IMAGE_TEX

View File

@ -85,7 +85,6 @@ void kernel_tex_copy(KernelGlobals *kg,
kg->tname.data = (type*)mem; \
kg->tname.width = size; \
}
#define KERNEL_IMAGE_TEX(type, tname)
#include "kernel/kernel_textures.h"
else {
assert(0);

View File

@ -16,20 +16,8 @@
/* device data taken from CUDA occupancy calculator */
/* 2.0 and 2.1 */
#if __CUDA_ARCH__ == 200 || __CUDA_ARCH__ == 210
# define CUDA_MULTIPRESSOR_MAX_REGISTERS 32768
# define CUDA_MULTIPROCESSOR_MAX_BLOCKS 8
# define CUDA_BLOCK_MAX_THREADS 1024
# define CUDA_THREAD_MAX_REGISTERS 63
/* tunable parameters */
# define CUDA_THREADS_BLOCK_WIDTH 16
# define CUDA_KERNEL_MAX_REGISTERS 32
# define CUDA_KERNEL_BRANCHED_MAX_REGISTERS 40
/* 3.0 and 3.5 */
#elif __CUDA_ARCH__ == 300 || __CUDA_ARCH__ == 350
#if __CUDA_ARCH__ == 300 || __CUDA_ARCH__ == 350
# define CUDA_MULTIPRESSOR_MAX_REGISTERS 65536
# define CUDA_MULTIPROCESSOR_MAX_BLOCKS 16
# define CUDA_BLOCK_MAX_THREADS 1024

View File

@ -14,10 +14,6 @@
* limitations under the License.
*/
#if __CUDA_ARCH__ >= 300
/* Kepler */
/* w0, w1, w2, and w3 are the four cubic B-spline basis functions. */
ccl_device float cubic_w0(float a)
{
@ -191,120 +187,3 @@ ccl_device float4 kernel_tex_image_interp_3d(KernelGlobals *kg, int id, float x,
}
}
#else
/* Fermi */
ccl_device float4 kernel_tex_image_interp(KernelGlobals *kg, int id, float x, float y)
{
float4 r;
switch(id) {
case 0: r = tex2D(__tex_image_float4_000, x, y); break;
case 8: r = tex2D(__tex_image_float4_008, x, y); break;
case 16: r = tex2D(__tex_image_float4_016, x, y); break;
case 24: r = tex2D(__tex_image_float4_024, x, y); break;
case 32: r = tex2D(__tex_image_float4_032, x, y); break;
case 1: r = tex2D(__tex_image_byte4_001, x, y); break;
case 9: r = tex2D(__tex_image_byte4_009, x, y); break;
case 17: r = tex2D(__tex_image_byte4_017, x, y); break;
case 25: r = tex2D(__tex_image_byte4_025, x, y); break;
case 33: r = tex2D(__tex_image_byte4_033, x, y); break;
case 41: r = tex2D(__tex_image_byte4_041, x, y); break;
case 49: r = tex2D(__tex_image_byte4_049, x, y); break;
case 57: r = tex2D(__tex_image_byte4_057, x, y); break;
case 65: r = tex2D(__tex_image_byte4_065, x, y); break;
case 73: r = tex2D(__tex_image_byte4_073, x, y); break;
case 81: r = tex2D(__tex_image_byte4_081, x, y); break;
case 89: r = tex2D(__tex_image_byte4_089, x, y); break;
case 97: r = tex2D(__tex_image_byte4_097, x, y); break;
case 105: r = tex2D(__tex_image_byte4_105, x, y); break;
case 113: r = tex2D(__tex_image_byte4_113, x, y); break;
case 121: r = tex2D(__tex_image_byte4_121, x, y); break;
case 129: r = tex2D(__tex_image_byte4_129, x, y); break;
case 137: r = tex2D(__tex_image_byte4_137, x, y); break;
case 145: r = tex2D(__tex_image_byte4_145, x, y); break;
case 153: r = tex2D(__tex_image_byte4_153, x, y); break;
case 161: r = tex2D(__tex_image_byte4_161, x, y); break;
case 169: r = tex2D(__tex_image_byte4_169, x, y); break;
case 177: r = tex2D(__tex_image_byte4_177, x, y); break;
case 185: r = tex2D(__tex_image_byte4_185, x, y); break;
case 193: r = tex2D(__tex_image_byte4_193, x, y); break;
case 201: r = tex2D(__tex_image_byte4_201, x, y); break;
case 209: r = tex2D(__tex_image_byte4_209, x, y); break;
case 217: r = tex2D(__tex_image_byte4_217, x, y); break;
case 225: r = tex2D(__tex_image_byte4_225, x, y); break;
case 233: r = tex2D(__tex_image_byte4_233, x, y); break;
case 241: r = tex2D(__tex_image_byte4_241, x, y); break;
case 249: r = tex2D(__tex_image_byte4_249, x, y); break;
case 257: r = tex2D(__tex_image_byte4_257, x, y); break;
case 265: r = tex2D(__tex_image_byte4_265, x, y); break;
case 273: r = tex2D(__tex_image_byte4_273, x, y); break;
case 281: r = tex2D(__tex_image_byte4_281, x, y); break;
case 289: r = tex2D(__tex_image_byte4_289, x, y); break;
case 297: r = tex2D(__tex_image_byte4_297, x, y); break;
case 305: r = tex2D(__tex_image_byte4_305, x, y); break;
case 313: r = tex2D(__tex_image_byte4_313, x, y); break;
case 321: r = tex2D(__tex_image_byte4_321, x, y); break;
case 329: r = tex2D(__tex_image_byte4_329, x, y); break;
case 337: r = tex2D(__tex_image_byte4_337, x, y); break;
case 345: r = tex2D(__tex_image_byte4_345, x, y); break;
case 353: r = tex2D(__tex_image_byte4_353, x, y); break;
case 361: r = tex2D(__tex_image_byte4_361, x, y); break;
case 369: r = tex2D(__tex_image_byte4_369, x, y); break;
case 377: r = tex2D(__tex_image_byte4_377, x, y); break;
case 385: r = tex2D(__tex_image_byte4_385, x, y); break;
case 393: r = tex2D(__tex_image_byte4_393, x, y); break;
case 401: r = tex2D(__tex_image_byte4_401, x, y); break;
case 409: r = tex2D(__tex_image_byte4_409, x, y); break;
case 417: r = tex2D(__tex_image_byte4_417, x, y); break;
case 425: r = tex2D(__tex_image_byte4_425, x, y); break;
case 433: r = tex2D(__tex_image_byte4_433, x, y); break;
case 441: r = tex2D(__tex_image_byte4_441, x, y); break;
case 449: r = tex2D(__tex_image_byte4_449, x, y); break;
case 457: r = tex2D(__tex_image_byte4_457, x, y); break;
case 465: r = tex2D(__tex_image_byte4_465, x, y); break;
case 473: r = tex2D(__tex_image_byte4_473, x, y); break;
case 481: r = tex2D(__tex_image_byte4_481, x, y); break;
case 489: r = tex2D(__tex_image_byte4_489, x, y); break;
case 497: r = tex2D(__tex_image_byte4_497, x, y); break;
case 505: r = tex2D(__tex_image_byte4_505, x, y); break;
case 513: r = tex2D(__tex_image_byte4_513, x, y); break;
case 521: r = tex2D(__tex_image_byte4_521, x, y); break;
case 529: r = tex2D(__tex_image_byte4_529, x, y); break;
case 537: r = tex2D(__tex_image_byte4_537, x, y); break;
case 545: r = tex2D(__tex_image_byte4_545, x, y); break;
case 553: r = tex2D(__tex_image_byte4_553, x, y); break;
case 561: r = tex2D(__tex_image_byte4_561, x, y); break;
case 569: r = tex2D(__tex_image_byte4_569, x, y); break;
case 577: r = tex2D(__tex_image_byte4_577, x, y); break;
case 585: r = tex2D(__tex_image_byte4_585, x, y); break;
case 593: r = tex2D(__tex_image_byte4_593, x, y); break;
case 601: r = tex2D(__tex_image_byte4_601, x, y); break;
case 609: r = tex2D(__tex_image_byte4_609, x, y); break;
case 617: r = tex2D(__tex_image_byte4_617, x, y); break;
case 625: r = tex2D(__tex_image_byte4_625, x, y); break;
case 633: r = tex2D(__tex_image_byte4_633, x, y); break;
case 641: r = tex2D(__tex_image_byte4_641, x, y); break;
case 649: r = tex2D(__tex_image_byte4_649, x, y); break;
case 657: r = tex2D(__tex_image_byte4_657, x, y); break;
case 665: r = tex2D(__tex_image_byte4_665, x, y); break;
default: r = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
}
return r;
}
ccl_device float4 kernel_tex_image_interp_3d(KernelGlobals *kg, int id, float x, float y, float z, InterpolationType interp)
{
float4 r;
switch(id) {
case 0: r = tex3D(__tex_image_float4_3d_000, x, y, z); break;
case 8: r = tex3D(__tex_image_float4_3d_008, x, y, z); break;
case 16: r = tex3D(__tex_image_float4_3d_016, x, y, z); break;
case 24: r = tex3D(__tex_image_float4_3d_024, x, y, z); break;
case 32: r = tex3D(__tex_image_float4_3d_032, x, y, z); break;
}
return r;
}
#endif

View File

@ -30,8 +30,7 @@
* in local memory on the GPU, as it would take too many registers and indexes in
* ways not known at compile time. This seems the only solution even though it
* may be slow, with two positive factors. If the same shader is being executed,
* memory access will be coalesced, and on fermi cards, memory will actually be
* cached.
* memory access will be coalesced and cached.
*
* The result of shader execution will be a single closure. This means the
* closure type, associated label, data and weight. Sampling from multiple

View File

@ -49,7 +49,6 @@ ImageManager::ImageManager(const DeviceInfo& info)
/* Set image limits */
max_num_images = TEX_NUM_MAX;
has_half_images = info.has_half_images;
cuda_fermi_limits = info.has_fermi_limits;
for(size_t type = 0; type < IMAGE_DATA_NUM_TYPES; type++) {
tex_num_images[type] = 0;
@ -255,7 +254,7 @@ int ImageManager::add_image(const string& filename,
/* Check whether it's a float texture. */
is_float = (type == IMAGE_DATA_TYPE_FLOAT || type == IMAGE_DATA_TYPE_FLOAT4);
/* No single channel and half textures on CUDA (Fermi) and no half on OpenCL, use available slots */
/* No half textures on OpenCL, use full float instead. */
if(!has_half_images) {
if(type == IMAGE_DATA_TYPE_HALF4) {
type = IMAGE_DATA_TYPE_FLOAT4;
@ -265,15 +264,6 @@ int ImageManager::add_image(const string& filename,
}
}
if(cuda_fermi_limits) {
if(type == IMAGE_DATA_TYPE_FLOAT) {
type = IMAGE_DATA_TYPE_FLOAT4;
}
else if(type == IMAGE_DATA_TYPE_BYTE) {
type = IMAGE_DATA_TYPE_BYTE4;
}
}
/* Find existing image. */
for(slot = 0; slot < images[type].size(); slot++) {
img = images[type][slot];
@ -303,27 +293,16 @@ int ImageManager::add_image(const string& filename,
break;
}
/* Count if we're over the limit */
if(cuda_fermi_limits) {
if(tex_num_images[IMAGE_DATA_TYPE_BYTE4] == TEX_NUM_BYTE4_CUDA
|| tex_num_images[IMAGE_DATA_TYPE_FLOAT4] == TEX_NUM_FLOAT4_CUDA)
{
printf("ImageManager::add_image: Reached %s image limit (%d), skipping '%s'\n",
name_from_type(type).c_str(), tex_num_images[type], filename.c_str());
return -1;
}
/* Count if we're over the limit.
* Very unlikely, since max_num_images is insanely big. But better safe than sorry. */
int tex_count = 0;
for(int type = 0; type < IMAGE_DATA_NUM_TYPES; type++) {
tex_count += tex_num_images[type];
}
else {
/* Very unlikely, since max_num_images is insanely big. But better safe than sorry. */
int tex_count = 0;
for(int type = 0; type < IMAGE_DATA_NUM_TYPES; type++) {
tex_count += tex_num_images[type];
}
if(tex_count > max_num_images) {
printf("ImageManager::add_image: Reached image limit (%d), skipping '%s'\n",
max_num_images, filename.c_str());
return -1;
}
if(tex_count > max_num_images) {
printf("ImageManager::add_image: Reached image limit (%d), skipping '%s'\n",
max_num_images, filename.c_str());
return -1;
}
if(slot == images[type].size()) {

View File

@ -121,7 +121,6 @@ private:
int tex_num_images[IMAGE_DATA_NUM_TYPES];
int max_num_images;
bool has_half_images;
bool cuda_fermi_limits;
thread_mutex device_mutex;
int animation_frame;

View File

@ -79,12 +79,7 @@ ccl_device bool ray_aligned_disk_intersect(
return true;
}
#if defined(__KERNEL_CUDA__) && __CUDA_ARCH__ < 300
ccl_device_inline
#else
ccl_device_forceinline
#endif
bool ray_triangle_intersect(
ccl_device_forceinline bool ray_triangle_intersect(
float3 ray_P, float3 ray_dir, float ray_t,
#if defined(__KERNEL_SSE2__) && defined(__KERNEL_SSE__)
const ssef *ssef_verts,

View File

@ -20,22 +20,6 @@
CCL_NAMESPACE_BEGIN
/* Texture limits on devices. */
/* CUDA (Geforce 4xx and 5xx) */
#define TEX_NUM_FLOAT4_CUDA 5
#define TEX_NUM_BYTE4_CUDA 84
#define TEX_NUM_HALF4_CUDA 0
#define TEX_NUM_FLOAT_CUDA 0
#define TEX_NUM_BYTE_CUDA 0
#define TEX_NUM_HALF_CUDA 0
#define TEX_START_FLOAT4_CUDA 0
#define TEX_START_BYTE4_CUDA TEX_NUM_FLOAT4_CUDA
#define TEX_START_HALF4_CUDA (TEX_NUM_FLOAT4_CUDA + TEX_NUM_BYTE4_CUDA)
#define TEX_START_FLOAT_CUDA (TEX_NUM_FLOAT4_CUDA + TEX_NUM_BYTE4_CUDA + TEX_NUM_HALF4_CUDA)
#define TEX_START_BYTE_CUDA (TEX_NUM_FLOAT4_CUDA + TEX_NUM_BYTE4_CUDA + TEX_NUM_HALF4_CUDA + TEX_NUM_FLOAT_CUDA)
#define TEX_START_HALF_CUDA (TEX_NUM_FLOAT4_CUDA + TEX_NUM_BYTE4_CUDA + TEX_NUM_HALF4_CUDA + TEX_NUM_FLOAT_CUDA + TEX_NUM_BYTE_CUDA)
/* Any architecture other than old CUDA cards */
#define TEX_NUM_MAX (INT_MAX >> 4)
/* Color to use when textures are not found. */
@ -44,11 +28,8 @@ CCL_NAMESPACE_BEGIN
#define TEX_IMAGE_MISSING_B 1
#define TEX_IMAGE_MISSING_A 1
#if defined (__KERNEL_CUDA__) && (__CUDA_ARCH__ < 300)
# define kernel_tex_type(tex) (tex < TEX_START_BYTE4_CUDA ? IMAGE_DATA_TYPE_FLOAT4 : IMAGE_DATA_TYPE_BYTE4)
#else
# define kernel_tex_type(tex) (tex & IMAGE_DATA_TYPE_MASK)
#endif
/* Texture type. */
#define kernel_tex_type(tex) (tex & IMAGE_DATA_TYPE_MASK)
/* Interpolation types for textures
* cuda also uses texture space to store other objects */

View File

@ -44,7 +44,7 @@
#include "atomic_ops.h"
#include "BLI_math.h"
#include "BLI_jitter.h"
#include "BLI_jitter_2d.h"
#include "BLI_bitmap.h"
#include "BLI_task.h"

View File

@ -43,7 +43,7 @@
#include "BLI_math.h"
#include "BLI_edgehash.h"
#include "BLI_bitmap.h"
#include "BLI_polyfill2d.h"
#include "BLI_polyfill_2d.h"
#include "BLI_linklist.h"
#include "BLI_linklist_stack.h"
#include "BLI_alloca.h"

View File

@ -36,7 +36,7 @@
#include "BLI_bitmap.h"
#include "BLI_math.h"
#include "BLI_memarena.h"
#include "BLI_polyfill2d.h"
#include "BLI_polyfill_2d.h"
#include "BLI_rand.h"
#include "BKE_bvhutils.h"

View File

@ -36,7 +36,7 @@
#include "MEM_guardedalloc.h"
#include "BLI_utildefines.h"
#include "BLI_jitter.h"
#include "BLI_jitter_2d.h"
#include "BLI_kdtree.h"
#include "BLI_math.h"
#include "BLI_math_geom.h"

View File

@ -58,7 +58,7 @@
#include "BLI_utildefines.h"
#include "BLI_edgehash.h"
#include "BLI_rand.h"
#include "BLI_jitter.h"
#include "BLI_jitter_2d.h"
#include "BLI_math.h"
#include "BLI_blenlib.h"
#include "BLI_kdtree.h"

View File

@ -25,10 +25,10 @@
* ***** END GPL LICENSE BLOCK *****
*/
#ifndef __BLI_BOXPACK2D_H__
#define __BLI_BOXPACK2D_H__
#ifndef __BLI_BOXPACK_2D_H__
#define __BLI_BOXPACK_2D_H__
/** \file BLI_boxpack2d.h
/** \file BLI_boxpack_2d.h
* \ingroup bli
*/
@ -39,7 +39,7 @@ typedef struct BoxPack {
float y;
float w;
float h;
/* Verts this box uses
* (BL,TR,TL,BR) / 0,1,2,3 */
struct BoxVert *v[4];
@ -49,5 +49,5 @@ typedef struct BoxPack {
void BLI_box_pack_2d(BoxPack *boxarray, const unsigned int len, float *tot_width, float *tot_height);
#endif
#endif /* __BLI_BOXPACK_2D_H__ */

View File

@ -18,10 +18,10 @@
* ***** END GPL LICENSE BLOCK *****
*/
#ifndef __BLI_CONVEXHULL2D_H__
#define __BLI_CONVEXHULL2D_H__
#ifndef __BLI_CONVEXHULL_2D_H__
#define __BLI_CONVEXHULL_2D_H__
/** \file BLI_convexhull2d.h
/** \file BLI_convexhull_2d.h
* \ingroup bli
*/
@ -31,4 +31,4 @@ int BLI_convexhull_2d(const float (*points)[2], const int n, int r_points[]);
float BLI_convexhull_aabb_fit_hull_2d(const float (*points_hull)[2], unsigned int n);
float BLI_convexhull_aabb_fit_points_2d(const float (*points)[2], unsigned int n);
#endif /* __BLI_CONVEXHULL2D_H__ */
#endif /* __BLI_CONVEXHULL_2D_H__ */

View File

@ -18,10 +18,10 @@
* ***** END GPL LICENSE BLOCK *****
*/
#ifndef __BLI_DIAL_H__
#define __BLI_DIAL_H__
#ifndef __BLI_DIAL_2D_H__
#define __BLI_DIAL_2D_H__
/** \file BLI_dial.h
/** \file BLI_dial_2d.h
* \ingroup bli
*
* \note dials act similar to old rotation based phones and output an angle.
@ -56,4 +56,4 @@ Dial *BLI_dial_initialize(const float start_position[2], float threshold);
float BLI_dial_angle(Dial *dial, const float current_position[2]);
#endif /* __BLI_DIAL_H__ */
#endif /* __BLI_DIAL_2D_H__ */

View File

@ -81,7 +81,10 @@ enum {
#endif
};
/* *** */
/** \name GHash API
*
* Defined in ``BLI_ghash.c``
* \{ */
GHash *BLI_ghash_new_ex(
GHashHashFP hashfp, GHashCmpFP cmpfp, const char *info,
@ -114,7 +117,10 @@ unsigned int BLI_ghash_len(GHash *gh) ATTR_WARN_UNUSED_RESULT;
void BLI_ghash_flag_set(GHash *gh, unsigned int flag);
void BLI_ghash_flag_clear(GHash *gh, unsigned int flag);
/* *** */
/** \} */
/** \name GHash Iterator
* \{ */
GHashIterator *BLI_ghashIterator_new(GHash *gh) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT;
@ -149,7 +155,134 @@ BLI_INLINE bool BLI_ghashIterator_done(GHashIterator *ghi) { return !ghi
BLI_ghashIterator_done(&gh_iter_) == false; \
BLI_ghashIterator_step(&gh_iter_), i_++)
/** \name Callbacks for GHash
/** \} */
/** \name GSet API
* A 'set' implementation (unordered collection of unique elements).
*
* Internally this is a 'GHash' without any keys,
* which is why these APIs are in the same header & source file.
*
* \{ */
typedef struct GSet GSet;
typedef GHashHashFP GSetHashFP;
typedef GHashCmpFP GSetCmpFP;
typedef GHashKeyFreeFP GSetKeyFreeFP;
typedef GHashKeyCopyFP GSetKeyCopyFP;
typedef GHashIterState GSetIterState;
GSet *BLI_gset_new_ex(
GSetHashFP hashfp, GSetCmpFP cmpfp, const char *info,
const unsigned int nentries_reserve) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT;
GSet *BLI_gset_new(GSetHashFP hashfp, GSetCmpFP cmpfp, const char *info) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT;
GSet *BLI_gset_copy(GSet *gs, GSetKeyCopyFP keycopyfp) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT;
unsigned int BLI_gset_len(GSet *gs) ATTR_WARN_UNUSED_RESULT;
void BLI_gset_flag_set(GSet *gs, unsigned int flag);
void BLI_gset_flag_clear(GSet *gs, unsigned int flag);
void BLI_gset_free(GSet *gs, GSetKeyFreeFP keyfreefp);
void BLI_gset_insert(GSet *gh, void *key);
bool BLI_gset_add(GSet *gs, void *key);
bool BLI_gset_ensure_p_ex(GSet *gs, const void *key, void ***r_key);
bool BLI_gset_reinsert(GSet *gh, void *key, GSetKeyFreeFP keyfreefp);
void *BLI_gset_replace_key(GSet *gs, void *key);
bool BLI_gset_haskey(GSet *gs, const void *key) ATTR_WARN_UNUSED_RESULT;
bool BLI_gset_pop(GSet *gs, GSetIterState *state, void **r_key) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL();
bool BLI_gset_remove(GSet *gs, const void *key, GSetKeyFreeFP keyfreefp);
void BLI_gset_clear_ex(GSet *gs, GSetKeyFreeFP keyfreefp,
const unsigned int nentries_reserve);
void BLI_gset_clear(GSet *gs, GSetKeyFreeFP keyfreefp);
/* When sets are used for key & value. */
void *BLI_gset_lookup(GSet *gh, const void *key) ATTR_WARN_UNUSED_RESULT;
void *BLI_gset_pop_key(GSet *gs, const void *key) ATTR_WARN_UNUSED_RESULT;
/** \name GSet Iterator
* \{ */
/* rely on inline api for now */
/* so we can cast but compiler sees as different */
typedef struct GSetIterator {
GHashIterator _ghi
#ifdef __GNUC__
__attribute__ ((deprecated))
#endif
;
} GSetIterator;
BLI_INLINE GSetIterator *BLI_gsetIterator_new(GSet *gs) { return (GSetIterator *)BLI_ghashIterator_new((GHash *)gs); }
BLI_INLINE void BLI_gsetIterator_init(GSetIterator *gsi, GSet *gs) { BLI_ghashIterator_init((GHashIterator *)gsi, (GHash *)gs); }
BLI_INLINE void BLI_gsetIterator_free(GSetIterator *gsi) { BLI_ghashIterator_free((GHashIterator *)gsi); }
BLI_INLINE void *BLI_gsetIterator_getKey(GSetIterator *gsi) { return BLI_ghashIterator_getKey((GHashIterator *)gsi); }
BLI_INLINE void BLI_gsetIterator_step(GSetIterator *gsi) { BLI_ghashIterator_step((GHashIterator *)gsi); }
BLI_INLINE bool BLI_gsetIterator_done(GSetIterator *gsi) { return BLI_ghashIterator_done((GHashIterator *)gsi); }
#define GSET_ITER(gs_iter_, gset_) \
for (BLI_gsetIterator_init(&gs_iter_, gset_); \
BLI_gsetIterator_done(&gs_iter_) == false; \
BLI_gsetIterator_step(&gs_iter_))
#define GSET_ITER_INDEX(gs_iter_, gset_, i_) \
for (BLI_gsetIterator_init(&gs_iter_, gset_), i_ = 0; \
BLI_gsetIterator_done(&gs_iter_) == false; \
BLI_gsetIterator_step(&gs_iter_), i_++)
/** \} */
/** \name GHash/GSet Debugging APIs
* \{ */
/* For testing, debugging only */
#ifdef GHASH_INTERNAL_API
int BLI_ghash_buckets_len(GHash *gh);
int BLI_gset_buckets_len(GSet *gs);
double BLI_ghash_calc_quality_ex(
GHash *gh, double *r_load, double *r_variance,
double *r_prop_empty_buckets, double *r_prop_overloaded_buckets, int *r_biggest_bucket);
double BLI_gset_calc_quality_ex(
GSet *gs, double *r_load, double *r_variance,
double *r_prop_empty_buckets, double *r_prop_overloaded_buckets, int *r_biggest_bucket);
double BLI_ghash_calc_quality(GHash *gh);
double BLI_gset_calc_quality(GSet *gs);
#endif /* GHASH_INTERNAL_API */
/** \} */
/** \name GHash/GSet Macros
* \{ */
#define GHASH_FOREACH_BEGIN(type, var, what) \
do { \
GHashIterator gh_iter##var; \
GHASH_ITER(gh_iter##var, what) { \
type var = (type)(BLI_ghashIterator_getValue(&gh_iter##var)); \
#define GHASH_FOREACH_END() \
} \
} while(0)
#define GSET_FOREACH_BEGIN(type, var, what) \
do { \
GSetIterator gh_iter##var; \
GSET_ITER(gh_iter##var, what) { \
type var = (type)(BLI_gsetIterator_getKey(&gh_iter##var));
#define GSET_FOREACH_END() \
} \
} while(0)
/** \} */
/** \name GHash/GSet Utils
*
* Defined in ``BLI_ghash_utils.c``
* \{ */
/**
* Callbacks for GHash (``BLI_ghashutil_``)
*
* \note '_p' suffix denotes void pointer arg,
* so we can have functions that take correctly typed args too.
@ -198,7 +331,19 @@ bool BLI_ghashutil_uinthash_v4_cmp(const void *a, const void *b);
#define BLI_ghashutil_inthash_v4_cmp \
BLI_ghashutil_uinthash_v4_cmp
/** \} */
typedef struct GHashPair {
const void *first;
const void *second;
} GHashPair;
GHashPair *BLI_ghashutil_pairalloc(const void *first, const void *second);
unsigned int BLI_ghashutil_pairhash(const void *ptr);
bool BLI_ghashutil_paircmp(const void *a, const void *b);
void BLI_ghashutil_pairfree(void *ptr);
/**
* Wrapper GHash Creation Functions
*/
GHash *BLI_ghash_ptr_new_ex(
const char *info,
@ -221,126 +366,20 @@ GHash *BLI_ghash_pair_new_ex(
GHash *BLI_ghash_pair_new(
const char *info) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT;
typedef struct GHashPair {
const void *first;
const void *second;
} GHashPair;
GSet *BLI_gset_ptr_new_ex(
const char *info, const unsigned int nentries_reserve) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT;
GSet *BLI_gset_ptr_new(
const char *info);
GSet *BLI_gset_str_new_ex(
const char *info, const unsigned int nentries_reserve) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT;
GSet *BLI_gset_str_new(
const char *info);
GSet *BLI_gset_pair_new_ex(
const char *info, const unsigned int nentries_reserve) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT;
GSet *BLI_gset_pair_new(
const char *info) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT;
GHashPair *BLI_ghashutil_pairalloc(const void *first, const void *second);
unsigned int BLI_ghashutil_pairhash(const void *ptr);
bool BLI_ghashutil_paircmp(const void *a, const void *b);
void BLI_ghashutil_pairfree(void *ptr);
/**
* GSet is a 'set' implementation (unordered collection of unique elements).
*
* Internally this is a 'GHash' without any keys,
* which is why this API's are in the same header & source file.
*/
typedef struct GSet GSet;
typedef GHashHashFP GSetHashFP;
typedef GHashCmpFP GSetCmpFP;
typedef GHashKeyFreeFP GSetKeyFreeFP;
typedef GHashKeyCopyFP GSetKeyCopyFP;
typedef GHashIterState GSetIterState;
/* so we can cast but compiler sees as different */
typedef struct GSetIterator {
GHashIterator _ghi
#ifdef __GNUC__
__attribute__ ((deprecated))
#endif
;
} GSetIterator;
GSet *BLI_gset_new_ex(
GSetHashFP hashfp, GSetCmpFP cmpfp, const char *info,
const unsigned int nentries_reserve) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT;
GSet *BLI_gset_new(GSetHashFP hashfp, GSetCmpFP cmpfp, const char *info) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT;
GSet *BLI_gset_copy(GSet *gs, GSetKeyCopyFP keycopyfp) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT;
unsigned int BLI_gset_len(GSet *gs) ATTR_WARN_UNUSED_RESULT;
void BLI_gset_flag_set(GSet *gs, unsigned int flag);
void BLI_gset_flag_clear(GSet *gs, unsigned int flag);
void BLI_gset_free(GSet *gs, GSetKeyFreeFP keyfreefp);
void BLI_gset_insert(GSet *gh, void *key);
bool BLI_gset_add(GSet *gs, void *key);
bool BLI_gset_ensure_p_ex(GSet *gs, const void *key, void ***r_key);
bool BLI_gset_reinsert(GSet *gh, void *key, GSetKeyFreeFP keyfreefp);
void *BLI_gset_replace_key(GSet *gs, void *key);
bool BLI_gset_haskey(GSet *gs, const void *key) ATTR_WARN_UNUSED_RESULT;
bool BLI_gset_pop(GSet *gs, GSetIterState *state, void **r_key) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL();
bool BLI_gset_remove(GSet *gs, const void *key, GSetKeyFreeFP keyfreefp);
void BLI_gset_clear_ex(GSet *gs, GSetKeyFreeFP keyfreefp,
const unsigned int nentries_reserve);
void BLI_gset_clear(GSet *gs, GSetKeyFreeFP keyfreefp);
/* When set's are used for key & value. */
void *BLI_gset_lookup(GSet *gh, const void *key) ATTR_WARN_UNUSED_RESULT;
void *BLI_gset_pop_key(GSet *gs, const void *key) ATTR_WARN_UNUSED_RESULT;
GSet *BLI_gset_ptr_new_ex(const char *info, const unsigned int nentries_reserve) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT;
GSet *BLI_gset_ptr_new(const char *info);
GSet *BLI_gset_str_new_ex(const char *info, const unsigned int nentries_reserve) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT;
GSet *BLI_gset_str_new(const char *info);
GSet *BLI_gset_pair_new_ex(const char *info, const unsigned int nentries_reserve) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT;
GSet *BLI_gset_pair_new(const char *info) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT;
/* rely on inline api for now */
BLI_INLINE GSetIterator *BLI_gsetIterator_new(GSet *gs) { return (GSetIterator *)BLI_ghashIterator_new((GHash *)gs); }
BLI_INLINE void BLI_gsetIterator_init(GSetIterator *gsi, GSet *gs) { BLI_ghashIterator_init((GHashIterator *)gsi, (GHash *)gs); }
BLI_INLINE void BLI_gsetIterator_free(GSetIterator *gsi) { BLI_ghashIterator_free((GHashIterator *)gsi); }
BLI_INLINE void *BLI_gsetIterator_getKey(GSetIterator *gsi) { return BLI_ghashIterator_getKey((GHashIterator *)gsi); }
BLI_INLINE void BLI_gsetIterator_step(GSetIterator *gsi) { BLI_ghashIterator_step((GHashIterator *)gsi); }
BLI_INLINE bool BLI_gsetIterator_done(GSetIterator *gsi) { return BLI_ghashIterator_done((GHashIterator *)gsi); }
#define GSET_ITER(gs_iter_, gset_) \
for (BLI_gsetIterator_init(&gs_iter_, gset_); \
BLI_gsetIterator_done(&gs_iter_) == false; \
BLI_gsetIterator_step(&gs_iter_))
#define GSET_ITER_INDEX(gs_iter_, gset_, i_) \
for (BLI_gsetIterator_init(&gs_iter_, gset_), i_ = 0; \
BLI_gsetIterator_done(&gs_iter_) == false; \
BLI_gsetIterator_step(&gs_iter_), i_++)
/* For testing, debugging only */
#ifdef GHASH_INTERNAL_API
int BLI_ghash_buckets_len(GHash *gh);
int BLI_gset_buckets_len(GSet *gs);
double BLI_ghash_calc_quality_ex(
GHash *gh, double *r_load, double *r_variance,
double *r_prop_empty_buckets, double *r_prop_overloaded_buckets, int *r_biggest_bucket);
double BLI_gset_calc_quality_ex(
GSet *gs, double *r_load, double *r_variance,
double *r_prop_empty_buckets, double *r_prop_overloaded_buckets, int *r_biggest_bucket);
double BLI_ghash_calc_quality(GHash *gh);
double BLI_gset_calc_quality(GSet *gs);
#endif /* GHASH_INTERNAL_API */
#define GHASH_FOREACH_BEGIN(type, var, what) \
do { \
GHashIterator gh_iter##var; \
GHASH_ITER(gh_iter##var, what) { \
type var = (type)(BLI_ghashIterator_getValue(&gh_iter##var)); \
#define GHASH_FOREACH_END() \
} \
} while(0)
#define GSET_FOREACH_BEGIN(type, var, what) \
do { \
GSetIterator gh_iter##var; \
GSET_ITER(gh_iter##var, what) { \
type var = (type)(BLI_gsetIterator_getKey(&gh_iter##var));
#define GSET_FOREACH_END() \
} \
} while(0)
/** \} */
#ifdef __cplusplus
}
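A quick usage illustration of the GSet API grouped above, written against the declarations shown in this header rather than taken from the patch; the keys are made up:

#include <stdio.h>

#include "BLI_ghash.h"  /* GSet is declared here, in the same header as GHash */

static void gset_demo(void)
{
	GSet *gs = BLI_gset_str_new("gset_demo");  /* string-hashed set */

	BLI_gset_add(gs, "suzanne");  /* true: newly added */
	BLI_gset_add(gs, "cube");
	BLI_gset_add(gs, "suzanne");  /* false: already present */

	printf("len=%u, has cube: %d\n", BLI_gset_len(gs), BLI_gset_haskey(gs, "cube"));

	/* Keys are string literals, so no free callback is needed. */
	BLI_gset_free(gs, NULL);
}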

View File

@ -25,10 +25,10 @@
* ***** END GPL LICENSE BLOCK *****
*/
#ifndef __BLI_JITTER_H__
#define __BLI_JITTER_H__
#ifndef __BLI_JITTER_2D_H__
#define __BLI_JITTER_2D_H__
/** \file BLI_jitter.h
/** \file BLI_jitter_2d.h
* \ingroup bli
*/
@ -36,5 +36,5 @@ void BLI_jitter_init(float (*jitarr)[2], int num);
void BLI_jitterate1(float (*jit1)[2], float (*jit2)[2], int num, float radius1);
void BLI_jitterate2(float (*jit1)[2], float (*jit2)[2], int num, float radius2);
#endif
#endif /* __BLI_JITTER_2D_H__ */

View File

@ -25,10 +25,10 @@
* ***** END GPL LICENSE BLOCK *****
*/
#ifndef __BLI_LASSO_H__
#define __BLI_LASSO_H__
#ifndef __BLI_LASSO_2D_H__
#define __BLI_LASSO_2D_H__
/** \file BLI_lasso.h
/** \file BLI_lasso_2d.h
* \ingroup bli
*/
@ -38,4 +38,4 @@ void BLI_lasso_boundbox(struct rcti *rect, const int mcords[][2], const unsigned
bool BLI_lasso_is_point_inside(const int mcords[][2], const unsigned int moves, const int sx, const int sy, const int error_value);
bool BLI_lasso_is_edge_inside(const int mcords[][2], const unsigned int moves, int x0, int y0, int x1, int y1, const int error_value);
#endif
#endif /* __BLI_LASSO_2D_H__ */

View File

@ -18,8 +18,8 @@
* ***** END GPL LICENSE BLOCK *****
*/
#ifndef __BLI_POLYFILL2D_H__
#define __BLI_POLYFILL2D_H__
#ifndef __BLI_POLYFILL_2D_H__
#define __BLI_POLYFILL_2D_H__
struct MemArena;
@ -40,4 +40,4 @@ void BLI_polyfill_calc(
/* default size of polyfill arena */
#define BLI_POLYFILL_ARENA_SIZE MEM_SIZE_OPTIMAL(1 << 14)
#endif /* __BLI_POLYFILL2D_H__ */
#endif /* __BLI_POLYFILL_2D_H__ */

View File

@ -28,7 +28,7 @@
struct ListBase;
/** \file BLI_voronoi.h
/** \file BLI_voronoi_2d.h
* \ingroup bli
*/

View File

@ -41,10 +41,11 @@ set(INC_SYS
set(SRC
intern/BLI_args.c
intern/BLI_array.c
intern/BLI_dial.c
intern/BLI_dial_2d.c
intern/BLI_dynstr.c
intern/BLI_filelist.c
intern/BLI_ghash.c
intern/BLI_ghash_utils.c
intern/BLI_heap.c
intern/BLI_kdopbvh.c
intern/BLI_kdtree.c
@ -58,10 +59,10 @@ set(SRC
intern/array_utils.c
intern/astar.c
intern/bitmap_draw_2d.c
intern/boxpack2d.c
intern/boxpack_2d.c
intern/buffer.c
intern/callbacks.c
intern/convexhull2d.c
intern/convexhull_2d.c
intern/dynlib.c
intern/easing.c
intern/edgehash.c
@ -73,8 +74,8 @@ set(SRC
intern/gsqueue.c
intern/hash_md5.c
intern/hash_mm2a.c
intern/jitter.c
intern/lasso.c
intern/jitter_2d.c
intern/lasso_2d.c
intern/list_sort_impl.h
intern/listbase.c
intern/math_base.c
@ -95,8 +96,8 @@ set(SRC
intern/memory_utils.c
intern/noise.c
intern/path_util.c
intern/polyfill2d.c
intern/polyfill2d_beautify.c
intern/polyfill_2d.c
intern/polyfill_2d_beautify.c
intern/quadric.c
intern/rand.c
intern/rct.c
@ -117,7 +118,7 @@ set(SRC
intern/time.c
intern/timecode.c
intern/uvproject.c
intern/voronoi.c
intern/voronoi_2d.c
intern/voxel.c
intern/winstuff.c
intern/winstuff_dir.c
@ -132,14 +133,14 @@ set(SRC
BLI_bitmap.h
BLI_bitmap_draw_2d.h
BLI_blenlib.h
BLI_boxpack2d.h
BLI_boxpack_2d.h
BLI_buffer.h
BLI_callbacks.h
BLI_compiler_attrs.h
BLI_compiler_compat.h
BLI_compiler_typecheck.h
BLI_convexhull2d.h
BLI_dial.h
BLI_convexhull_2d.h
BLI_dial_2d.h
BLI_dlrbTree.h
BLI_dynlib.h
BLI_dynstr.h
@ -158,10 +159,10 @@ set(SRC
BLI_hash_mm2a.h
BLI_heap.h
BLI_iterator.h
BLI_jitter.h
BLI_jitter_2d.h
BLI_kdopbvh.h
BLI_kdtree.h
BLI_lasso.h
BLI_lasso_2d.h
BLI_link_utils.h
BLI_linklist.h
BLI_linklist_stack.h
@ -185,8 +186,8 @@ set(SRC
BLI_mempool.h
BLI_noise.h
BLI_path_util.h
BLI_polyfill2d.h
BLI_polyfill2d_beautify.h
BLI_polyfill_2d.h
BLI_polyfill_2d_beautify.h
BLI_quadric.h
BLI_rand.h
BLI_rect.h
@ -211,7 +212,7 @@ set(SRC
BLI_utildefines_variadic.h
BLI_uvproject.h
BLI_vfontdata.h
BLI_voronoi.h
BLI_voronoi_2d.h
BLI_voxel.h
BLI_winstuff.h
PIL_time.h

View File

@ -18,7 +18,11 @@
* ***** END GPL LICENSE BLOCK *****
*/
#include "BLI_dial.h"
/** \file blender/blenlib/intern/BLI_dial_2d.c
* \ingroup bli
*/
#include "BLI_dial_2d.h"
#include "BLI_math.h"
#include "MEM_guardedalloc.h"

View File

@ -43,11 +43,12 @@
#include "BLI_sys_types.h" /* for intptr_t support */
#include "BLI_utildefines.h"
#include "BLI_hash_mm2a.h"
#include "BLI_mempool.h"
#define GHASH_INTERNAL_API
#include "BLI_ghash.h"
#include "BLI_ghash.h" /* own include */
/* keep last */
#include "BLI_strict_flags.h"
/* -------------------------------------------------------------------- */
@ -1160,219 +1161,6 @@ bool BLI_ghashIterator_done(GHashIterator *ghi)
/** \} */
/* -------------------------------------------------------------------- */
/** \name Generic Key Hash & Comparison Functions
* \{ */
/***/
#if 0
/* works but slower */
uint BLI_ghashutil_ptrhash(const void *key)
{
return (uint)(intptr_t)key;
}
#else
/* based python3.3's pointer hashing function */
uint BLI_ghashutil_ptrhash(const void *key)
{
size_t y = (size_t)key;
/* bottom 3 or 4 bits are likely to be 0; rotate y by 4 to avoid
* excessive hash collisions for dicts and sets */
y = (y >> 4) | (y << (8 * sizeof(void *) - 4));
return (uint)y;
}
#endif
bool BLI_ghashutil_ptrcmp(const void *a, const void *b)
{
return (a != b);
}
uint BLI_ghashutil_uinthash_v4(const uint key[4])
{
uint hash;
hash = key[0];
hash *= 37;
hash += key[1];
hash *= 37;
hash += key[2];
hash *= 37;
hash += key[3];
return hash;
}
uint BLI_ghashutil_uinthash_v4_murmur(const uint key[4])
{
return BLI_hash_mm2((const unsigned char *)key, sizeof(int) * 4 /* sizeof(key) */, 0);
}
bool BLI_ghashutil_uinthash_v4_cmp(const void *a, const void *b)
{
return (memcmp(a, b, sizeof(uint[4])) != 0);
}
uint BLI_ghashutil_uinthash(uint key)
{
key += ~(key << 16);
key ^= (key >> 5);
key += (key << 3);
key ^= (key >> 13);
key += ~(key << 9);
key ^= (key >> 17);
return key;
}
uint BLI_ghashutil_inthash_p(const void *ptr)
{
uintptr_t key = (uintptr_t)ptr;
key += ~(key << 16);
key ^= (key >> 5);
key += (key << 3);
key ^= (key >> 13);
key += ~(key << 9);
key ^= (key >> 17);
return (uint)(key & 0xffffffff);
}
uint BLI_ghashutil_inthash_p_murmur(const void *ptr)
{
uintptr_t key = (uintptr_t)ptr;
return BLI_hash_mm2((const unsigned char *)&key, sizeof(key), 0);
}
uint BLI_ghashutil_inthash_p_simple(const void *ptr)
{
return GET_UINT_FROM_POINTER(ptr);
}
bool BLI_ghashutil_intcmp(const void *a, const void *b)
{
return (a != b);
}
size_t BLI_ghashutil_combine_hash(size_t hash_a, size_t hash_b)
{
return hash_a ^ (hash_b + 0x9e3779b9 + (hash_a << 6) + (hash_a >> 2));
}
/**
* This function implements the widely used "djb" hash apparently posted
* by Daniel Bernstein to comp.lang.c some time ago. The 32 bit
* unsigned hash value starts at 5381 and for each byte 'c' in the
* string, is updated: ``hash = hash * 33 + c``. This
* function uses the signed value of each byte.
*
* note: this is the same hash method that glib 2.34.0 uses.
*/
uint BLI_ghashutil_strhash_n(const char *key, size_t n)
{
const signed char *p;
uint h = 5381;
for (p = (const signed char *)key; n-- && *p != '\0'; p++) {
h = (uint)((h << 5) + h) + (uint)*p;
}
return h;
}
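For concreteness (a worked example, not part of the patch): hashing the two-character string "ab" with the scheme above starts at 5381, then 5381 * 33 + 'a' (97) = 177670, then 177670 * 33 + 'b' (98) = 5863208, which is the value BLI_ghashutil_strhash_n("ab", 2) returns.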
uint BLI_ghashutil_strhash_p(const void *ptr)
{
const signed char *p;
uint h = 5381;
for (p = ptr; *p != '\0'; p++) {
h = (uint)((h << 5) + h) + (uint)*p;
}
return h;
}
uint BLI_ghashutil_strhash_p_murmur(const void *ptr)
{
const unsigned char *key = ptr;
return BLI_hash_mm2(key, strlen((const char *)key) + 1, 0);
}
bool BLI_ghashutil_strcmp(const void *a, const void *b)
{
return (a == b) ? false : !STREQ(a, b);
}
GHashPair *BLI_ghashutil_pairalloc(const void *first, const void *second)
{
GHashPair *pair = MEM_mallocN(sizeof(GHashPair), "GHashPair");
pair->first = first;
pair->second = second;
return pair;
}
uint BLI_ghashutil_pairhash(const void *ptr)
{
const GHashPair *pair = ptr;
uint hash = BLI_ghashutil_ptrhash(pair->first);
return hash ^ BLI_ghashutil_ptrhash(pair->second);
}
bool BLI_ghashutil_paircmp(const void *a, const void *b)
{
const GHashPair *A = a;
const GHashPair *B = b;
return (BLI_ghashutil_ptrcmp(A->first, B->first) ||
BLI_ghashutil_ptrcmp(A->second, B->second));
}
void BLI_ghashutil_pairfree(void *ptr)
{
MEM_freeN(ptr);
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Convenience GHash Creation Functions
* \{ */
GHash *BLI_ghash_ptr_new_ex(const char *info, const uint nentries_reserve)
{
return BLI_ghash_new_ex(BLI_ghashutil_ptrhash, BLI_ghashutil_ptrcmp, info, nentries_reserve);
}
GHash *BLI_ghash_ptr_new(const char *info)
{
return BLI_ghash_ptr_new_ex(info, 0);
}
GHash *BLI_ghash_str_new_ex(const char *info, const uint nentries_reserve)
{
return BLI_ghash_new_ex(BLI_ghashutil_strhash_p, BLI_ghashutil_strcmp, info, nentries_reserve);
}
GHash *BLI_ghash_str_new(const char *info)
{
return BLI_ghash_str_new_ex(info, 0);
}
GHash *BLI_ghash_int_new_ex(const char *info, const uint nentries_reserve)
{
return BLI_ghash_new_ex(BLI_ghashutil_inthash_p, BLI_ghashutil_intcmp, info, nentries_reserve);
}
GHash *BLI_ghash_int_new(const char *info)
{
return BLI_ghash_int_new_ex(info, 0);
}
GHash *BLI_ghash_pair_new_ex(const char *info, const uint nentries_reserve)
{
return BLI_ghash_new_ex(BLI_ghashutil_pairhash, BLI_ghashutil_paircmp, info, nentries_reserve);
}
GHash *BLI_ghash_pair_new(const char *info)
{
return BLI_ghash_pair_new_ex(info, 0);
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name GSet Public API
*
@ -1576,39 +1364,6 @@ void *BLI_gset_pop_key(GSet *gs, const void *key)
/** \} */
/* -------------------------------------------------------------------- */
/** \name Convenience GSet Creation Functions
* \{ */
GSet *BLI_gset_ptr_new_ex(const char *info, const uint nentries_reserve)
{
return BLI_gset_new_ex(BLI_ghashutil_ptrhash, BLI_ghashutil_ptrcmp, info, nentries_reserve);
}
GSet *BLI_gset_ptr_new(const char *info)
{
return BLI_gset_ptr_new_ex(info, 0);
}
GSet *BLI_gset_str_new_ex(const char *info, const uint nentries_reserve)
{
return BLI_gset_new_ex(BLI_ghashutil_strhash_p, BLI_ghashutil_strcmp, info, nentries_reserve);
}
GSet *BLI_gset_str_new(const char *info)
{
return BLI_gset_str_new_ex(info, 0);
}
GSet *BLI_gset_pair_new_ex(const char *info, const uint nentries_reserve)
{
return BLI_gset_new_ex(BLI_ghashutil_pairhash, BLI_ghashutil_paircmp, info, nentries_reserve);
}
GSet *BLI_gset_pair_new(const char *info)
{
return BLI_gset_pair_new_ex(info, 0);
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Debugging & Introspection
* \{ */

View File

@ -0,0 +1,288 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2001-2002 by NaN Holding BV.
* All rights reserved.
*
* The Original Code is: all of this file.
*
* Contributor(s): none yet.
*
* ***** END GPL LICENSE BLOCK *****
*/
/** \file blender/blenlib/intern/BLI_ghash_utils.c
* \ingroup bli
*
* Helper functions and implementations of standard data types for #GHash
 * (not its implementation).
*/
#include <string.h>
#include "MEM_guardedalloc.h"
#include "BLI_utildefines.h"
#include "BLI_hash_mm2a.h"
#include "BLI_ghash.h" /* own include */
/* keep last */
#include "BLI_strict_flags.h"
/* -------------------------------------------------------------------- */
/** \name Generic Key Hash & Comparison Functions
* \{ */
#if 0
/* works but slower */
uint BLI_ghashutil_ptrhash(const void *key)
{
return (uint)(intptr_t)key;
}
#else
/* Based on Python 3.3's pointer hashing function. */
uint BLI_ghashutil_ptrhash(const void *key)
{
size_t y = (size_t)key;
/* bottom 3 or 4 bits are likely to be 0; rotate y by 4 to avoid
* excessive hash collisions for dicts and sets */
y = (y >> 4) | (y << (8 * sizeof(void *) - 4));
return (uint)y;
}
#endif
bool BLI_ghashutil_ptrcmp(const void *a, const void *b)
{
return (a != b);
}
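/* Simple polynomial accumulation with multiplier 37:
 * hash = ((k0 * 37 + k1) * 37 + k2) * 37 + k3. */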
uint BLI_ghashutil_uinthash_v4(const uint key[4])
{
uint hash;
hash = key[0];
hash *= 37;
hash += key[1];
hash *= 37;
hash += key[2];
hash *= 37;
hash += key[3];
return hash;
}
uint BLI_ghashutil_uinthash_v4_murmur(const uint key[4])
{
return BLI_hash_mm2((const unsigned char *)key, sizeof(int) * 4 /* sizeof(key) */, 0);
}
bool BLI_ghashutil_uinthash_v4_cmp(const void *a, const void *b)
{
return (memcmp(a, b, sizeof(uint[4])) != 0);
}
uint BLI_ghashutil_uinthash(uint key)
{
key += ~(key << 16);
key ^= (key >> 5);
key += (key << 3);
key ^= (key >> 13);
key += ~(key << 9);
key ^= (key >> 17);
return key;
}
uint BLI_ghashutil_inthash_p(const void *ptr)
{
uintptr_t key = (uintptr_t)ptr;
key += ~(key << 16);
key ^= (key >> 5);
key += (key << 3);
key ^= (key >> 13);
key += ~(key << 9);
key ^= (key >> 17);
return (uint)(key & 0xffffffff);
}
uint BLI_ghashutil_inthash_p_murmur(const void *ptr)
{
uintptr_t key = (uintptr_t)ptr;
return BLI_hash_mm2((const unsigned char *)&key, sizeof(key), 0);
}
uint BLI_ghashutil_inthash_p_simple(const void *ptr)
{
return GET_UINT_FROM_POINTER(ptr);
}
bool BLI_ghashutil_intcmp(const void *a, const void *b)
{
return (a != b);
}
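/* Boost-style hash_combine mixing step; 0x9e3779b9 is derived from the
 * golden ratio (2^32 / phi). */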
size_t BLI_ghashutil_combine_hash(size_t hash_a, size_t hash_b)
{
return hash_a ^ (hash_b + 0x9e3779b9 + (hash_a << 6) + (hash_a >> 2));
}
/**
* This function implements the widely used "djb" hash apparently posted
* by Daniel Bernstein to comp.lang.c some time ago. The 32 bit
* unsigned hash value starts at 5381 and for each byte 'c' in the
* string, is updated: ``hash = hash * 33 + c``. This
* function uses the signed value of each byte.
*
* note: this is the same hash method that glib 2.34.0 uses.
*/
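/* e.g. hashing "ab": h = 5381, then h = 5381 * 33 + 'a' (97) = 177670,
 * then h = 177670 * 33 + 'b' (98) = 5863208. */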
uint BLI_ghashutil_strhash_n(const char *key, size_t n)
{
const signed char *p;
uint h = 5381;
for (p = (const signed char *)key; n-- && *p != '\0'; p++) {
h = (uint)((h << 5) + h) + (uint)*p;
}
return h;
}
uint BLI_ghashutil_strhash_p(const void *ptr)
{
const signed char *p;
uint h = 5381;
for (p = ptr; *p != '\0'; p++) {
h = (uint)((h << 5) + h) + (uint)*p;
}
return h;
}
uint BLI_ghashutil_strhash_p_murmur(const void *ptr)
{
const unsigned char *key = ptr;
return BLI_hash_mm2(key, strlen((const char *)key) + 1, 0);
}
bool BLI_ghashutil_strcmp(const void *a, const void *b)
{
return (a == b) ? false : !STREQ(a, b);
}
GHashPair *BLI_ghashutil_pairalloc(const void *first, const void *second)
{
GHashPair *pair = MEM_mallocN(sizeof(GHashPair), "GHashPair");
pair->first = first;
pair->second = second;
return pair;
}
uint BLI_ghashutil_pairhash(const void *ptr)
{
const GHashPair *pair = ptr;
uint hash = BLI_ghashutil_ptrhash(pair->first);
return hash ^ BLI_ghashutil_ptrhash(pair->second);
}
bool BLI_ghashutil_paircmp(const void *a, const void *b)
{
const GHashPair *A = a;
const GHashPair *B = b;
return (BLI_ghashutil_ptrcmp(A->first, B->first) ||
BLI_ghashutil_ptrcmp(A->second, B->second));
}
void BLI_ghashutil_pairfree(void *ptr)
{
MEM_freeN(ptr);
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Convenience GHash Creation Functions
* \{ */
GHash *BLI_ghash_ptr_new_ex(const char *info, const uint nentries_reserve)
{
return BLI_ghash_new_ex(BLI_ghashutil_ptrhash, BLI_ghashutil_ptrcmp, info, nentries_reserve);
}
GHash *BLI_ghash_ptr_new(const char *info)
{
return BLI_ghash_ptr_new_ex(info, 0);
}
GHash *BLI_ghash_str_new_ex(const char *info, const uint nentries_reserve)
{
return BLI_ghash_new_ex(BLI_ghashutil_strhash_p, BLI_ghashutil_strcmp, info, nentries_reserve);
}
GHash *BLI_ghash_str_new(const char *info)
{
return BLI_ghash_str_new_ex(info, 0);
}
GHash *BLI_ghash_int_new_ex(const char *info, const uint nentries_reserve)
{
return BLI_ghash_new_ex(BLI_ghashutil_inthash_p, BLI_ghashutil_intcmp, info, nentries_reserve);
}
GHash *BLI_ghash_int_new(const char *info)
{
return BLI_ghash_int_new_ex(info, 0);
}
GHash *BLI_ghash_pair_new_ex(const char *info, const uint nentries_reserve)
{
return BLI_ghash_new_ex(BLI_ghashutil_pairhash, BLI_ghashutil_paircmp, info, nentries_reserve);
}
GHash *BLI_ghash_pair_new(const char *info)
{
return BLI_ghash_pair_new_ex(info, 0);
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Convenience GSet Creation Functions
* \{ */
GSet *BLI_gset_ptr_new_ex(const char *info, const uint nentries_reserve)
{
return BLI_gset_new_ex(BLI_ghashutil_ptrhash, BLI_ghashutil_ptrcmp, info, nentries_reserve);
}
GSet *BLI_gset_ptr_new(const char *info)
{
return BLI_gset_ptr_new_ex(info, 0);
}
GSet *BLI_gset_str_new_ex(const char *info, const uint nentries_reserve)
{
return BLI_gset_new_ex(BLI_ghashutil_strhash_p, BLI_ghashutil_strcmp, info, nentries_reserve);
}
GSet *BLI_gset_str_new(const char *info)
{
return BLI_gset_str_new_ex(info, 0);
}
GSet *BLI_gset_pair_new_ex(const char *info, const uint nentries_reserve)
{
return BLI_gset_new_ex(BLI_ghashutil_pairhash, BLI_ghashutil_paircmp, info, nentries_reserve);
}
GSet *BLI_gset_pair_new(const char *info)
{
return BLI_gset_pair_new_ex(info, 0);
}
/** \} */
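A minimal usage sketch (not part of the patch) for the convenience constructors above, assuming the insert/lookup/free calls from BLI_ghash.h keep their usual signatures; the function and key names below are illustrative only:
#include "BLI_ghash.h"
static void ghash_utils_example(void)
{
	/* String-keyed hash, built on BLI_ghashutil_strhash_p / BLI_ghashutil_strcmp. */
	GHash *gh = BLI_ghash_str_new("ghash_utils_example gh");
	char *key = "suzanne", *value = "monkey";
	BLI_ghash_insert(gh, key, value);
	if (BLI_ghash_haskey(gh, key)) {
		const char *found = BLI_ghash_lookup(gh, key);  /* "monkey" */
		(void)found;
	}
	/* Neither keys nor values are owned by the hash here, so no free callbacks. */
	BLI_ghash_free(gh, NULL, NULL);
}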

View File

@ -30,7 +30,7 @@
#include "MEM_guardedalloc.h"
#include "BLI_utildefines.h"
#include "BLI_boxpack2d.h" /* own include */
#include "BLI_boxpack_2d.h" /* own include */
#include "BLI_sort.h" /* qsort_r */
#define qsort_r BLI_qsort_r

View File

@ -18,7 +18,7 @@
* ***** END GPL LICENSE BLOCK *****
*/
/** \file blender/blenlib/intern/convexhull2d.c
/** \file blender/blenlib/intern/convexhull_2d.c
* \ingroup bli
*/
@ -28,7 +28,7 @@
#include "MEM_guardedalloc.h"
#include "BLI_convexhull2d.h"
#include "BLI_convexhull_2d.h"
#include "BLI_math.h"
#include "BLI_strict_flags.h"
#include "BLI_utildefines.h"

View File

@ -35,7 +35,7 @@
#include "MEM_guardedalloc.h"
#include "BLI_rand.h"
#include "BLI_jitter.h"
#include "BLI_jitter_2d.h"
#include "BLI_strict_flags.h"

View File

@ -26,7 +26,7 @@
*
*/
/** \file blender/blenlib/intern/lasso.c
/** \file blender/blenlib/intern/lasso_2d.c
* \ingroup bli
*/
@ -35,7 +35,7 @@
#include "BLI_math.h"
#include "BLI_strict_flags.h"
#include "BLI_lasso.h" /* own include */
#include "BLI_lasso_2d.h" /* own include */
void BLI_lasso_boundbox(rcti *rect, const int mcords[][2], const unsigned int moves)
{

View File

@ -18,7 +18,7 @@
* ***** END GPL LICENSE BLOCK *****
*/
/** \file blender/blenlib/intern/polyfill2d.c
/** \file blender/blenlib/intern/polyfill_2d.c
* \ingroup bli
*
* An ear clipping algorithm to triangulate single boundary polygons.
@ -53,7 +53,7 @@
#include "BLI_memarena.h"
#include "BLI_alloca.h"
#include "BLI_polyfill2d.h" /* own include */
#include "BLI_polyfill_2d.h" /* own include */
#include "BLI_strict_flags.h"

View File

@ -44,7 +44,7 @@
#include "BLI_memarena.h"
#include "BLI_heap.h"
#include "BLI_polyfill2d_beautify.h" /* own include */
#include "BLI_polyfill_2d_beautify.h" /* own include */
#include "BLI_strict_flags.h"

View File

@ -23,20 +23,18 @@
* ***** END GPL LICENSE BLOCK *****
*/
/*
/** \file blender/blenlib/intern/voronoi_2d.c
* \ingroup bli
*
* Fortune's algorithm implemented using explanation and some code snippets from
* http://blog.ivank.net/fortunes-algorithm-and-implementation.html
*/
/** \file blender/blenlib/intern/voronoi.c
* \ingroup bli
*/
#include "MEM_guardedalloc.h"
#include "BLI_listbase.h"
#include "BLI_math.h"
#include "BLI_voronoi.h"
#include "BLI_voronoi_2d.h"
#include "BLI_utildefines.h"
#define VORONOI_EPS 1e-2f

View File

@ -36,8 +36,8 @@
#include "BLI_alloca.h"
#include "BLI_math.h"
#include "BLI_memarena.h"
#include "BLI_polyfill2d.h"
#include "BLI_polyfill2d_beautify.h"
#include "BLI_polyfill_2d.h"
#include "BLI_polyfill_2d_beautify.h"
#include "BLI_linklist.h"
#include "BLI_edgehash.h"
#include "BLI_heap.h"

View File

@ -39,8 +39,8 @@
#include "BLI_alloca.h"
#include "BLI_memarena.h"
#include "BLI_heap.h"
#include "BLI_polyfill2d.h"
#include "BLI_polyfill2d_beautify.h"
#include "BLI_polyfill_2d.h"
#include "BLI_polyfill_2d_beautify.h"
#include "BLI_linklist.h"
#include "bmesh.h"

View File

@ -37,7 +37,7 @@
#include "BLI_math.h"
#include "BLI_heap.h"
#include "BLI_polyfill2d_beautify.h"
#include "BLI_polyfill_2d_beautify.h"
#include "MEM_guardedalloc.h"

View File

@ -38,8 +38,8 @@
#include "BLI_alloca.h"
#include "BLI_memarena.h"
#include "BLI_edgehash.h"
#include "BLI_polyfill2d.h"
#include "BLI_polyfill2d_beautify.h"
#include "BLI_polyfill_2d.h"
#include "BLI_polyfill_2d_beautify.h"
#include "BLI_utildefines_stack.h"

View File

@ -38,8 +38,8 @@
#include "BLI_linklist.h"
/* only for defines */
#include "BLI_polyfill2d.h"
#include "BLI_polyfill2d_beautify.h"
#include "BLI_polyfill_2d.h"
#include "BLI_polyfill_2d_beautify.h"
#include "bmesh.h"

View File

@ -35,7 +35,7 @@
#include "BLI_string.h"
extern "C" {
# include "BLI_voronoi.h"
# include "BLI_voronoi_2d.h"
}
/**

View File

@ -28,7 +28,7 @@ extern "C" {
#include "BLI_listbase.h"
#include "BLI_math.h"
#include "BLI_math_color.h"
#include "BLI_jitter.h"
#include "BLI_jitter_2d.h"
#include "BKE_movieclip.h"
#include "BKE_node.h"

View File

@ -36,7 +36,7 @@
#include "BLI_blenlib.h"
#include "BLI_utildefines.h"
#include "BLI_lasso.h"
#include "BLI_lasso_2d.h"
#include "BLI_math.h"
#include "DNA_anim_types.h"

View File

@ -41,7 +41,7 @@
#include "BLI_math.h"
#include "BLI_utildefines.h"
#include "BLI_polyfill2d.h"
#include "BLI_polyfill_2d.h"
#include "BLF_api.h"
#include "BLT_translation.h"

View File

@ -37,7 +37,7 @@
#include "BLI_blenlib.h"
#include "BLI_ghash.h"
#include "BLI_lasso.h"
#include "BLI_lasso_2d.h"
#include "BLI_utildefines.h"
#include "BLI_math_vector.h"

View File

@ -37,7 +37,7 @@
#include "MEM_guardedalloc.h"
#include "BLI_math.h"
#include "BLI_dial.h"
#include "BLI_dial_2d.h"
#include "BLI_rect.h"
#include "BKE_context.h"

View File

@ -37,7 +37,7 @@
#include "MEM_guardedalloc.h"
#include "BLI_math.h"
#include "BLI_dial.h"
#include "BLI_dial_2d.h"
#include "BLI_rect.h"
#include "BKE_context.h"

View File

@ -33,7 +33,7 @@
#include "BLI_utildefines.h"
#include "BLI_rect.h"
#include "BLI_lasso.h"
#include "BLI_lasso_2d.h"
#include "BLI_math.h"
#include "BKE_context.h"

View File

@ -46,7 +46,7 @@
#include "DNA_workspace_types.h"
#include "BLI_math.h"
#include "BLI_lasso.h"
#include "BLI_lasso_2d.h"
#include "BLI_listbase.h"
#include "BLI_string.h"
#include "BLI_kdtree.h"

View File

@ -39,7 +39,7 @@
#include "BLI_math_matrix.h"
#include "BLI_math_geom.h"
#include "BLI_utildefines.h"
#include "BLI_lasso.h"
#include "BLI_lasso_2d.h"
#include "BLI_task.h"
#include "BKE_pbvh.h"

View File

@ -37,7 +37,7 @@
#include "BLI_math.h"
#include "BLI_blenlib.h"
#include "BLI_dial.h"
#include "BLI_dial_2d.h"
#include "BLI_task.h"
#include "BLI_utildefines.h"
#include "BLI_ghash.h"

View File

@ -36,7 +36,7 @@
#include "BLI_blenlib.h"
#include "BLI_dlrbTree.h"
#include "BLI_lasso.h"
#include "BLI_lasso_2d.h"
#include "BLI_utildefines.h"
#include "DNA_anim_types.h"

View File

@ -37,7 +37,7 @@
#include "BLI_utildefines.h"
#include "BLI_math.h"
#include "BLI_rect.h"
#include "BLI_lasso.h"
#include "BLI_lasso_2d.h"
#include "BKE_context.h"
#include "BKE_tracking.h"

View File

@ -37,7 +37,7 @@
#include "BLI_blenlib.h"
#include "BLI_math.h"
#include "BLI_utildefines.h"
#include "BLI_lasso.h"
#include "BLI_lasso_2d.h"
#include "DNA_anim_types.h"
#include "DNA_screen_types.h"

View File

@ -34,7 +34,7 @@
#include "BLI_utildefines.h"
#include "BLI_rect.h"
#include "BLI_lasso.h"
#include "BLI_lasso_2d.h"
#include "BLI_math.h"
#include "BLI_string.h"
#include "BLI_string_utf8.h"

View File

@ -35,7 +35,7 @@
#include "BLI_rect.h"
#include "BLI_string.h"
#include "BLI_threads.h"
#include "BLI_jitter.h"
#include "BLI_jitter_2d.h"
#include "BIF_gl.h"
#include "BIF_glutil.h"

View File

@ -48,7 +48,7 @@
#include "BLI_blenlib.h"
#include "BLI_math.h"
#include "BLI_jitter.h"
#include "BLI_jitter_2d.h"
#include "BLI_utildefines.h"
#include "BLI_endian_switch.h"
#include "BLI_threads.h"

View File

@ -48,7 +48,7 @@
#include "MEM_guardedalloc.h"
#include "BLI_math.h"
#include "BLI_lasso.h"
#include "BLI_lasso_2d.h"
#include "BLI_rect.h"
#include "BLI_linklist.h"
#include "BLI_listbase.h"

View File

@ -49,7 +49,7 @@
#include "BLI_utildefines.h"
#include "BLI_alloca.h"
#include "BLI_math.h"
#include "BLI_lasso.h"
#include "BLI_lasso_2d.h"
#include "BLI_blenlib.h"
#include "BLI_array.h"

View File

@ -32,8 +32,8 @@
#include "BLI_math.h"
#include "BLI_rand.h"
#include "BLI_heap.h"
#include "BLI_boxpack2d.h"
#include "BLI_convexhull2d.h"
#include "BLI_boxpack_2d.h"
#include "BLI_convexhull_2d.h"
#include "uvedit_parametrizer.h"

View File

@ -34,7 +34,7 @@
#include "BLI_utildefines.h"
#include "BLI_rect.h"
#include "BLI_math.h"
#include "BLI_polyfill2d.h"
#include "BLI_polyfill_2d.h"
#include "BLI_sort_utils.h"

View File

@ -107,6 +107,7 @@ set(INC_SYS
set(SRC
../../blenlib/intern/BLI_ghash.c
../../blenlib/intern/BLI_ghash_utils.c
../../blenlib/intern/BLI_mempool.c
../../blenlib/intern/endian_switch.c
../../blenlib/intern/hash_mm2a.c

View File

@ -33,7 +33,7 @@
#include "BLI_utildefines.h"
#include "BLI_kdopbvh.h"
#include "BLI_polyfill2d.h"
#include "BLI_polyfill_2d.h"
#include "BLI_math.h"
#include "BLI_ghash.h"
#include "BLI_memarena.h"

View File

@ -34,8 +34,8 @@
#ifndef MATH_STANDALONE /* define when building outside blender */
# include "MEM_guardedalloc.h"
# include "BLI_blenlib.h"
# include "BLI_boxpack2d.h"
# include "BLI_convexhull2d.h"
# include "BLI_boxpack_2d.h"
# include "BLI_convexhull_2d.h"
# include "BKE_displist.h"
# include "BKE_curve.h"
#endif

View File

@ -38,7 +38,7 @@
#include "BLI_math.h"
#include "BLI_blenlib.h"
#include "BLI_jitter.h"
#include "BLI_jitter_2d.h"
#include "BLI_utildefines.h"
#include "DNA_camera_types.h"

View File

@ -40,7 +40,7 @@
#include "BLI_math.h"
#include "BLI_blenlib.h"
#include "BLI_jitter.h"
#include "BLI_jitter_2d.h"
#include "BLI_memarena.h"
#include "BLI_rand.h"
#include "BLI_utildefines.h"

View File

@ -42,7 +42,7 @@
#include "BLI_math.h"
#include "BLI_blenlib.h"
#include "BLI_jitter.h"
#include "BLI_jitter_2d.h"
#include "BLI_threads.h"
#include "BLI_utildefines.h"

View File

@ -41,7 +41,7 @@
#include "BLI_blenlib.h"
#include "BLI_math.h"
#include "BLI_utildefines.h"
#include "BLI_lasso.h"
#include "BLI_lasso_2d.h"
#include "BKE_context.h"

View File

@ -59,7 +59,7 @@
#include "PIL_time.h"
#include "BLI_blenlib.h"
#include "BLI_dial.h"
#include "BLI_dial_2d.h"
#include "BLI_dynstr.h" /*for WM_operator_pystring */
#include "BLI_math.h"
#include "BLI_utildefines.h"

View File

@ -12,7 +12,7 @@
extern "C" {
#include "BLI_utildefines.h"
#include "BLI_array_utils.h"
#include "BLI_polyfill2d.h"
#include "BLI_polyfill_2d.h"
#include "BLI_edgehash.h"
#include "BLI_math.h"
#include "MEM_guardedalloc.h"
@ -22,7 +22,7 @@ extern "C" {
#endif
#ifdef USE_BEAUTIFY
#include "BLI_polyfill2d_beautify.h"
#include "BLI_polyfill_2d_beautify.h"
#include "BLI_memarena.h"
#include "BLI_heap.h"
#endif

View File

@ -53,7 +53,7 @@ BLENDER_TEST(BLI_math_color "bf_blenlib")
BLENDER_TEST(BLI_math_geom "bf_blenlib")
BLENDER_TEST(BLI_memiter "bf_blenlib")
BLENDER_TEST(BLI_path_util "${BLI_path_util_extra_libs}")
BLENDER_TEST(BLI_polyfill2d "bf_blenlib")
BLENDER_TEST(BLI_polyfill_2d "bf_blenlib")
BLENDER_TEST(BLI_stack "bf_blenlib")
BLENDER_TEST(BLI_string "bf_blenlib")
BLENDER_TEST(BLI_string_utf8 "bf_blenlib")