Merge remote-tracking branch 'origin/master' into blender2.8

Dalai Felinto 2016-10-25 11:18:41 +00:00
commit b4f849b9c6
62 changed files with 1001 additions and 544 deletions

View File

@ -343,9 +343,9 @@ option(WITH_OPENCOLLADA "Enable OpenCollada Support (http://www.opencollada.or
# Sound output
option(WITH_SDL "Enable SDL for sound and joystick support" ${_init_SDL})
option(WITH_OPENAL "Enable OpenAL Support (http://www.openal.org)" ON)
option(WITH_JACK "Enable Jack Support (http://www.jackaudio.org)" ${_init_JACK})
option(WITH_JACK "Enable JACK Support (http://www.jackaudio.org)" ${_init_JACK})
if(UNIX AND NOT APPLE)
option(WITH_JACK_DYNLOAD "Enable runtime dynamic Jack libraries loading" OFF)
option(WITH_JACK_DYNLOAD "Enable runtime dynamic JACK libraries loading" OFF)
endif()
if(UNIX AND NOT APPLE)
option(WITH_SDL_DYNLOAD "Enable runtime dynamic SDL libraries loading" OFF)

View File

@ -1,15 +1,15 @@
# - Find Jack library
# Find the native Jack includes and library
# - Find JACK library
# Find the native JACK includes and library
# This module defines
# JACK_INCLUDE_DIRS, where to find jack.h, Set when
# JACK_INCLUDE_DIR is found.
# JACK_LIBRARIES, libraries to link against to use Jack.
# JACK_ROOT_DIR, The base directory to search for Jack.
# JACK_LIBRARIES, libraries to link against to use JACK.
# JACK_ROOT_DIR, The base directory to search for JACK.
# This can also be an environment variable.
# JACK_FOUND, If false, do not try to use Jack.
# JACK_FOUND, If false, do not try to use JACK.
#
# also defined, but not for general use are
# JACK_LIBRARY, where to find the Jack library.
# JACK_LIBRARY, where to find the JACK library.
#=============================================================================
# Copyright 2011 Blender Foundation.

View File

@ -518,7 +518,8 @@ function(setup_liblinks
target_link_libraries(${target}
${BLENDER_GL_LIBRARIES})
target_link_libraries(${target} ${PLATFORM_LINKLIBS} ${CMAKE_DL_LIBS})
#target_link_libraries(${target} ${PLATFORM_LINKLIBS} ${CMAKE_DL_LIBS})
target_link_libraries(${target} ${PLATFORM_LINKLIBS})
endfunction()

View File

@ -121,14 +121,8 @@ Add the following script to the text editor in Blender.
obj.location.x += 1.0
.. image:: run_script.png
:width: 924px
:align: center
:height: 574px
:alt: Run Script button
Click the Run Script button, all objects in the active scene are moved by 1.0 Blender unit.
Next we will make this script into an add-on.
Click the :ref:`Run Script button <blender_manual:editors-text-run-script>`,
all objects in the active scene are moved by 1.0 Blender unit.
Write the Add-on (Simple)

View File

@ -1632,6 +1632,13 @@ def write_sphinx_conf_py(basepath):
file = open(filepath, "w", encoding="utf-8")
fw = file.write
fw("import sys, os\n")
fw("\n")
fw("extensions = ['sphinx.ext.intersphinx']\n")
fw("\n")
fw("intersphinx_mapping = {'blender_manual': ('https://www.blender.org/manual/', None)}\n")
fw("\n")
fw("project = 'Blender'\n")
# fw("master_doc = 'index'\n")
fw("copyright = u'Blender Foundation'\n")
@ -1648,6 +1655,7 @@ def write_sphinx_conf_py(basepath):
# not helpful since the source is generated, adds to upload size.
fw("html_copy_source = False\n")
fw("html_split_index = True\n")
fw("\n")
# needed for latex, pdf gen

View File

@ -155,18 +155,18 @@ AUD_Device* AUD_init(const char* device, AUD_DeviceSpecs specs, int buffersize,
}
#endif
#ifdef WITH_JACK
else if(dname == "Jack")
else if(dname == "JACK")
{
#ifdef __APPLE__
struct stat st;
if (stat("/Library/Frameworks/Jackmp.framework", &st) != 0) {
printf("Warning: Jack Framework not installed\n");
printf("Warning: JACK Framework not installed\n");
return NULL;
}
else
#endif
if (!AUD_jack_supported()) {
printf("Warning: Jack cllient not installed\n");
printf("Warning: JACK cllient not installed\n");
return NULL;
}
else {

View File

@ -61,7 +61,7 @@ typedef struct
#endif
/**
* Initializes audio rutines (FFMPEG/Jack if it is enabled).
* Initializes audio routines (FFMPEG/JACK if it is enabled).
*/
extern void AUD_initOnce(void);

View File

@ -95,6 +95,21 @@ void AUD_Mixer::mix(sample_t* buffer, int start, int length, float volume)
out[i + start] += buffer[i] * volume;
}
void AUD_Mixer::mix(sample_t* buffer, int start, int length, float volume_to, float volume_from)
{
sample_t* out = m_buffer.getBuffer();
length = (std::min(m_length, length + start) - start);
for(int i = 0; i < length; i++)
{
float volume = volume_from * (1.0f - i / float(length)) + volume_to * (i / float(length));
for(int c = 0; c < m_specs.channels; c++)
out[(i + start) * m_specs.channels + c] += buffer[i * m_specs.channels + c] * volume;
}
}
void AUD_Mixer::read(data_t* buffer, float volume)
{
sample_t* out = m_buffer.getBuffer();
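The new AUD_Mixer::mix() overload above ramps the gain linearly from volume_from to volume_to across the mixed block; together with the m_old_volume bookkeeping added to AUD_SoftwareDevice further down, the mixer fades from the volume used in the previous device callback to the current one instead of jumping, which avoids audible clicks. A minimal sketch of the same ramp for a mono buffer (illustrative names, not the Audaspace API):

	// Sketch: linear gain ramp over one block; the committed code additionally
	// loops over m_specs.channels for interleaved multi-channel data.
	static void mix_block(const float* in, float* out, int length,
	                      float volume_to, float volume_from)
	{
		for(int i = 0; i < length; i++) {
			float t = i / float(length);   /* 0 at the block start, approaching 1 at the end */
			float volume = volume_from * (1.0f - t) + volume_to * t;
			out[i] += in[i] * volume;      /* superpose onto the output, as the mixer does */
		}
	}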

View File

@ -95,6 +95,8 @@ public:
*/
void mix(sample_t* buffer, int start, int length, float volume);
void mix(sample_t* buffer, int start, int length, float volume_to, float volume_from);
/**
* Writes the mixing buffer into an output buffer.
* \param buffer The target buffer for superposing.

View File

@ -89,7 +89,7 @@ bool AUD_SoftwareDevice::AUD_SoftwareHandle::pause(bool keep)
}
AUD_SoftwareDevice::AUD_SoftwareHandle::AUD_SoftwareHandle(AUD_SoftwareDevice* device, boost::shared_ptr<AUD_IReader> reader, boost::shared_ptr<AUD_PitchReader> pitch, boost::shared_ptr<AUD_ResampleReader> resampler, boost::shared_ptr<AUD_ChannelMapperReader> mapper, bool keep) :
m_reader(reader), m_pitch(pitch), m_resampler(resampler), m_mapper(mapper), m_keep(keep), m_user_pitch(1.0f), m_user_volume(1.0f), m_user_pan(0.0f), m_volume(1.0f), m_loopcount(0),
m_reader(reader), m_pitch(pitch), m_resampler(resampler), m_mapper(mapper), m_keep(keep), m_user_pitch(1.0f), m_user_volume(1.0f), m_user_pan(0.0f), m_volume(1.0f), m_old_volume(1.0f), m_loopcount(0),
m_relative(true), m_volume_max(1.0f), m_volume_min(0), m_distance_max(std::numeric_limits<float>::max()),
m_distance_reference(1.0f), m_attenuation(1.0f), m_cone_angle_outer(M_PI), m_cone_angle_inner(M_PI), m_cone_volume_outer(0),
m_flags(AUD_RENDER_CONE), m_stop(NULL), m_stop_data(NULL), m_status(AUD_STATUS_PLAYING), m_device(device)
@ -100,6 +100,8 @@ void AUD_SoftwareDevice::AUD_SoftwareHandle::update()
{
int flags = 0;
m_old_volume = m_volume;
AUD_Vector3 SL;
if(m_relative)
SL = -m_location;
@ -404,7 +406,7 @@ bool AUD_SoftwareDevice::AUD_SoftwareHandle::setVolume(float volume)
if(volume == 0)
{
m_volume = volume;
m_old_volume = m_volume = volume;
m_flags |= AUD_RENDER_VOLUME;
}
else
@ -772,7 +774,7 @@ void AUD_SoftwareDevice::mix(data_t* buffer, int length)
// in case of looping
while(pos + len < length && sound->m_loopcount && eos)
{
m_mixer->mix(buf, pos, len, sound->m_volume);
m_mixer->mix(buf, pos, len, sound->m_volume, sound->m_old_volume);
pos += len;
@ -789,7 +791,7 @@ void AUD_SoftwareDevice::mix(data_t* buffer, int length)
break;
}
m_mixer->mix(buf, pos, len, sound->m_volume);
m_mixer->mix(buf, pos, len, sound->m_volume, sound->m_old_volume);
// in case the end of the sound is reached
if(eos && !sound->m_loopcount)

View File

@ -84,6 +84,7 @@ protected:
/// The calculated final volume of the source.
float m_volume;
float m_old_volume;
/// The loop count of the source.
int m_loopcount;

View File

@ -41,7 +41,7 @@
typedef void (*AUD_syncFunction)(void*, int, float);
/**
* This device plays back through Jack.
* This device plays back through JACK.
*/
class AUD_JackDevice : public AUD_SoftwareDevice
{
@ -90,7 +90,7 @@ private:
static int jack_sync(jack_transport_state_t state, jack_position_t* pos, void* data);
/**
* Next Jack Transport state (-1 if not expected to change).
* Next JACK Transport state (-1 if not expected to change).
*/
jack_transport_state_t m_nextState;
@ -150,7 +150,7 @@ protected:
public:
/**
* Creates a Jack client for audio output.
* Creates a JACK client for audio output.
* \param name The client name.
* \param specs The wanted audio specification, where only the channel count
* is important.
@ -160,7 +160,7 @@ public:
AUD_JackDevice(std::string name, AUD_DeviceSpecs specs, int buffersize = AUD_DEFAULT_BUFFER_SIZE);
/**
* Closes the Jack client.
* Closes the JACK client.
*/
virtual ~AUD_JackDevice();

View File

@ -42,7 +42,7 @@
# define JACK_SYM extern
#endif
/* All loadable Jack sumbols, prototypes from original jack.h */
/* All loadable JACK symbols, prototypes from original jack.h */
JACK_SYM jack_transport_state_t (*AUD_jack_transport_query) (
const jack_client_t *client,

View File

@ -847,7 +847,7 @@ static void sync_mesh_fluid_motion(BL::Object& b_ob, Scene *scene, Mesh *mesh)
/* Only export previous and next frame, we don't have any in between data. */
float motion_times[2] = {-1.0f, 1.0f};
for (int step = 0; step < 2; step++) {
for(int step = 0; step < 2; step++) {
float relative_time = motion_times[step] * scene->motion_shutter_time() * 0.5f;
float3 *mP = attr_mP->data_float3() + step*mesh->verts.size();
@ -1081,7 +1081,7 @@ void BlenderSync::sync_mesh_motion(BL::Object& b_ob,
/* fluid motion is exported immediate with mesh, skip here */
BL::DomainFluidSettings b_fluid_domain = object_fluid_domain_find(b_ob);
if (b_fluid_domain)
if(b_fluid_domain)
return;
if(ccl::BKE_object_is_deform_modified(b_ob, b_scene, preview)) {

View File

@ -26,6 +26,7 @@
#include "util_md5.h"
#include "util_opengl.h"
#include "util_path.h"
#include "util_string.h"
#include "util_types.h"
#ifdef WITH_OSL
@ -437,13 +438,13 @@ static PyObject *osl_update_node_func(PyObject * /*self*/, PyObject *args)
continue;
/* determine socket type */
std::string socket_type;
string socket_type;
BL::NodeSocket::type_enum data_type = BL::NodeSocket::type_VALUE;
float4 default_float4 = make_float4(0.0f, 0.0f, 0.0f, 1.0f);
float default_float = 0.0f;
int default_int = 0;
std::string default_string = "";
string default_string = "";
if(param->isclosure) {
socket_type = "NodeSocketShader";
data_type = BL::NodeSocket::type_SHADER;

View File

@ -27,12 +27,13 @@
#include "blender_util.h"
#include "util_debug.h"
#include "util_string.h"
CCL_NAMESPACE_BEGIN
typedef map<void*, ShaderInput*> PtrInputMap;
typedef map<void*, ShaderOutput*> PtrOutputMap;
typedef map<std::string, ConvertNode*> ProxyMap;
typedef map<string, ConvertNode*> ProxyMap;
/* Find */

View File

@ -159,7 +159,7 @@ private:
id_map<ObjectKey, Light> light_map;
set<Mesh*> mesh_synced;
set<Mesh*> mesh_motion_synced;
std::set<float> motion_times;
set<float> motion_times;
void *world_map;
bool world_recalc;

View File

@ -147,7 +147,7 @@ void BVH::pack_primitives()
/* Count number of triangles primitives in BVH. */
for(unsigned int i = 0; i < tidx_size; i++) {
if((pack.prim_index[i] != -1)) {
if ((pack.prim_type[i] & PRIMITIVE_ALL_TRIANGLE) != 0) {
if((pack.prim_type[i] & PRIMITIVE_ALL_TRIANGLE) != 0) {
++num_prim_triangles;
}
}
@ -343,7 +343,7 @@ void BVH::pack_instances(size_t nodes_size, size_t leaf_nodes_size)
size_t leaf_nodes_offset_size = bvh->pack.leaf_nodes.size();
for(size_t i = 0, j = 0;
i < leaf_nodes_offset_size;
i+= BVH_NODE_LEAF_SIZE, j++)
i += BVH_NODE_LEAF_SIZE, j++)
{
int4 data = leaf_nodes_offset[i];
data.x += prim_offset;
@ -450,7 +450,7 @@ void RegularBVH::pack_inner(const BVHStackEntry& e,
const BVHStackEntry& e0,
const BVHStackEntry& e1)
{
if (e0.node->is_unaligned() || e1.node->is_unaligned()) {
if(e0.node->is_unaligned() || e1.node->is_unaligned()) {
pack_unaligned_inner(e, e0, e1);
} else {
pack_aligned_inner(e, e0, e1);
@ -597,8 +597,8 @@ void RegularBVH::pack_nodes(const BVHNode *root)
else {
/* inner node */
int idx[2];
for (int i = 0; i < 2; ++i) {
if (e.node->get_child(i)->is_leaf()) {
for(int i = 0; i < 2; ++i) {
if(e.node->get_child(i)->is_leaf()) {
idx[i] = nextLeafNodeIdx++;
}
else {

View File

@ -477,6 +477,7 @@ BVHNode* BVHBuild::build_node(const BVHObjectBinning& range, int level)
float unalignedSplitSAH = FLT_MAX;
float unalignedLeafSAH = FLT_MAX;
Transform aligned_space;
bool do_unalinged_split = false;
if(params.use_unaligned_nodes &&
splitSAH > params.unaligned_split_threshold*leafSAH)
{
@ -496,11 +497,15 @@ BVHNode* BVHBuild::build_node(const BVHObjectBinning& range, int level)
return create_leaf_node(range, references);
}
}
/* Check whether unaligned split is better than the regular one. */
if(unalignedSplitSAH < splitSAH) {
do_unalinged_split = true;
}
}
/* Perform split. */
BVHObjectBinning left, right;
if(unalignedSplitSAH < splitSAH) {
if(do_unalinged_split) {
unaligned_range.split(&references[0], left, right);
}
else {
@ -508,7 +513,7 @@ BVHNode* BVHBuild::build_node(const BVHObjectBinning& range, int level)
}
BoundBox bounds;
if(unalignedSplitSAH < splitSAH) {
if(do_unalinged_split) {
bounds = unaligned_heuristic.compute_aligned_boundbox(
range, &references[0], aligned_space);
}
@ -533,7 +538,7 @@ BVHNode* BVHBuild::build_node(const BVHObjectBinning& range, int level)
task_pool.push(new BVHBuildTask(this, inner, 1, right, level + 1), true);
}
if(unalignedSplitSAH < splitSAH) {
if(do_unalinged_split) {
inner->set_aligned_space(aligned_space);
}
@ -583,6 +588,7 @@ BVHNode* BVHBuild::build_node(const BVHRange& range,
float unalignedSplitSAH = FLT_MAX;
/* float unalignedLeafSAH = FLT_MAX; */
Transform aligned_space;
bool do_unalinged_split = false;
if(params.use_unaligned_nodes &&
splitSAH > params.unaligned_split_threshold*leafSAH)
{
@ -599,11 +605,15 @@ BVHNode* BVHBuild::build_node(const BVHRange& range,
unalignedSplitSAH = params.sah_node_cost * unaligned_split.bounds.half_area() +
params.sah_primitive_cost * unaligned_split.nodeSAH;
/* TODO(sergey): Check we can create leaf already. */
/* Check whether unaligned split is better than the regular one. */
if(unalignedSplitSAH < splitSAH) {
do_unalinged_split = true;
}
}
/* Do split. */
BVHRange left, right;
if(unalignedSplitSAH < splitSAH) {
if(do_unalinged_split) {
unaligned_split.split(this, left, right, range);
}
else {
@ -613,7 +623,7 @@ BVHNode* BVHBuild::build_node(const BVHRange& range,
progress_total += left.size() + right.size() - range.size();
BoundBox bounds;
if(unalignedSplitSAH < splitSAH) {
if(do_unalinged_split) {
bounds = unaligned_heuristic.compute_aligned_boundbox(
range, &references->at(0), aligned_space);
}
@ -657,7 +667,7 @@ BVHNode* BVHBuild::build_node(const BVHRange& range,
true);
}
if(unalignedSplitSAH < splitSAH) {
if(do_unalinged_split) {
inner->set_aligned_space(aligned_space);
}
@ -787,7 +797,7 @@ BVHNode* BVHBuild::create_leaf_node(const BVHRange& range,
if(params.use_unaligned_nodes && !alignment_found) {
alignment_found =
unaligned_heuristic.compute_aligned_space(p_ref[i][j],
&aligned_space);
&aligned_space);
}
}
LeafNode *leaf_node = new LeafNode(bounds[i],
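The repeated unalignedSplitSAH < splitSAH comparisons above are replaced by one boolean decided up front, so the choice of split, bounds and aligned space can never disagree, and the comparison only happens when unaligned nodes were actually considered. A condensed sketch of the pattern (plain floats, not the BVH builder API):

	// Sketch: decide once whether the unaligned split wins, then reuse the flag
	// everywhere instead of re-comparing the SAH values.
	static bool choose_unaligned_split(bool use_unaligned_nodes,
	                                   float splitSAH,
	                                   float unalignedSplitSAH,
	                                   float leafSAH,
	                                   float split_threshold)
	{
		bool do_unaligned_split = false;
		if(use_unaligned_nodes && splitSAH > split_threshold * leafSAH) {
			/* unalignedSplitSAH is only meaningful inside this branch */
			if(unalignedSplitSAH < splitSAH) {
				do_unaligned_split = true;
			}
		}
		return do_unaligned_split;
	}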

View File

@ -66,7 +66,7 @@ public:
inline void set_aligned_space(const Transform& aligned_space)
{
m_is_unaligned = true;
if (m_aligned_space == NULL) {
if(m_aligned_space == NULL) {
m_aligned_space = new Transform(aligned_space);
}
else {

View File

@ -49,17 +49,17 @@ std::ostream& operator <<(std::ostream &os,
/* TODO(sergey): Decode bitflag into list of names. */
os << "Nodes features: " << requested_features.nodes_features << std::endl;
os << "Use hair: "
<< string_from_bool(requested_features.use_hair) << std::endl;
<< string_from_bool(requested_features.use_hair) << std::endl;
os << "Use object motion: "
<< string_from_bool(requested_features.use_object_motion) << std::endl;
<< string_from_bool(requested_features.use_object_motion) << std::endl;
os << "Use camera motion: "
<< string_from_bool(requested_features.use_camera_motion) << std::endl;
<< string_from_bool(requested_features.use_camera_motion) << std::endl;
os << "Use Baking: "
<< string_from_bool(requested_features.use_baking) << std::endl;
<< string_from_bool(requested_features.use_baking) << std::endl;
os << "Use Subsurface: "
<< string_from_bool(requested_features.use_subsurface) << std::endl;
os << "Use Volume: "
<< string_from_bool(requested_features.use_volume) << std::endl;
<< string_from_bool(requested_features.use_volume) << std::endl;
os << "Use Branched Integrator: "
<< string_from_bool(requested_features.use_integrator_branched) << std::endl;
os << "Use Patch Evaluation: "

View File

@ -409,7 +409,7 @@ void OpenCLDeviceBase::enqueue_kernel(cl_kernel kernel, size_t w, size_t h)
* much work per pixel (if we don't check global ID on Y axis) or will
* be checking for global ID to always have Y of 0.
*/
if (h == 1) {
if(h == 1) {
global_size[h] = 1;
}

View File

@ -125,8 +125,8 @@ struct NodeType
ustring name;
Type type;
std::vector<SocketType> inputs;
std::vector<SocketType> outputs;
vector<SocketType, std::allocator<SocketType> > inputs;
vector<SocketType, std::allocator<SocketType> > outputs;
CreateFunc create;
static NodeType *add(const char *name, CreateFunc create, Type type = NONE);

View File

@ -57,6 +57,7 @@ set(SRC_HEADERS
kernel_emission.h
kernel_film.h
kernel_globals.h
kernel_image_opencl.h
kernel_jitter.h
kernel_light.h
kernel_math.h

View File

@ -269,6 +269,67 @@ ccl_device_inline void triangle_intersect_subsurface(
const float4 tri_a = kernel_tex_fetch(__prim_tri_verts, tri_vindex+0),
tri_b = kernel_tex_fetch(__prim_tri_verts, tri_vindex+1),
tri_c = kernel_tex_fetch(__prim_tri_verts, tri_vindex+2);
#if defined(__KERNEL_AVX2__)
const avxf avxf_P(P.m128, P.m128);
const avxf tri_ab = kernel_tex_fetch_avxf(__prim_tri_verts, tri_vindex + 0);
const avxf tri_bc = kernel_tex_fetch_avxf(__prim_tri_verts, tri_vindex + 1);
const avxf AB = tri_ab - avxf_P;
const avxf BC = tri_bc - avxf_P;
const __m256i permuteMask = _mm256_set_epi32(0x3, kz, ky, kx, 0x3, kz, ky, kx);
const avxf AB_k = shuffle(AB, permuteMask);
const avxf BC_k = shuffle(BC, permuteMask);
/* Akz, Akz, Bkz, Bkz, Bkz, Bkz, Ckz, Ckz */
const avxf ABBC_kz = shuffle<2>(AB_k, BC_k);
/* Akx, Aky, Bkx, Bky, Bkx,Bky, Ckx, Cky */
const avxf ABBC_kxy = shuffle<0,1,0,1>(AB_k, BC_k);
const avxf Sxy(Sy, Sx, Sy, Sx);
/* Ax, Ay, Bx, By, Bx, By, Cx, Cy */
const avxf ABBC_xy = nmadd(ABBC_kz, Sxy, ABBC_kxy);
float ABBC_kz_array[8];
_mm256_storeu_ps((float*)&ABBC_kz_array, ABBC_kz);
const float A_kz = ABBC_kz_array[0];
const float B_kz = ABBC_kz_array[2];
const float C_kz = ABBC_kz_array[6];
/* By, Bx, Cy, Cx, By, Bx, Ay, Ax */
const avxf BCBA_yx = permute<3,2,7,6,3,2,1,0>(ABBC_xy);
const avxf negMask(0,0,0,0,0x80000000, 0x80000000, 0x80000000, 0x80000000);
/* W U V
* (AxBy-AyBx) (BxCy-ByCx) XX XX (BxBy-ByBx) (CxAy-CyAx) XX XX
*/
const avxf WUxxxxVxx_neg = _mm256_hsub_ps(ABBC_xy * BCBA_yx, negMask /* Dont care */);
const avxf WUVWnegWUVW = permute<0,1,5,0,0,1,5,0>(WUxxxxVxx_neg) ^ negMask;
/* Calculate scaled barycentric coordinates. */
float WUVW_array[4];
_mm_storeu_ps((float*)&WUVW_array, _mm256_castps256_ps128 (WUVWnegWUVW));
const float W = WUVW_array[0];
const float U = WUVW_array[1];
const float V = WUVW_array[2];
const int WUVW_mask = 0x7 & _mm256_movemask_ps(WUVWnegWUVW);
const int WUVW_zero = 0x7 & _mm256_movemask_ps(_mm256_cmp_ps(WUVWnegWUVW,
_mm256_setzero_ps(), 0));
if(!((WUVW_mask == 7) || (WUVW_mask == 0)) && ((WUVW_mask | WUVW_zero) != 7)) {
return;
}
#else
const float3 A = make_float3(tri_a.x - P.x, tri_a.y - P.y, tri_a.z - P.z);
const float3 B = make_float3(tri_b.x - P.x, tri_b.y - P.y, tri_b.z - P.z);
const float3 C = make_float3(tri_c.x - P.x, tri_c.y - P.y, tri_c.z - P.z);
@ -295,6 +356,7 @@ ccl_device_inline void triangle_intersect_subsurface(
{
return;
}
#endif
/* Calculate determinant. */
float det = U + V + W;
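The AVX2 block above packs the edge vectors for two vertex pairs into one 256-bit register, computes the scaled barycentric terms U, V and W with a single multiply/hsub, and then uses movemask to reject rays whose signs are mixed. That mask test is equivalent to the scalar acceptance rule used by the non-AVX2 path; stated plainly (illustrative helper, not kernel code):

	// Sketch: a potential hit requires U, V and W to share a sign (zeros allowed),
	// i.e. min(U, V, W) >= 0 or max(U, V, W) <= 0.
	static bool barycentric_signs_ok(float U, float V, float W)
	{
		return (U >= 0.0f && V >= 0.0f && W >= 0.0f) ||
		       (U <= 0.0f && V <= 0.0f && W <= 0.0f);
	}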

View File

@ -29,7 +29,7 @@ CCL_NAMESPACE_BEGIN
/* Return position normalized to 0..1 in mesh bounds */
#if defined(__KERNEL_GPU__) && __CUDA_ARCH__ < 300
#if defined(__KERNEL_CUDA__) && __CUDA_ARCH__ < 300
ccl_device float4 volume_image_texture_3d(int id, float x, float y, float z)
{
float4 r;
@ -42,7 +42,7 @@ ccl_device float4 volume_image_texture_3d(int id, float x, float y, float z)
}
return r;
}
#endif /* __KERNEL_GPU__ */
#endif /* __KERNEL_CUDA__ */
ccl_device_inline float3 volume_normalized_position(KernelGlobals *kg,
const ShaderData *sd,
@ -64,8 +64,8 @@ ccl_device_inline float3 volume_normalized_position(KernelGlobals *kg,
ccl_device float volume_attribute_float(KernelGlobals *kg, const ShaderData *sd, const AttributeDescriptor desc, float *dx, float *dy)
{
float3 P = volume_normalized_position(kg, sd, sd->P);
#ifdef __KERNEL_GPU__
float3 P = volume_normalized_position(kg, sd, ccl_fetch(sd, P));
#ifdef __KERNEL_CUDA__
# if __CUDA_ARCH__ >= 300
CUtexObject tex = kernel_tex_fetch(__bindless_mapping, desc.offset);
float f = kernel_tex_image_interp_3d_float(tex, P.x, P.y, P.z);
@ -73,6 +73,8 @@ ccl_device float volume_attribute_float(KernelGlobals *kg, const ShaderData *sd,
# else
float4 r = volume_image_texture_3d(desc.offset, P.x, P.y, P.z);
# endif
#elif defined(__KERNEL_OPENCL__)
float4 r = kernel_tex_image_interp_3d(kg, desc.offset, P.x, P.y, P.z);
#else
float4 r;
if(sd->flag & SD_VOLUME_CUBIC)
@ -89,14 +91,16 @@ ccl_device float volume_attribute_float(KernelGlobals *kg, const ShaderData *sd,
ccl_device float3 volume_attribute_float3(KernelGlobals *kg, const ShaderData *sd, const AttributeDescriptor desc, float3 *dx, float3 *dy)
{
float3 P = volume_normalized_position(kg, sd, sd->P);
#ifdef __KERNEL_GPU__
float3 P = volume_normalized_position(kg, sd, ccl_fetch(sd, P));
#ifdef __KERNEL_CUDA__
# if __CUDA_ARCH__ >= 300
CUtexObject tex = kernel_tex_fetch(__bindless_mapping, desc.offset);
float4 r = kernel_tex_image_interp_3d_float4(tex, P.x, P.y, P.z);
# else
float4 r = volume_image_texture_3d(desc.offset, P.x, P.y, P.z);
# endif
#elif defined(__KERNEL_OPENCL__)
float4 r = kernel_tex_image_interp_3d(kg, desc.offset, P.x, P.y, P.z);
#else
float4 r;
if(sd->flag & SD_VOLUME_CUBIC)

View File

@ -68,8 +68,8 @@ ccl_device void camera_sample_perspective(KernelGlobals *kg, float raster_x, flo
}
#endif
ray->P = make_float3(0.0f, 0.0f, 0.0f);
ray->D = Pcamera;
float3 P = make_float3(0.0f, 0.0f, 0.0f);
float3 D = Pcamera;
/* modify ray for depth of field */
float aperturesize = kernel_data.cam.aperturesize;
@ -79,12 +79,12 @@ ccl_device void camera_sample_perspective(KernelGlobals *kg, float raster_x, flo
float2 lensuv = camera_sample_aperture(kg, lens_u, lens_v)*aperturesize;
/* compute point on plane of focus */
float ft = kernel_data.cam.focaldistance/ray->D.z;
float3 Pfocus = ray->D*ft;
float ft = kernel_data.cam.focaldistance/D.z;
float3 Pfocus = D*ft;
/* update ray for effect of lens */
ray->P = make_float3(lensuv.x, lensuv.y, 0.0f);
ray->D = normalize(Pfocus - ray->P);
P = make_float3(lensuv.x, lensuv.y, 0.0f);
D = normalize(Pfocus - P);
}
/* transform ray from camera to world */
@ -105,39 +105,66 @@ ccl_device void camera_sample_perspective(KernelGlobals *kg, float raster_x, flo
}
#endif
float3 tP = transform_point(&cameratoworld, ray->P);
float3 tD = transform_direction(&cameratoworld, ray->D);
ray->P = spherical_stereo_position(kg, tD, tP);
ray->D = spherical_stereo_direction(kg, tD, tP, ray->P);
P = transform_point(&cameratoworld, P);
D = normalize(transform_direction(&cameratoworld, D));
bool use_stereo = kernel_data.cam.interocular_offset != 0.0f;
if(!use_stereo) {
/* No stereo */
ray->P = P;
ray->D = D;
#ifdef __RAY_DIFFERENTIALS__
/* ray differential */
ray->dP = differential3_zero();
float3 Dcenter = transform_direction(&cameratoworld, Pcamera);
float3 tD_diff = transform_direction(&cameratoworld, Pcamera);
float3 Pdiff = spherical_stereo_position(kg, tD_diff, Pcamera);
float3 Ddiff = spherical_stereo_direction(kg, tD_diff, Pcamera, Pdiff);
tP = transform_perspective(&rastertocamera,
make_float3(raster_x + 1.0f, raster_y, 0.0f));
tD = tD_diff + float4_to_float3(kernel_data.cam.dx);
Pcamera = spherical_stereo_position(kg, tD, tP);
ray->dD.dx = spherical_stereo_direction(kg, tD, tP, Pcamera) - Ddiff;
ray->dP.dx = Pcamera - Pdiff;
tP = transform_perspective(&rastertocamera,
make_float3(raster_x, raster_y + 1.0f, 0.0f));
tD = tD_diff + float4_to_float3(kernel_data.cam.dy);
Pcamera = spherical_stereo_position(kg, tD, tP);
ray->dD.dy = spherical_stereo_direction(kg, tD, tP, Pcamera) - Ddiff;
/* dP.dy is zero, since the omnidirectional panorama only shift the eyes horizontally */
ray->dP = differential3_zero();
ray->dD.dx = normalize(Dcenter + float4_to_float3(kernel_data.cam.dx)) - normalize(Dcenter);
ray->dD.dy = normalize(Dcenter + float4_to_float3(kernel_data.cam.dy)) - normalize(Dcenter);
#endif
}
else {
/* Spherical stereo */
spherical_stereo_transform(kg, &P, &D);
ray->P = P;
ray->D = D;
#ifdef __RAY_DIFFERENTIALS__
/* Ray differentials, computed from scratch using the raster coordinates
* because we don't want to be affected by depth of field. We compute
* ray origin and direction for the center and two neighbouring pixels
* and simply take their differences. */
float3 Pnostereo = transform_point(&cameratoworld, make_float3(0.0f, 0.0f, 0.0f));
float3 Pcenter = Pnostereo;
float3 Dcenter = Pcamera;
Dcenter = normalize(transform_direction(&cameratoworld, Dcenter));
spherical_stereo_transform(kg, &Pcenter, &Dcenter);
float3 Px = Pnostereo;
float3 Dx = transform_perspective(&rastertocamera, make_float3(raster_x + 1.0f, raster_y, 0.0f));
Dx = normalize(transform_direction(&cameratoworld, Dx));
spherical_stereo_transform(kg, &Px, &Dx);
ray->dP.dx = Px - Pcenter;
ray->dD.dx = Dx - Dcenter;
float3 Py = Pnostereo;
float3 Dy = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y + 1.0f, 0.0f));
Dy = normalize(transform_direction(&cameratoworld, Dy));
spherical_stereo_transform(kg, &Py, &Dy);
ray->dP.dy = Py - Pcenter;
ray->dD.dy = Dy - Dcenter;
#endif
}
#ifdef __CAMERA_CLIPPING__
/* clipping */
float3 Pclip = normalize(Pcamera);
float z_inv = 1.0f / Pclip.z;
ray->P += kernel_data.cam.nearclip*ray->D * z_inv;
float z_inv = 1.0f / normalize(Pcamera).z;
float nearclip = kernel_data.cam.nearclip * z_inv;
ray->P += nearclip * ray->D;
ray->dP.dx += nearclip * ray->dD.dx;
ray->dP.dy += nearclip * ray->dD.dy;
ray->t = kernel_data.cam.cliplength * z_inv;
#else
ray->t = FLT_MAX;
@ -151,7 +178,8 @@ ccl_device void camera_sample_orthographic(KernelGlobals *kg, float raster_x, fl
Transform rastertocamera = kernel_data.cam.rastertocamera;
float3 Pcamera = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y, 0.0f));
ray->D = make_float3(0.0f, 0.0f, 1.0f);
float3 P;
float3 D = make_float3(0.0f, 0.0f, 1.0f);
/* modify ray for depth of field */
float aperturesize = kernel_data.cam.aperturesize;
@ -161,15 +189,15 @@ ccl_device void camera_sample_orthographic(KernelGlobals *kg, float raster_x, fl
float2 lensuv = camera_sample_aperture(kg, lens_u, lens_v)*aperturesize;
/* compute point on plane of focus */
float3 Pfocus = ray->D * kernel_data.cam.focaldistance;
float3 Pfocus = D * kernel_data.cam.focaldistance;
/* update ray for effect of lens */
float3 lensuvw = make_float3(lensuv.x, lensuv.y, 0.0f);
ray->P = Pcamera + lensuvw;
ray->D = normalize(Pfocus - lensuvw);
P = Pcamera + lensuvw;
D = normalize(Pfocus - lensuvw);
}
else {
ray->P = Pcamera;
P = Pcamera;
}
/* transform ray from camera to world */
Transform cameratoworld = kernel_data.cam.cameratoworld;
@ -189,9 +217,8 @@ ccl_device void camera_sample_orthographic(KernelGlobals *kg, float raster_x, fl
}
#endif
ray->P = transform_point(&cameratoworld, ray->P);
ray->D = transform_direction(&cameratoworld, ray->D);
ray->D = normalize(ray->D);
ray->P = transform_point(&cameratoworld, P);
ray->D = normalize(transform_direction(&cameratoworld, D));
#ifdef __RAY_DIFFERENTIALS__
/* ray differential */
@ -220,11 +247,11 @@ ccl_device_inline void camera_sample_panorama(KernelGlobals *kg,
float3 Pcamera = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y, 0.0f));
/* create ray form raster position */
ray->P = make_float3(0.0f, 0.0f, 0.0f);
ray->D = panorama_to_direction(kg, Pcamera.x, Pcamera.y);
float3 P = make_float3(0.0f, 0.0f, 0.0f);
float3 D = panorama_to_direction(kg, Pcamera.x, Pcamera.y);
/* indicates ray should not receive any light, outside of the lens */
if(is_zero(ray->D)) {
if(is_zero(D)) {
ray->t = 0.0f;
return;
}
@ -237,17 +264,17 @@ ccl_device_inline void camera_sample_panorama(KernelGlobals *kg,
float2 lensuv = camera_sample_aperture(kg, lens_u, lens_v)*aperturesize;
/* compute point on plane of focus */
float3 D = normalize(ray->D);
float3 Pfocus = D * kernel_data.cam.focaldistance;
float3 Dfocus = normalize(D);
float3 Pfocus = Dfocus * kernel_data.cam.focaldistance;
/* calculate orthonormal coordinates perpendicular to D */
/* calculate orthonormal coordinates perpendicular to Dfocus */
float3 U, V;
U = normalize(make_float3(1.0f, 0.0f, 0.0f) - D.x * D);
V = normalize(cross(D, U));
U = normalize(make_float3(1.0f, 0.0f, 0.0f) - Dfocus.x * Dfocus);
V = normalize(cross(Dfocus, U));
/* update ray for effect of lens */
ray->P = U * lensuv.x + V * lensuv.y;
ray->D = normalize(Pfocus - ray->P);
P = U * lensuv.x + V * lensuv.y;
D = normalize(Pfocus - P);
}
/* transform ray from camera to world */
@ -268,36 +295,60 @@ ccl_device_inline void camera_sample_panorama(KernelGlobals *kg,
}
#endif
float3 tP = transform_point(&cameratoworld, ray->P);
float3 tD = transform_direction(&cameratoworld, ray->D);
ray->P = spherical_stereo_position(kg, tD, tP);
ray->D = spherical_stereo_direction(kg, tD, tP, ray->P);
P = transform_point(&cameratoworld, P);
D = normalize(transform_direction(&cameratoworld, D));
/* Stereo transform */
bool use_stereo = kernel_data.cam.interocular_offset != 0.0f;
if(use_stereo) {
spherical_stereo_transform(kg, &P, &D);
}
ray->P = P;
ray->D = D;
#ifdef __RAY_DIFFERENTIALS__
/* ray differential */
ray->dP = differential3_zero();
/* Ray differentials, computed from scratch using the raster coordinates
* because we don't want to be affected by depth of field. We compute
* ray origin and direction for the center and two neighbouring pixels
* and simply take their differences. */
float3 Pcenter = Pcamera;
float3 Dcenter = panorama_to_direction(kg, Pcenter.x, Pcenter.y);
Pcenter = transform_point(&cameratoworld, Pcenter);
Dcenter = normalize(transform_direction(&cameratoworld, Dcenter));
if(use_stereo) {
spherical_stereo_transform(kg, &Pcenter, &Dcenter);
}
tP = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y, 0.0f));
tD = transform_direction(&cameratoworld, panorama_to_direction(kg, tP.x, tP.y));
float3 Pdiff = spherical_stereo_position(kg, tD, tP);
float3 Ddiff = spherical_stereo_direction(kg, tD, tP, Pdiff);
float3 Px = transform_perspective(&rastertocamera, make_float3(raster_x + 1.0f, raster_y, 0.0f));
float3 Dx = panorama_to_direction(kg, Px.x, Px.y);
Px = transform_point(&cameratoworld, Px);
Dx = normalize(transform_direction(&cameratoworld, Dx));
if(use_stereo) {
spherical_stereo_transform(kg, &Px, &Dx);
}
tP = transform_perspective(&rastertocamera, make_float3(raster_x + 1.0f, raster_y, 0.0f));
tD = transform_direction(&cameratoworld, panorama_to_direction(kg, tP.x, tP.y));
Pcamera = spherical_stereo_position(kg, tD, tP);
ray->dD.dx = spherical_stereo_direction(kg, tD, tP, Pcamera) - Ddiff;
ray->dP.dx = Pcamera - Pdiff;
ray->dP.dx = Px - Pcenter;
ray->dD.dx = Dx - Dcenter;
tP = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y + 1.0f, 0.0f));
tD = transform_direction(&cameratoworld, panorama_to_direction(kg, tP.x, tP.y));
Pcamera = spherical_stereo_position(kg, tD, tP);
ray->dD.dy = spherical_stereo_direction(kg, tD, tP, Pcamera) - Ddiff;
/* dP.dy is zero, since the omnidirectional panorama only shift the eyes horizontally */
float3 Py = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y + 1.0f, 0.0f));
float3 Dy = panorama_to_direction(kg, Py.x, Py.y);
Py = transform_point(&cameratoworld, Py);
Dy = normalize(transform_direction(&cameratoworld, Dy));
if(use_stereo) {
spherical_stereo_transform(kg, &Py, &Dy);
}
ray->dP.dy = Py - Pcenter;
ray->dD.dy = Dy - Dcenter;
#endif
#ifdef __CAMERA_CLIPPING__
/* clipping */
ray->P += kernel_data.cam.nearclip*ray->D;
float nearclip = kernel_data.cam.nearclip;
ray->P += nearclip * ray->D;
ray->dP.dx += nearclip * ray->dD.dx;
ray->dP.dy += nearclip * ray->dD.dy;
ray->t = kernel_data.cam.cliplength;
#else
ray->t = FLT_MAX;
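For the spherical stereo paths above, ray differentials can no longer be derived from the precomputed cam.dx/cam.dy deltas, so they are rebuilt by brute force: generate origin and direction for the pixel itself and for its (x+1, y) and (x, y+1) neighbours, push each through the stereo transform, and take differences. A self-contained sketch of that finite-difference idea (float3 and generate_ray are stand-ins, not the Cycles kernel API):

	// Sketch: ray differentials as finite differences of neighbouring raster pixels.
	struct float3 { float x, y, z; };

	static float3 sub(const float3& a, const float3& b)
	{
		return {a.x - b.x, a.y - b.y, a.z - b.z};
	}

	// Assumed helper: raster (x, y) -> world-space ray origin P and direction D,
	// including the spherical stereo transform when it is enabled.
	void generate_ray(float x, float y, float3* P, float3* D);

	void ray_differentials(float x, float y,
	                       float3* dPdx, float3* dDdx,
	                       float3* dPdy, float3* dDdy)
	{
		float3 Pc, Dc, Px, Dx, Py, Dy;
		generate_ray(x,        y,        &Pc, &Dc);
		generate_ray(x + 1.0f, y,        &Px, &Dx);
		generate_ray(x,        y + 1.0f, &Py, &Dy);
		*dPdx = sub(Px, Pc);  *dDdx = sub(Dx, Dc);
		*dPdy = sub(Py, Pc);  *dDdy = sub(Dy, Dc);
	}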

View File

@ -0,0 +1,231 @@
/*
* Copyright 2016 Blender Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* For OpenCL all images are packed in a single array, and we do manual lookup
* and interpolation. */
ccl_device_inline float4 svm_image_texture_read(KernelGlobals *kg, int id, int offset)
{
/* Float4 */
if(id < TEX_START_BYTE4_OPENCL) {
return kernel_tex_fetch(__tex_image_float4_packed, offset);
}
/* Byte4 */
else if(id < TEX_START_FLOAT_OPENCL) {
uchar4 r = kernel_tex_fetch(__tex_image_byte4_packed, offset);
float f = 1.0f/255.0f;
return make_float4(r.x*f, r.y*f, r.z*f, r.w*f);
}
/* Float */
else if(id < TEX_START_BYTE_OPENCL) {
float f = kernel_tex_fetch(__tex_image_float_packed, offset);
return make_float4(f, f, f, 1.0f);
}
/* Byte */
else {
uchar r = kernel_tex_fetch(__tex_image_byte_packed, offset);
float f = r * (1.0f/255.0f);
return make_float4(f, f, f, 1.0f);
}
}
ccl_device_inline int svm_image_texture_wrap_periodic(int x, int width)
{
x %= width;
if(x < 0)
x += width;
return x;
}
ccl_device_inline int svm_image_texture_wrap_clamp(int x, int width)
{
return clamp(x, 0, width-1);
}
ccl_device_inline float svm_image_texture_frac(float x, int *ix)
{
int i = float_to_int(x) - ((x < 0.0f)? 1: 0);
*ix = i;
return x - (float)i;
}
ccl_device float4 kernel_tex_image_interp(KernelGlobals *kg, int id, float x, float y)
{
uint4 info = kernel_tex_fetch(__tex_image_packed_info, id*2);
uint width = info.x;
uint height = info.y;
uint offset = info.z;
/* Image Options */
uint interpolation = (info.w & (1 << 0)) ? INTERPOLATION_CLOSEST : INTERPOLATION_LINEAR;
uint extension;
if(info.w & (1 << 1))
extension = EXTENSION_REPEAT;
else if(info.w & (1 << 2))
extension = EXTENSION_EXTEND;
else
extension = EXTENSION_CLIP;
float4 r;
int ix, iy, nix, niy;
if(interpolation == INTERPOLATION_CLOSEST) {
svm_image_texture_frac(x*width, &ix);
svm_image_texture_frac(y*height, &iy);
if(extension == EXTENSION_REPEAT) {
ix = svm_image_texture_wrap_periodic(ix, width);
iy = svm_image_texture_wrap_periodic(iy, height);
}
else {
if(extension == EXTENSION_CLIP) {
if(x < 0.0f || y < 0.0f || x > 1.0f || y > 1.0f) {
return make_float4(0.0f, 0.0f, 0.0f, 0.0f);
}
}
/* Fall through. */
/* EXTENSION_EXTEND */
ix = svm_image_texture_wrap_clamp(ix, width);
iy = svm_image_texture_wrap_clamp(iy, height);
}
r = svm_image_texture_read(kg, id, offset + ix + iy*width);
}
else { /* INTERPOLATION_LINEAR */
float tx = svm_image_texture_frac(x*width - 0.5f, &ix);
float ty = svm_image_texture_frac(y*height - 0.5f, &iy);
if(extension == EXTENSION_REPEAT) {
ix = svm_image_texture_wrap_periodic(ix, width);
iy = svm_image_texture_wrap_periodic(iy, height);
nix = svm_image_texture_wrap_periodic(ix+1, width);
niy = svm_image_texture_wrap_periodic(iy+1, height);
}
else {
if(extension == EXTENSION_CLIP) {
if(x < 0.0f || y < 0.0f || x > 1.0f || y > 1.0f) {
return make_float4(0.0f, 0.0f, 0.0f, 0.0f);
}
}
nix = svm_image_texture_wrap_clamp(ix+1, width);
niy = svm_image_texture_wrap_clamp(iy+1, height);
ix = svm_image_texture_wrap_clamp(ix, width);
iy = svm_image_texture_wrap_clamp(iy, height);
}
r = (1.0f - ty)*(1.0f - tx)*svm_image_texture_read(kg, id, offset + ix + iy*width);
r += (1.0f - ty)*tx*svm_image_texture_read(kg, id, offset + nix + iy*width);
r += ty*(1.0f - tx)*svm_image_texture_read(kg, id, offset + ix + niy*width);
r += ty*tx*svm_image_texture_read(kg, id, offset + nix + niy*width);
}
return r;
}
ccl_device float4 kernel_tex_image_interp_3d(KernelGlobals *kg, int id, float x, float y, float z)
{
uint4 info = kernel_tex_fetch(__tex_image_packed_info, id*2);
uint width = info.x;
uint height = info.y;
uint offset = info.z;
uint depth = kernel_tex_fetch(__tex_image_packed_info, id*2+1).x;
/* Image Options */
uint interpolation = (info.w & (1 << 0)) ? INTERPOLATION_CLOSEST : INTERPOLATION_LINEAR;
uint extension;
if(info.w & (1 << 1))
extension = EXTENSION_REPEAT;
else if(info.w & (1 << 2))
extension = EXTENSION_EXTEND;
else
extension = EXTENSION_CLIP;
float4 r;
int ix, iy, iz, nix, niy, niz;
if(interpolation == INTERPOLATION_CLOSEST) {
svm_image_texture_frac(x*width, &ix);
svm_image_texture_frac(y*height, &iy);
svm_image_texture_frac(z*depth, &iz);
if(extension == EXTENSION_REPEAT) {
ix = svm_image_texture_wrap_periodic(ix, width);
iy = svm_image_texture_wrap_periodic(iy, height);
iz = svm_image_texture_wrap_periodic(iz, depth);
}
else {
if(extension == EXTENSION_CLIP) {
if(x < 0.0f || y < 0.0f || z < 0.0f ||
x > 1.0f || y > 1.0f || z > 1.0f)
{
return make_float4(0.0f, 0.0f, 0.0f, 0.0f);
}
}
/* Fall through. */
/* EXTENSION_EXTEND */
ix = svm_image_texture_wrap_clamp(ix, width);
iy = svm_image_texture_wrap_clamp(iy, height);
iz = svm_image_texture_wrap_clamp(iz, depth);
}
r = svm_image_texture_read(kg, id, offset + ix + iy*width + iz*width*height);
}
else { /* INTERPOLATION_LINEAR */
float tx = svm_image_texture_frac(x*(float)width - 0.5f, &ix);
float ty = svm_image_texture_frac(y*(float)height - 0.5f, &iy);
float tz = svm_image_texture_frac(z*(float)depth - 0.5f, &iz);
if(extension == EXTENSION_REPEAT) {
ix = svm_image_texture_wrap_periodic(ix, width);
iy = svm_image_texture_wrap_periodic(iy, height);
iz = svm_image_texture_wrap_periodic(iz, depth);
nix = svm_image_texture_wrap_periodic(ix+1, width);
niy = svm_image_texture_wrap_periodic(iy+1, height);
niz = svm_image_texture_wrap_periodic(iz+1, depth);
}
else {
if(extension == EXTENSION_CLIP)
if(x < 0.0f || y < 0.0f || z < 0.0f ||
x > 1.0f || y > 1.0f || z > 1.0f)
{
return make_float4(0.0f, 0.0f, 0.0f, 0.0f);
}
/* Fall through. */
/* EXTENSION_EXTEND */
nix = svm_image_texture_wrap_clamp(ix+1, width);
niy = svm_image_texture_wrap_clamp(iy+1, height);
niz = svm_image_texture_wrap_clamp(iz+1, depth);
ix = svm_image_texture_wrap_clamp(ix, width);
iy = svm_image_texture_wrap_clamp(iy, height);
iz = svm_image_texture_wrap_clamp(iz, depth);
}
r = (1.0f - tz)*(1.0f - ty)*(1.0f - tx)*svm_image_texture_read(kg, id, offset + ix + iy*width + iz*width*height);
r += (1.0f - tz)*(1.0f - ty)*tx*svm_image_texture_read(kg, id, offset + nix + iy*width + iz*width*height);
r += (1.0f - tz)*ty*(1.0f - tx)*svm_image_texture_read(kg, id, offset + ix + niy*width + iz*width*height);
r += (1.0f - tz)*ty*tx*svm_image_texture_read(kg, id, offset + nix + niy*width + iz*width*height);
r += tz*(1.0f - ty)*(1.0f - tx)*svm_image_texture_read(kg, id, offset + ix + iy*width + niz*width*height);
r += tz*(1.0f - ty)*tx*svm_image_texture_read(kg, id, offset + nix + iy*width + niz*width*height);
r += tz*ty*(1.0f - tx)*svm_image_texture_read(kg, id, offset + ix + niy*width + niz*width*height);
r += tz*ty*tx*svm_image_texture_read(kg, id, offset + nix + niy*width + niz*width*height);
}
return r;
}
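The new kernel_image_opencl.h collects the manual texture lookup that OpenCL needs (all images live in flat packed arrays) so the split kernels and the megakernel can share it. The linear path is ordinary bilinear filtering: the fractional parts of x*width - 0.5 and y*height - 0.5 become the blend weights between texel (ix, iy) and its +1 neighbours. A single-channel, repeat-wrap-only illustration of that weighting (not the kernel API):

	#include <cmath>

	// Sketch: the bilinear weighting used above, for one float channel.
	static int wrap_periodic(int x, int n)
	{
		x %= n;
		return x < 0 ? x + n : x;
	}

	static float bilinear_sample(const float* img, int width, int height,
	                             float x, float y)
	{
		float fx = x * width - 0.5f, fy = y * height - 0.5f;
		int ix = (int)std::floor(fx), iy = (int)std::floor(fy);
		float tx = fx - ix, ty = fy - iy;   /* blend weights in [0, 1) */
		int x0 = wrap_periodic(ix, width),  x1 = wrap_periodic(ix + 1, width);
		int y0 = wrap_periodic(iy, height), y1 = wrap_periodic(iy + 1, height);
		return (1.0f - ty) * ((1.0f - tx) * img[y0 * width + x0] + tx * img[y0 * width + x1])
		     +         ty  * ((1.0f - tx) * img[y1 * width + x0] + tx * img[y1 * width + x1]);
	}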

View File

@ -224,24 +224,18 @@ ccl_device_inline float2 direction_to_panorama(KernelGlobals *kg, float3 dir)
}
}
ccl_device_inline float3 spherical_stereo_position(KernelGlobals *kg,
float3 dir,
float3 pos)
ccl_device_inline void spherical_stereo_transform(KernelGlobals *kg, float3 *P, float3 *D)
{
float interocular_offset = kernel_data.cam.interocular_offset;
/* Interocular offset of zero means either non stereo, or stereo without
* spherical stereo.
*/
if(interocular_offset == 0.0f) {
return pos;
}
* spherical stereo. */
kernel_assert(interocular_offset != 0.0f);
if(kernel_data.cam.pole_merge_angle_to > 0.0f) {
float3 normalized_direction = normalize(dir);
const float pole_merge_angle_from = kernel_data.cam.pole_merge_angle_from,
pole_merge_angle_to = kernel_data.cam.pole_merge_angle_to;
float altitude = fabsf(safe_asinf(normalized_direction.z));
float altitude = fabsf(safe_asinf(D->z));
if(altitude > pole_merge_angle_to) {
interocular_offset = 0.0f;
}
@ -253,32 +247,20 @@ ccl_device_inline float3 spherical_stereo_position(KernelGlobals *kg,
}
float3 up = make_float3(0.0f, 0.0f, 1.0f);
float3 side = normalize(cross(dir, up));
float3 side = normalize(cross(*D, up));
float3 stereo_offset = side * interocular_offset;
return pos + (side * interocular_offset);
}
*P += stereo_offset;
/* NOTE: Ensures direction is normalized. */
ccl_device float3 spherical_stereo_direction(KernelGlobals *kg,
float3 dir,
float3 pos,
float3 newpos)
{
/* Convergence distance is FLT_MAX in the case of parallel convergence mode,
* no need to modify direction in this case either. */
const float convergence_distance = kernel_data.cam.convergence_distance;
const float3 normalized_dir = normalize(dir);
/* Interocular offset of zero means either no stereo, or stereo without
* spherical stereo.
* Convergence distance is FLT_MAX in the case of parallel convergence mode,
* no need to modify direction in this case either.
*/
if(kernel_data.cam.interocular_offset == 0.0f ||
convergence_distance == FLT_MAX)
{
return normalized_dir;
}
float3 screenpos = pos + (normalized_dir * convergence_distance);
return normalize(screenpos - newpos);
if(convergence_distance != FLT_MAX)
{
float3 screen_offset = convergence_distance * (*D);
*D = normalize(screen_offset - stereo_offset);
}
}
CCL_NAMESPACE_END
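spherical_stereo_position() and spherical_stereo_direction() are folded into a single in-place spherical_stereo_transform(kg, &P, &D): the caller now checks interocular_offset != 0 itself, the eye is shifted sideways by the (pole-merged) interocular offset, and the direction is re-aimed at the point where the original ray meets the convergence distance. A self-contained sketch of those two steps (minimal vector helpers, not the Cycles float3 API):

	#include <cfloat>
	#include <cmath>

	struct vec3 { float x, y, z; };
	static vec3 scale(vec3 v, float s) { return {v.x * s, v.y * s, v.z * s}; }
	static vec3 add(vec3 a, vec3 b)    { return {a.x + b.x, a.y + b.y, a.z + b.z}; }
	static vec3 sub(vec3 a, vec3 b)    { return {a.x - b.x, a.y - b.y, a.z - b.z}; }
	static vec3 normalize(vec3 v)
	{
		float len = std::sqrt(v.x * v.x + v.y * v.y + v.z * v.z);
		return scale(v, 1.0f / len);
	}

	// P, D: eye position and normalized view direction; side: unit vector to the
	// eye's right; the offsets come from the camera settings (assumed inputs).
	static void stereo_transform(vec3* P, vec3* D, vec3 side,
	                             float interocular_offset,
	                             float convergence_distance)
	{
		vec3 stereo_offset = scale(side, interocular_offset);
		*P = add(*P, stereo_offset);
		if(convergence_distance != FLT_MAX) {   /* parallel mode leaves D unchanged */
			vec3 to_screen = scale(*D, convergence_distance);
			*D = normalize(sub(to_screen, stereo_offset));
		}
	}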

View File

@ -20,6 +20,7 @@
#include "../../kernel_math.h"
#include "../../kernel_types.h"
#include "../../kernel_globals.h"
#include "../../kernel_image_opencl.h"
#include "../../kernel_film.h"

View File

@ -21,6 +21,7 @@
#include "kernel_math.h"
#include "kernel_types.h"
#include "kernel_globals.h"
#include "kernel_image_opencl.h"
#include "util_atomic.h"

View File

@ -29,147 +29,6 @@ CCL_NAMESPACE_BEGIN
# define TEX_NUM_FLOAT4_IMAGES TEX_NUM_FLOAT4_OPENCL
#endif
#ifdef __KERNEL_OPENCL__
/* For OpenCL all images are packed in a single array, and we do manual lookup
* and interpolation. */
ccl_device_inline float4 svm_image_texture_read(KernelGlobals *kg, int id, int offset)
{
/* Float4 */
if(id < TEX_START_BYTE4_OPENCL) {
return kernel_tex_fetch(__tex_image_float4_packed, offset);
}
/* Byte4 */
else if(id < TEX_START_FLOAT_OPENCL) {
uchar4 r = kernel_tex_fetch(__tex_image_byte4_packed, offset);
float f = 1.0f/255.0f;
return make_float4(r.x*f, r.y*f, r.z*f, r.w*f);
}
/* Float */
else if(id < TEX_START_BYTE_OPENCL) {
float f = kernel_tex_fetch(__tex_image_float_packed, offset);
return make_float4(f, f, f, 1.0f);
}
/* Byte */
else {
uchar r = kernel_tex_fetch(__tex_image_byte_packed, offset);
float f = r * (1.0f/255.0f);
return make_float4(f, f, f, 1.0f);
}
}
ccl_device_inline int svm_image_texture_wrap_periodic(int x, int width)
{
x %= width;
if(x < 0)
x += width;
return x;
}
ccl_device_inline int svm_image_texture_wrap_clamp(int x, int width)
{
return clamp(x, 0, width-1);
}
ccl_device_inline float svm_image_texture_frac(float x, int *ix)
{
int i = float_to_int(x) - ((x < 0.0f)? 1: 0);
*ix = i;
return x - (float)i;
}
ccl_device float4 svm_image_texture(KernelGlobals *kg, int id, float x, float y, uint srgb, uint use_alpha)
{
uint4 info = kernel_tex_fetch(__tex_image_packed_info, id);
uint width = info.x;
uint height = info.y;
uint offset = info.z;
/* Image Options */
uint interpolation = (info.w & (1 << 0)) ? INTERPOLATION_CLOSEST : INTERPOLATION_LINEAR;
uint extension;
if(info.w & (1 << 1))
extension = EXTENSION_REPEAT;
else if(info.w & (1 << 2))
extension = EXTENSION_EXTEND;
else
extension = EXTENSION_CLIP;
float4 r;
int ix, iy, nix, niy;
if(interpolation == INTERPOLATION_CLOSEST) {
svm_image_texture_frac(x*width, &ix);
svm_image_texture_frac(y*height, &iy);
if(extension == EXTENSION_REPEAT) {
ix = svm_image_texture_wrap_periodic(ix, width);
iy = svm_image_texture_wrap_periodic(iy, height);
}
else if(extension == EXTENSION_CLIP) {
if(x < 0.0f || y < 0.0f || x > 1.0f || y > 1.0f)
return make_float4(0.0f, 0.0f, 0.0f, 0.0f);
}
else { /* EXTENSION_EXTEND */
ix = svm_image_texture_wrap_clamp(ix, width);
iy = svm_image_texture_wrap_clamp(iy, height);
}
r = svm_image_texture_read(kg, id, offset + ix + iy*width);
}
else { /* INTERPOLATION_LINEAR */
float tx = svm_image_texture_frac(x*width - 0.5f, &ix);
float ty = svm_image_texture_frac(y*height - 0.5f, &iy);
if(extension == EXTENSION_REPEAT) {
ix = svm_image_texture_wrap_periodic(ix, width);
iy = svm_image_texture_wrap_periodic(iy, height);
nix = svm_image_texture_wrap_periodic(ix+1, width);
niy = svm_image_texture_wrap_periodic(iy+1, height);
}
else {
if(extension == EXTENSION_CLIP) {
if(x < 0.0f || y < 0.0f || x > 1.0f || y > 1.0f) {
return make_float4(0.0f, 0.0f, 0.0f, 0.0f);
}
}
nix = svm_image_texture_wrap_clamp(ix+1, width);
niy = svm_image_texture_wrap_clamp(iy+1, height);
ix = svm_image_texture_wrap_clamp(ix, width);
iy = svm_image_texture_wrap_clamp(iy, height);
}
r = (1.0f - ty)*(1.0f - tx)*svm_image_texture_read(kg, id, offset + ix + iy*width);
r += (1.0f - ty)*tx*svm_image_texture_read(kg, id, offset + nix + iy*width);
r += ty*(1.0f - tx)*svm_image_texture_read(kg, id, offset + ix + niy*width);
r += ty*tx*svm_image_texture_read(kg, id, offset + nix + niy*width);
}
if(use_alpha && r.w != 1.0f && r.w != 0.0f) {
float invw = 1.0f/r.w;
r.x *= invw;
r.y *= invw;
r.z *= invw;
if(id >= TEX_NUM_FLOAT4_IMAGES) {
r.x = min(r.x, 1.0f);
r.y = min(r.y, 1.0f);
r.z = min(r.z, 1.0f);
}
}
if(srgb) {
r.x = color_srgb_to_scene_linear(r.x);
r.y = color_srgb_to_scene_linear(r.y);
r.z = color_srgb_to_scene_linear(r.z);
}
return r;
}
#else
ccl_device float4 svm_image_texture(KernelGlobals *kg, int id, float x, float y, uint srgb, uint use_alpha)
{
#ifdef __KERNEL_CPU__
@ -180,6 +39,8 @@ ccl_device float4 svm_image_texture(KernelGlobals *kg, int id, float x, float y,
# else
float4 r = kernel_tex_image_interp(id, x, y);
# endif
#elif defined(__KERNEL_OPENCL__)
float4 r = kernel_tex_image_interp(kg, id, x, y);
#else
float4 r;
@ -339,8 +200,6 @@ ccl_device float4 svm_image_texture(KernelGlobals *kg, int id, float x, float y,
return r;
}
#endif
/* Remap coordinate from 0..1 box to -1..1 */
ccl_device_inline float3 texco_remap_square(float3 co)
{

View File

@ -27,9 +27,9 @@ ccl_device_inline float3 rgb_ramp_lookup(const float3 *ramp,
bool extrapolate,
int table_size)
{
if ((f < 0.0f || f > 1.0f) && extrapolate) {
if((f < 0.0f || f > 1.0f) && extrapolate) {
float3 t0, dy;
if (f < 0.0f) {
if(f < 0.0f) {
t0 = ramp[0];
dy = t0 - ramp[1],
f = -f;
@ -50,8 +50,9 @@ ccl_device_inline float3 rgb_ramp_lookup(const float3 *ramp,
float3 result = ramp[i];
if (interpolate && t > 0.0f)
if(interpolate && t > 0.0f) {
result = (1.0f - t) * result + t * ramp[i + 1];
}
return result;
}
@ -62,9 +63,9 @@ ccl_device float float_ramp_lookup(const float *ramp,
bool extrapolate,
int table_size)
{
if ((f < 0.0f || f > 1.0f) && extrapolate) {
if((f < 0.0f || f > 1.0f) && extrapolate) {
float t0, dy;
if (f < 0.0f) {
if(f < 0.0f) {
t0 = ramp[0];
dy = t0 - ramp[1],
f = -f;
@ -85,8 +86,9 @@ ccl_device float float_ramp_lookup(const float *ramp,
float result = ramp[i];
if (interpolate && t > 0.0f)
if(interpolate && t > 0.0f) {
result = (1.0f - t) * result + t * ramp[i + 1];
}
return result;
}

View File

@ -43,7 +43,7 @@ ccl_device void svm_node_tex_voxel(KernelGlobals *kg,
co = transform_point(&tfm, co);
}
float4 r;
# if defined(__KERNEL_GPU__)
# if defined(__KERNEL_CUDA__)
# if __CUDA_ARCH__ >= 300
CUtexObject tex = kernel_tex_fetch(__bindless_mapping, id);
if(id < 2048) /* TODO(dingto): Make this a variable */
@ -55,9 +55,11 @@ ccl_device void svm_node_tex_voxel(KernelGlobals *kg,
# else /* __CUDA_ARCH__ >= 300 */
r = volume_image_texture_3d(id, co.x, co.y, co.z);
# endif
# else /* __KERNEL_GPU__ */
# elif defined(__KERNEL_OPENCL__)
r = kernel_tex_image_interp_3d(kg, id, co.x, co.y, co.z);
# else
r = kernel_tex_image_interp_3d(id, co.x, co.y, co.z);
# endif
# endif /* __KERNEL_CUDA__ */
#else
float4 r = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
#endif

View File

@ -32,8 +32,9 @@ CCL_NAMESPACE_BEGIN
static float shutter_curve_eval(float x,
array<float>& shutter_curve)
{
if (shutter_curve.size() == 0)
if(shutter_curve.size() == 0) {
return 1.0f;
}
x *= shutter_curve.size();
int index = (int)x;

View File

@ -148,8 +148,9 @@ void ShaderNode::attributes(Shader *shader, AttributeRequestSet *attributes)
bool ShaderNode::equals(const ShaderNode& other)
{
if (type != other.type || bump != other.bump)
if(type != other.type || bump != other.bump) {
return false;
}
assert(inputs.size() == other.inputs.size());
@ -597,13 +598,13 @@ void ShaderGraph::deduplicate_nodes()
/* Try to merge this node with another one. */
ShaderNode *merge_with = NULL;
foreach(ShaderNode *other_node, candidates[node->type->name]) {
if (node != other_node && node->equals(*other_node)) {
if(node != other_node && node->equals(*other_node)) {
merge_with = other_node;
break;
}
}
/* If found an equivalent, merge; otherwise keep node for later merges */
if (merge_with != NULL) {
if(merge_with != NULL) {
for(int i = 0; i < node->outputs.size(); ++i) {
relink(node, node->outputs[i], merge_with->outputs[i]);
}

View File

@ -43,7 +43,7 @@ ImageManager::ImageManager(const DeviceInfo& info)
* be screwed on so many levels..
*/
DeviceType device_type = info.type;
if (device_type == DEVICE_MULTI) {
if(device_type == DEVICE_MULTI) {
device_type = info.multi_devices[0].type;
}
@ -1107,7 +1107,7 @@ void ImageManager::device_pack_images(Device *device,
int info_size = tex_num_images[IMAGE_DATA_TYPE_FLOAT4] + tex_num_images[IMAGE_DATA_TYPE_BYTE4]
+ tex_num_images[IMAGE_DATA_TYPE_FLOAT] + tex_num_images[IMAGE_DATA_TYPE_BYTE];
uint4 *info = dscene->tex_image_packed_info.resize(info_size);
uint4 *info = dscene->tex_image_packed_info.resize(info_size*2);
/* Byte4 Textures*/
type = IMAGE_DATA_TYPE_BYTE4;
@ -1130,7 +1130,9 @@ void ImageManager::device_pack_images(Device *device,
uint8_t options = pack_image_options(type, slot);
info[type_index_to_flattened_slot(slot, type)] = make_uint4(tex_img.data_width, tex_img.data_height, offset, options);
int index = type_index_to_flattened_slot(slot, type) * 2;
info[index] = make_uint4(tex_img.data_width, tex_img.data_height, offset, options);
info[index+1] = make_uint4(tex_img.data_depth, 0, 0, 0);
memcpy(pixels_byte4+offset, (void*)tex_img.data_pointer, tex_img.memory_size());
offset += tex_img.size();
@ -1159,7 +1161,10 @@ void ImageManager::device_pack_images(Device *device,
/* todo: support 3D textures, only CPU for now */
uint8_t options = pack_image_options(type, slot);
info[type_index_to_flattened_slot(slot, type)] = make_uint4(tex_img.data_width, tex_img.data_height, offset, options);
int index = type_index_to_flattened_slot(slot, type) * 2;
info[index] = make_uint4(tex_img.data_width, tex_img.data_height, offset, options);
info[index+1] = make_uint4(tex_img.data_depth, 0, 0, 0);
memcpy(pixels_float4+offset, (void*)tex_img.data_pointer, tex_img.memory_size());
offset += tex_img.size();
@ -1187,7 +1192,9 @@ void ImageManager::device_pack_images(Device *device,
uint8_t options = pack_image_options(type, slot);
info[type_index_to_flattened_slot(slot, type)] = make_uint4(tex_img.data_width, tex_img.data_height, offset, options);
int index = type_index_to_flattened_slot(slot, type) * 2;
info[index] = make_uint4(tex_img.data_width, tex_img.data_height, offset, options);
info[index+1] = make_uint4(tex_img.data_depth, 0, 0, 0);
memcpy(pixels_byte+offset, (void*)tex_img.data_pointer, tex_img.memory_size());
offset += tex_img.size();
@ -1216,7 +1223,10 @@ void ImageManager::device_pack_images(Device *device,
/* todo: support 3D textures, only CPU for now */
uint8_t options = pack_image_options(type, slot);
info[type_index_to_flattened_slot(slot, type)] = make_uint4(tex_img.data_width, tex_img.data_height, offset, options);
int index = type_index_to_flattened_slot(slot, type) * 2;
info[index] = make_uint4(tex_img.data_width, tex_img.data_height, offset, options);
info[index+1] = make_uint4(tex_img.data_depth, 0, 0, 0);
memcpy(pixels_float+offset, (void*)tex_img.data_pointer, tex_img.memory_size());
offset += tex_img.size();
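To carry a depth for 3D images, the packed info texture now holds two uint4 entries per image: entry 2*slot keeps the familiar (width, height, offset, options) and entry 2*slot + 1 stores (depth, 0, 0, 0), which is why every write above switches to index = type_index_to_flattened_slot(slot, type) * 2 and why kernel_tex_image_interp_3d() reads id*2 and id*2+1. A small sketch of the layout (uint4 here stands in for the Cycles type):

	// Sketch: two packed-info entries per image, matching the writes above.
	struct uint4 { unsigned x, y, z, w; };

	static void pack_image_info(uint4* info, int flattened_slot,
	                            unsigned width, unsigned height, unsigned depth,
	                            unsigned offset, unsigned options)
	{
		int index = flattened_slot * 2;
		info[index]     = {width, height, offset, options};  /* same data as before */
		info[index + 1] = {depth, 0u, 0u, 0u};                /* new: 3D image depth */
	}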

View File

@ -1462,7 +1462,7 @@ void MeshManager::device_update_mesh(Device *device,
else {
PackedBVH& pack = bvh->pack;
for(size_t i = 0; i < pack.prim_index.size(); ++i) {
if ((pack.prim_type[i] & PRIMITIVE_ALL_TRIANGLE) != 0) {
if((pack.prim_type[i] & PRIMITIVE_ALL_TRIANGLE) != 0) {
tri_prim_index[pack.prim_index[i]] = pack.prim_tri_index[i];
}
}

View File

@ -92,7 +92,7 @@ namespace Far {
if(vert_edges.size() == 2) {
float sharpness = refiner.getLevel(0).getEdgeSharpness(vert_edges[0]);
sharpness = std::min(sharpness, refiner.getLevel(0).getEdgeSharpness(vert_edges[1]));
sharpness = min(sharpness, refiner.getLevel(0).getEdgeSharpness(vert_edges[1]));
setBaseVertexSharpness(refiner, i, sharpness);
}

View File

@ -2378,8 +2378,9 @@ void EmissionNode::constant_fold(const ConstantFolder& folder)
ShaderInput *color_in = input("Color");
ShaderInput *strength_in = input("Strength");
if ((!color_in->link && color == make_float3(0.0f, 0.0f, 0.0f)) ||
(!strength_in->link && strength == 0.0f)) {
if((!color_in->link && color == make_float3(0.0f, 0.0f, 0.0f)) ||
(!strength_in->link && strength == 0.0f))
{
folder.discard();
}
}
@ -2430,8 +2431,9 @@ void BackgroundNode::constant_fold(const ConstantFolder& folder)
ShaderInput *color_in = input("Color");
ShaderInput *strength_in = input("Strength");
if ((!color_in->link && color == make_float3(0.0f, 0.0f, 0.0f)) ||
(!strength_in->link && strength == 0.0f)) {
if((!color_in->link && color == make_float3(0.0f, 0.0f, 0.0f)) ||
(!strength_in->link && strength == 0.0f))
{
folder.discard();
}
}
@ -4864,8 +4866,9 @@ void CurvesNode::constant_fold(const ConstantFolder& folder, ShaderInput *value_
/* evaluate fully constant node */
if(folder.all_inputs_constant()) {
if (curves.size() == 0)
if(curves.size() == 0) {
return;
}
float3 pos = (value - make_float3(min_x, min_x, min_x)) / (max_x - min_x);
float3 result;
@ -5140,7 +5143,7 @@ OSLNode* OSLNode::create(size_t num_inputs, const OSLNode *from)
char *node_memory = (char*) operator new(node_size + inputs_size);
memset(node_memory, 0, node_size + inputs_size);
if (!from) {
if(!from) {
return new(node_memory) OSLNode();
}
else {

View File

@ -253,7 +253,7 @@ vector<float> Object::motion_times()
bool Object::is_traceable()
{
/* Mesh itself can be empty, can skip all such objects. */
if (!bounds.valid() || bounds.size() == make_float3(0.0f, 0.0f, 0.0f)) {
if(!bounds.valid() || bounds.size() == make_float3(0.0f, 0.0f, 0.0f)) {
return false;
}
/* TODO(sergey): Check for mesh vertices/curves. visibility flags. */
@ -624,8 +624,9 @@ void ObjectManager::device_update_flags(Device *device,
void ObjectManager::device_update_patch_map_offsets(Device *device, DeviceScene *dscene, Scene *scene)
{
if (scene->objects.size() == 0)
if(scene->objects.size() == 0) {
return;
}
uint4* objects = (uint4*)dscene->objects.get_data();

View File

@ -825,7 +825,7 @@ void OSLCompiler::parameter(ShaderNode* node, const char *name)
// OSL does not support booleans, so convert to int
const array<bool>& value = node->get_bool_array(socket);
array<int> intvalue(value.size());
for (size_t i = 0; i < value.size(); i++)
for(size_t i = 0; i < value.size(); i++)
intvalue[i] = value[i];
ss->Parameter(uname, array_typedesc(TypeDesc::TypeInt, value.size()), intvalue.data());
break;
@ -861,8 +861,7 @@ void OSLCompiler::parameter(ShaderNode* node, const char *name)
// convert to tightly packed array since float3 has padding
const array<float3>& value = node->get_float3_array(socket);
array<float> fvalue(value.size() * 3);
for (size_t i = 0, j = 0; i < value.size(); i++)
{
for(size_t i = 0, j = 0; i < value.size(); i++) {
fvalue[j++] = value[i].x;
fvalue[j++] = value[i].y;
fvalue[j++] = value[i].z;

View File

@ -46,7 +46,7 @@ struct PatchMapQuadNode {
/* sets all the children to point to the patch of index */
void set_child(int index)
{
for (int i = 0; i < 4; i++) {
for(int i = 0; i < 4; i++) {
children[i] = index | PATCH_MAP_NODE_IS_SET | PATCH_MAP_NODE_IS_LEAF;
}
}

View File

@ -1407,8 +1407,9 @@ void init_test_curve(array<T> &buffer, T start, T end, int steps)
{
buffer.resize(steps);
for (int i = 0; i < steps; i++)
for(int i = 0; i < steps; i++) {
buffer[i] = lerp(start, end, float(i)/(steps-1));
}
}
/*

View File

@ -19,7 +19,7 @@
CCL_NAMESPACE_BEGIN
static Stats global_stats;
static Stats global_stats(Stats::static_init);
/* Internal API. */

View File

@ -23,7 +23,10 @@ CCL_NAMESPACE_BEGIN
class Stats {
public:
enum static_init_t { static_init = 0 };
Stats() : mem_used(0), mem_peak(0) {}
explicit Stats(static_init_t) {}
void mem_alloc(size_t size) {
atomic_add_z(&mem_used, size);
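The tag constructor added above deliberately leaves the members untouched. The usual motivation for this pattern, and presumably the one here, is static initialization order: global_stats is a file-scope object, and if other translation units record allocations through the guarded allocator while their own static constructors run, a default constructor executing later would wipe those counters. Objects with static storage are zero-initialized before any constructor runs, so the do-nothing constructor keeps whatever has already accumulated. A minimal sketch of the pattern (illustrative, not the Cycles header):

	#include <cstddef>

	// Sketch: tag-dispatched constructor that skips member initialization and
	// relies on static storage being zero-initialized at load time.
	class Stats {
	public:
		enum static_init_t { static_init = 0 };

		Stats() : mem_used(0), mem_peak(0) {}
		explicit Stats(static_init_t) {}   /* members intentionally left alone */

		std::size_t mem_used;
		std::size_t mem_peak;
	};

	static Stats global_stats(Stats::static_init);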

View File

@ -39,177 +39,188 @@ def write_sysinfo(filepath):
r = r[1:-1]
return r
output = open(filepath, 'w', encoding="utf-8")
with open(filepath, 'w', encoding="utf-8") as output:
try:
header = "= Blender %s System Information =\n" % bpy.app.version_string
lilies = "%s\n\n" % ((len(header) - 1) * "=")
output.write(lilies[:-1])
output.write(header)
output.write(lilies)
header = "= Blender %s System Information =\n" % bpy.app.version_string
lilies = "%s\n\n" % ((len(header) - 1) * "=")
output.write(lilies[:-1])
output.write(header)
output.write(lilies)
def title(text):
return "\n%s:\n%s" % (text, lilies)
def title(text):
return "\n%s:\n%s" % (text, lilies)
# build info
output.write(title("Blender"))
output.write(
"version: %s, branch: %s, commit date: %s %s, hash: %s, type: %s\n" %
(bpy.app.version_string,
prepr(bpy.app.build_branch),
prepr(bpy.app.build_commit_date),
prepr(bpy.app.build_commit_time),
prepr(bpy.app.build_hash),
prepr(bpy.app.build_type),
))
output.write("build date: %s, %s\n" % (prepr(bpy.app.build_date), prepr(bpy.app.build_time)))
output.write("platform: %s\n" % prepr(bpy.app.build_platform))
output.write("binary path: %s\n" % prepr(bpy.app.binary_path))
output.write("build cflags: %s\n" % prepr(bpy.app.build_cflags))
output.write("build cxxflags: %s\n" % prepr(bpy.app.build_cxxflags))
output.write("build linkflags: %s\n" % prepr(bpy.app.build_linkflags))
output.write("build system: %s\n" % prepr(bpy.app.build_system))
# python info
output.write(title("Python"))
output.write("version: %s\n" % (sys.version))
output.write("paths:\n")
for p in sys.path:
output.write("\t%r\n" % p)
output.write(title("Python (External Binary)"))
output.write("binary path: %s\n" % prepr(bpy.app.binary_path_python))
try:
py_ver = prepr(subprocess.check_output([
bpy.app.binary_path_python,
"--version",
]).strip())
except Exception as e:
py_ver = str(e)
output.write("version: %s\n" % py_ver)
del py_ver
output.write(title("Directories"))
output.write("scripts:\n")
for p in bpy.utils.script_paths():
output.write("\t%r\n" % p)
output.write("user scripts: %r\n" % (bpy.utils.script_path_user()))
output.write("pref scripts: %r\n" % (bpy.utils.script_path_pref()))
output.write("datafiles: %r\n" % (bpy.utils.user_resource('DATAFILES')))
output.write("config: %r\n" % (bpy.utils.user_resource('CONFIG')))
output.write("scripts : %r\n" % (bpy.utils.user_resource('SCRIPTS')))
output.write("autosave: %r\n" % (bpy.utils.user_resource('AUTOSAVE')))
output.write("tempdir: %r\n" % (bpy.app.tempdir))
output.write(title("FFmpeg"))
ffmpeg = bpy.app.ffmpeg
if ffmpeg.supported:
for lib in ("avcodec", "avdevice", "avformat", "avutil", "swscale"):
# build info
output.write(title("Blender"))
output.write(
"%s:%s%r\n" % (lib, " " * (10 - len(lib)),
getattr(ffmpeg, lib + "_version_string")))
else:
output.write("Blender was built without FFmpeg support\n")
"version: %s, branch: %s, commit date: %s %s, hash: %s, type: %s\n" %
(bpy.app.version_string,
prepr(bpy.app.build_branch),
prepr(bpy.app.build_commit_date),
prepr(bpy.app.build_commit_time),
prepr(bpy.app.build_hash),
prepr(bpy.app.build_type),
))
if bpy.app.build_options.sdl:
output.write(title("SDL"))
output.write("Version: %s\n" % bpy.app.sdl.version_string)
output.write("Loading method: ")
if bpy.app.build_options.sdl_dynload:
output.write("dynamically loaded by Blender (WITH_SDL_DYNLOAD=ON)\n")
else:
output.write("linked (WITH_SDL_DYNLOAD=OFF)\n")
if not bpy.app.sdl.available:
output.write("WARNING: Blender could not load SDL library\n")
output.write("build date: %s, %s\n" % (prepr(bpy.app.build_date), prepr(bpy.app.build_time)))
output.write("platform: %s\n" % prepr(bpy.app.build_platform))
output.write("binary path: %s\n" % prepr(bpy.app.binary_path))
output.write("build cflags: %s\n" % prepr(bpy.app.build_cflags))
output.write("build cxxflags: %s\n" % prepr(bpy.app.build_cxxflags))
output.write("build linkflags: %s\n" % prepr(bpy.app.build_linkflags))
output.write("build system: %s\n" % prepr(bpy.app.build_system))
output.write(title("Other Libraries"))
ocio = bpy.app.ocio
output.write("OpenColorIO: ")
if ocio.supported:
if ocio.version_string == "fallback":
output.write("Blender was built with OpenColorIO, " +
"but it currently uses fallback color management.\n")
else:
output.write("%s\n" % (ocio.version_string))
else:
output.write("Blender was built without OpenColorIO support\n")
# python info
output.write(title("Python"))
output.write("version: %s\n" % (sys.version))
output.write("paths:\n")
for p in sys.path:
output.write("\t%r\n" % p)
oiio = bpy.app.oiio
output.write("OpenImageIO: ")
if ocio.supported:
output.write("%s\n" % (oiio.version_string))
else:
output.write("Blender was built without OpenImageIO support\n")
output.write(title("Python (External Binary)"))
output.write("binary path: %s\n" % prepr(bpy.app.binary_path_python))
try:
py_ver = prepr(subprocess.check_output([
bpy.app.binary_path_python,
"--version",
]).strip())
except Exception as e:
py_ver = str(e)
output.write("version: %s\n" % py_ver)
del py_ver
output.write("OpenShadingLanguage: ")
if bpy.app.build_options.cycles:
if bpy.app.build_options.cycles_osl:
from _cycles import osl_version_string
output.write("%s\n" % (osl_version_string))
else:
output.write("Blender was built without OpenShadingLanguage support in Cycles\n")
else:
output.write("Blender was built without Cycles support\n")
output.write(title("Directories"))
output.write("scripts:\n")
for p in bpy.utils.script_paths():
output.write("\t%r\n" % p)
output.write("user scripts: %r\n" % (bpy.utils.script_path_user()))
output.write("pref scripts: %r\n" % (bpy.utils.script_path_pref()))
output.write("datafiles: %r\n" % (bpy.utils.user_resource('DATAFILES')))
output.write("config: %r\n" % (bpy.utils.user_resource('CONFIG')))
output.write("scripts : %r\n" % (bpy.utils.user_resource('SCRIPTS')))
output.write("autosave: %r\n" % (bpy.utils.user_resource('AUTOSAVE')))
output.write("tempdir: %r\n" % (bpy.app.tempdir))
openvdb = bpy.app.openvdb
output.write("OpenVDB: ")
if openvdb.supported:
output.write("%s\n" % openvdb.version_string)
else:
output.write("Blender was built without OpenVDB support\n")
output.write(title("FFmpeg"))
ffmpeg = bpy.app.ffmpeg
if ffmpeg.supported:
for lib in ("avcodec", "avdevice", "avformat", "avutil", "swscale"):
output.write(
"%s:%s%r\n" % (lib, " " * (10 - len(lib)),
getattr(ffmpeg, lib + "_version_string")))
else:
output.write("Blender was built without FFmpeg support\n")
alembic = bpy.app.alembic
output.write("Alembic: ")
if alembic.supported:
output.write("%s\n" % alembic.version_string)
else:
output.write("Blender was built without Alembic support\n")
if bpy.app.build_options.sdl:
output.write(title("SDL"))
output.write("Version: %s\n" % bpy.app.sdl.version_string)
output.write("Loading method: ")
if bpy.app.build_options.sdl_dynload:
output.write("dynamically loaded by Blender (WITH_SDL_DYNLOAD=ON)\n")
else:
output.write("linked (WITH_SDL_DYNLOAD=OFF)\n")
if not bpy.app.sdl.available:
output.write("WARNING: Blender could not load SDL library\n")
if not bpy.app.build_options.sdl:
output.write("SDL: Blender was built without SDL support\n")
output.write(title("Other Libraries"))
ocio = bpy.app.ocio
output.write("OpenColorIO: ")
if ocio.supported:
if ocio.version_string == "fallback":
output.write("Blender was built with OpenColorIO, " +
"but it currently uses fallback color management.\n")
else:
output.write("%s\n" % (ocio.version_string))
else:
output.write("Blender was built without OpenColorIO support\n")
if bpy.app.background:
output.write("\nOpenGL: missing, background mode\n")
else:
output.write(title("OpenGL"))
version = bgl.glGetString(bgl.GL_RENDERER)
output.write("renderer:\t%r\n" % version)
output.write("vendor:\t\t%r\n" % (bgl.glGetString(bgl.GL_VENDOR)))
output.write("version:\t%r\n" % (bgl.glGetString(bgl.GL_VERSION)))
output.write("extensions:\n")
oiio = bpy.app.oiio
output.write("OpenImageIO: ")
if ocio.supported:
output.write("%s\n" % (oiio.version_string))
else:
output.write("Blender was built without OpenImageIO support\n")
glext = sorted(bgl.glGetString(bgl.GL_EXTENSIONS).split())
for l in glext:
output.write("\t%s\n" % l)
output.write("OpenShadingLanguage: ")
if bpy.app.build_options.cycles:
if bpy.app.build_options.cycles_osl:
from _cycles import osl_version_string
output.write("%s\n" % (osl_version_string))
else:
output.write("Blender was built without OpenShadingLanguage support in Cycles\n")
else:
output.write("Blender was built without Cycles support\n")
output.write(title("Implementation Dependent OpenGL Limits"))
limit = bgl.Buffer(bgl.GL_INT, 1)
bgl.glGetIntegerv(bgl.GL_MAX_TEXTURE_UNITS, limit)
output.write("Maximum Fixed Function Texture Units:\t%d\n" % limit[0])
bgl.glGetIntegerv(bgl.GL_MAX_ELEMENTS_VERTICES, limit)
output.write("Maximum DrawElements Vertices:\t%d\n" % limit[0])
bgl.glGetIntegerv(bgl.GL_MAX_ELEMENTS_INDICES, limit)
output.write("Maximum DrawElements Indices:\t%d\n" % limit[0])
openvdb = bpy.app.openvdb
output.write("OpenVDB: ")
if openvdb.supported:
output.write("%s\n" % openvdb.version_string)
else:
output.write("Blender was built without OpenVDB support\n")
output.write("\nGLSL:\n")
bgl.glGetIntegerv(bgl.GL_MAX_VARYING_FLOATS, limit)
output.write("Maximum Varying Floats:\t%d\n" % limit[0])
bgl.glGetIntegerv(bgl.GL_MAX_VERTEX_ATTRIBS, limit)
output.write("Maximum Vertex Attributes:\t%d\n" % limit[0])
bgl.glGetIntegerv(bgl.GL_MAX_VERTEX_UNIFORM_COMPONENTS, limit)
output.write("Maximum Vertex Uniform Components:\t%d\n" % limit[0])
bgl.glGetIntegerv(bgl.GL_MAX_FRAGMENT_UNIFORM_COMPONENTS, limit)
output.write("Maximum Fragment Uniform Components:\t%d\n" % limit[0])
bgl.glGetIntegerv(bgl.GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS, limit)
output.write("Maximum Vertex Image Units:\t%d\n" % limit[0])
bgl.glGetIntegerv(bgl.GL_MAX_TEXTURE_IMAGE_UNITS, limit)
output.write("Maximum Fragment Image Units:\t%d\n" % limit[0])
bgl.glGetIntegerv(bgl.GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS, limit)
output.write("Maximum Pipeline Image Units:\t%d\n" % limit[0])
alembic = bpy.app.alembic
output.write("Alembic: ")
if alembic.supported:
output.write("%s\n" % alembic.version_string)
else:
output.write("Blender was built without Alembic support\n")
if bpy.app.build_options.cycles:
import cycles
output.write(title("Cycles"))
output.write(cycles.engine.system_info())
if not bpy.app.build_options.sdl:
output.write("SDL: Blender was built without SDL support\n")
output.close()
if bpy.app.background:
output.write("\nOpenGL: missing, background mode\n")
else:
output.write(title("OpenGL"))
version = bgl.glGetString(bgl.GL_RENDERER)
output.write("renderer:\t%r\n" % version)
output.write("vendor:\t\t%r\n" % (bgl.glGetString(bgl.GL_VENDOR)))
output.write("version:\t%r\n" % (bgl.glGetString(bgl.GL_VERSION)))
output.write("extensions:\n")
glext = sorted(bgl.glGetString(bgl.GL_EXTENSIONS).split())
for l in glext:
output.write("\t%s\n" % l)
output.write(title("Implementation Dependent OpenGL Limits"))
limit = bgl.Buffer(bgl.GL_INT, 1)
bgl.glGetIntegerv(bgl.GL_MAX_TEXTURE_UNITS, limit)
output.write("Maximum Fixed Function Texture Units:\t%d\n" % limit[0])
bgl.glGetIntegerv(bgl.GL_MAX_ELEMENTS_VERTICES, limit)
output.write("Maximum DrawElements Vertices:\t%d\n" % limit[0])
bgl.glGetIntegerv(bgl.GL_MAX_ELEMENTS_INDICES, limit)
output.write("Maximum DrawElements Indices:\t%d\n" % limit[0])
output.write("\nGLSL:\n")
bgl.glGetIntegerv(bgl.GL_MAX_VARYING_FLOATS, limit)
output.write("Maximum Varying Floats:\t%d\n" % limit[0])
bgl.glGetIntegerv(bgl.GL_MAX_VERTEX_ATTRIBS, limit)
output.write("Maximum Vertex Attributes:\t%d\n" % limit[0])
bgl.glGetIntegerv(bgl.GL_MAX_VERTEX_UNIFORM_COMPONENTS, limit)
output.write("Maximum Vertex Uniform Components:\t%d\n" % limit[0])
bgl.glGetIntegerv(bgl.GL_MAX_FRAGMENT_UNIFORM_COMPONENTS, limit)
output.write("Maximum Fragment Uniform Components:\t%d\n" % limit[0])
bgl.glGetIntegerv(bgl.GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS, limit)
output.write("Maximum Vertex Image Units:\t%d\n" % limit[0])
bgl.glGetIntegerv(bgl.GL_MAX_TEXTURE_IMAGE_UNITS, limit)
output.write("Maximum Fragment Image Units:\t%d\n" % limit[0])
bgl.glGetIntegerv(bgl.GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS, limit)
output.write("Maximum Pipeline Image Units:\t%d\n" % limit[0])
if bpy.app.build_options.cycles:
import cycles
output.write(title("Cycles"))
output.write(cycles.engine.system_info())
import addon_utils
addon_utils.modules()
output.write(title("Enabled add-ons"))
for addon in bpy.context.user_preferences.addons.keys():
addon_mod = addon_utils.addons_fake_modules.get(addon, None)
if addon_mod is None:
output.write("%s (MISSING)\n" % (addon))
else:
output.write("%s (version: %s, path: %s)\n" %
(addon, addon_mod.bl_info.get('version', "UNKNOWN"), addon_mod.__file__))
except Exception as e:
output.write("ERROR: %s\n" % e)

View File

@ -336,6 +336,9 @@ class DATA_PT_modifiers(ModifierButtonsPanel, Panel):
col = split.column(align=True)
col.label(text="Direction:")
col.prop(md, "direction", text="")
if md.direction in {'X', 'Y', 'Z', 'RGB_TO_XYZ'}:
col.label(text="Space:")
col.prop(md, "space", text="")
col.label(text="Vertex Group:")
col.prop_search(md, "vertex_group", ob, "vertex_groups", text="")

View File

@ -52,6 +52,12 @@ def gpencil_stroke_placement_settings(context, layout):
row.active = getattr(ts, propname) in {'SURFACE', 'STROKE'}
row.prop(ts, "use_gpencil_stroke_endpoints")
if context.scene.tool_settings.gpencil_stroke_placement_view3d == 'CURSOR':
row = col.row(align=True)
row.label("Lock axis:")
row = col.row(align=True)
row.prop(ts.gpencil_sculpt, "lockaxis", expand=True)
def gpencil_active_brush_settings_simple(context, layout):
brush = context.active_gpencil_brush

View File

@ -3161,8 +3161,10 @@ class VIEW3D_PT_view3d_display(Panel):
row.prop(view, "show_axis_z", text="Z", toggle=True)
sub = col.column(align=True)
sub.active = (display_all and view.show_floor)
sub.prop(view, "grid_lines", text="Lines")
sub.active = bool(view.show_floor or view.region_quadviews or not view.region_3d.is_perspective)
subsub = sub.column(align=True)
subsub.active = view.show_floor
subsub.prop(view, "grid_lines", text="Lines")
sub.prop(view, "grid_scale", text="Scale")
subsub = sub.column(align=True)
subsub.active = scene.unit_settings.system == 'NONE'

View File

@ -49,6 +49,7 @@
#include "BKE_context.h"
#include "BKE_main.h"
#include "BKE_screen.h"
#include "BKE_sound.h"
#include "RNA_access.h"
@ -882,6 +883,7 @@ Main *CTX_data_main(const bContext *C)
void CTX_data_main_set(bContext *C, Main *bmain)
{
C->data.main = bmain;
BKE_sound_init_main(bmain);
}
Scene *CTX_data_scene(const bContext *C)

View File

@ -2389,7 +2389,7 @@ Mesh *BKE_mesh_new_from_object(
/* are we an object material or data based? */
tmpmesh->mat[i] = give_current_material(ob, i + 1);
if (((ob->matbits[i] && ob->matbits) || do_mat_id_data_us) && tmpmesh->mat[i]) {
if (((ob->matbits && ob->matbits[i]) || do_mat_id_data_us) && tmpmesh->mat[i]) {
id_us_plus(&tmpmesh->mat[i]->id);
}
}

View File

@ -110,8 +110,18 @@ void TextureBaseOperation::executePixelSampled(float output[4], float x, float y
int retval;
const float cx = this->getWidth() / 2;
const float cy = this->getHeight() / 2;
const float u = (x - cx) / this->getWidth() * 2;
const float v = (y - cy) / this->getHeight() * 2;
float u = (x - cx) / this->getWidth() * 2;
float v = (y - cy) / this->getHeight() * 2;
/* When no interpolation/filtering happens in multitex(), force nearest interpolation.
* We do it here because (a) we can't easily tell multitex() that we want nearest
* interpolation and (b) in such a configuration multitex() simply floors the value,
* which often produces artifacts.
*/
if ((m_texture->imaflag & TEX_INTERPOL) == 0) {
u += 0.5f / cx;
v += 0.5f / cy;
}
this->m_inputSize->readSampled(textureSize, x, y, sampler);
this->m_inputOffset->readSampled(textureOffset, x, y, sampler);
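The half-texel offset above works because shifting a coordinate by half a texel before a floor is equivalent to rounding to the nearest texel, which is what nearest interpolation should produce. A minimal sketch of that equivalence, with hypothetical names that are not part of this commit:

#include <cassert>
#include <cmath>

/* nearest-texel index when the sampler can only floor the coordinate:
 * floor(x + 0.5) selects the same texel as round(x) */
static int nearest_texel(float x)
{
    return (int)std::floor(x + 0.5f);
}

int main()
{
    assert(nearest_texel(3.4f) == 3); /* plain floor would also give 3 */
    assert(nearest_texel(3.6f) == 4); /* plain floor would give 3 and shift the sample */
    return 0;
}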

View File

@ -40,6 +40,7 @@
#include "BLI_math.h"
#include "BLI_utildefines.h"
#include "BLI_rand.h"
#include "BLI_math_geom.h"
#include "BLT_translation.h"
@ -161,6 +162,7 @@ typedef struct tGPsdata {
bGPDpalettecolor *palettecolor; /* current palette color */
bGPDbrush *brush; /* current drawing brush */
short straight[2]; /* 1: line horizontal, 2: line vertical, other: not defined, second element position */
int lock_axis; /* lock drawing to one axis */
} tGPsdata;
/* ------ */
@ -279,6 +281,64 @@ static bool gp_stroke_filtermval(tGPsdata *p, const int mval[2], int pmval[2])
return false;
}
/* reproject the points of the stroke onto a plane locked to an axis, to avoid stroke offset */
static void gp_project_points_to_plane(RegionView3D *rv3d, bGPDstroke *gps, const float origin[3], const int axis)
{
float plane_normal[3];
float vn[3];
float ray[3];
float rpoint[3];
/* normal vector for a plane locked to axis */
zero_v3(plane_normal);
plane_normal[axis] = 1.0f;
/* Reproject the points in the plane */
for (int i = 0; i < gps->totpoints; i++) {
bGPDspoint *pt = &gps->points[i];
/* get a vector from the point along the current view direction of the viewport */
ED_view3d_global_to_vector(rv3d, &pt->x, vn);
/* calculate a far end point of the line to create a ray that crosses the plane */
mul_v3_fl(vn, -50.0f);
add_v3_v3v3(ray, &pt->x, vn);
/* if the line never intersect, the point is not changed */
if (isect_line_plane_v3(rpoint, &pt->x, ray, origin, plane_normal)) {
copy_v3_v3(&pt->x, rpoint);
}
}
}
/* reproject stroke onto a plane locked to an axis, placed at the 3D cursor location */
static void gp_reproject_toplane(tGPsdata *p, bGPDstroke *gps)
{
bGPdata *gpd = p->gpd;
float origin[3];
float cursor[3];
RegionView3D *rv3d = p->ar->regiondata;
/* verify the stroke mode is CURSOR 3d space mode */
if ((gpd->sbuffer_sflag & GP_STROKE_3DSPACE) == 0) {
return;
}
if ((*p->align_flag & GP_PROJECT_VIEWSPACE) == 0) {
return;
}
if ((*p->align_flag & GP_PROJECT_DEPTH_VIEW) || (*p->align_flag & GP_PROJECT_DEPTH_STROKE)) {
return;
}
/* get the 3D cursor and set the origin for the locked axis only. Uses axis - 1 because the enum for XYZ starts at 1 */
gp_get_3d_reference(p, cursor);
zero_v3(origin);
origin[p->lock_axis - 1] = cursor[p->lock_axis - 1];
gp_project_points_to_plane(rv3d, gps, origin, p->lock_axis - 1);
}
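The reprojection above boils down to a standard ray/plane intersection: for a ray starting at P with direction d, and a plane through O with normal n, the hit parameter is t = dot(O - P, n) / dot(d, n), valid whenever dot(d, n) is not zero. A minimal standalone sketch of that formula, using hypothetical names rather than Blender's math API:

#include <cmath>

struct Vec3 { float x, y, z; };

static float dot(const Vec3 &a, const Vec3 &b)
{
    return a.x * b.x + a.y * b.y + a.z * b.z;
}

/* intersect the ray P + t * d with the plane through O with normal n;
 * returns false when the ray is (nearly) parallel to the plane */
static bool isect_ray_plane(const Vec3 &P, const Vec3 &d,
                            const Vec3 &O, const Vec3 &n,
                            Vec3 *r_hit)
{
    const float denom = dot(d, n);
    if (std::fabs(denom) < 1e-6f) {
        return false;
    }
    const Vec3 PO = {O.x - P.x, O.y - P.y, O.z - P.z};
    const float t = dot(PO, n) / denom;
    r_hit->x = P.x + t * d.x;
    r_hit->y = P.y + t * d.y;
    r_hit->z = P.z + t * d.z;
    return true;
}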
/* convert screen-coordinates to buffer-coordinates */
/* XXX this method needs a total overhaul! */
static void gp_stroke_convertcoords(tGPsdata *p, const int mval[2], float out[3], float *depth)
@ -582,6 +642,10 @@ static short gp_stroke_addpoint(tGPsdata *p, const int mval[2], float pressure,
/* convert screen-coordinates to appropriate coordinates (and store them) */
gp_stroke_convertcoords(p, &pt->x, &pts->x, NULL);
/* if axis locked, reproject to plane locked (only in 3d space) */
if (p->lock_axis > GP_LOCKAXIS_NONE) {
gp_reproject_toplane(p, gps);
}
/* if parented change position relative to parent object */
if (gpl->parent != NULL) {
gp_apply_parent_point(gpl, pts);
@ -680,7 +744,6 @@ static void gp_stroke_simplify(tGPsdata *p)
MEM_freeN(old_points);
}
/* make a new stroke from the buffer data */
static void gp_stroke_newfrombuffer(tGPsdata *p)
{
@ -757,6 +820,10 @@ static void gp_stroke_newfrombuffer(tGPsdata *p)
/* convert screen-coordinates to appropriate coordinates (and store them) */
gp_stroke_convertcoords(p, &ptc->x, &pt->x, NULL);
/* if axis locked, reproject to plane locked (only in 3d space) */
if (p->lock_axis > GP_LOCKAXIS_NONE) {
gp_reproject_toplane(p, gps);
}
/* if parented change position relative to parent object */
if (gpl->parent != NULL) {
gp_apply_parent_point(gpl, pt);
@ -776,6 +843,10 @@ static void gp_stroke_newfrombuffer(tGPsdata *p)
/* convert screen-coordinates to appropriate coordinates (and store them) */
gp_stroke_convertcoords(p, &ptc->x, &pt->x, NULL);
/* if axis locked, reproject to plane locked (only in 3d space) */
if (p->lock_axis > GP_LOCKAXIS_NONE) {
gp_reproject_toplane(p, gps);
}
/* if parented change position relative to parent object */
if (gpl->parent != NULL) {
gp_apply_parent_point(gpl, pt);
@ -794,6 +865,10 @@ static void gp_stroke_newfrombuffer(tGPsdata *p)
/* convert screen-coordinates to appropriate coordinates (and store them) */
gp_stroke_convertcoords(p, &ptc->x, &pt->x, NULL);
/* if axis locked, reproject to plane locked (only in 3d space) */
if (p->lock_axis > GP_LOCKAXIS_NONE) {
gp_reproject_toplane(p, gps);
}
/* if parented change position relative to parent object */
if (gpl->parent != NULL) {
gp_apply_parent_point(gpl, pt);
@ -806,30 +881,30 @@ static void gp_stroke_newfrombuffer(tGPsdata *p)
}
else {
float *depth_arr = NULL;
/* get an array of depths, far depths are blended */
if (gpencil_project_check(p)) {
int mval[2], mval_prev[2] = {0};
int mval[2], mval_prev[2] = { 0 };
int interp_depth = 0;
int found_depth = 0;
depth_arr = MEM_mallocN(sizeof(float) * gpd->sbuffer_size, "depth_points");
for (i = 0, ptc = gpd->sbuffer; i < gpd->sbuffer_size; i++, ptc++, pt++) {
copy_v2_v2_int(mval, &ptc->x);
if ((ED_view3d_autodist_depth(p->ar, mval, depth_margin, depth_arr + i) == 0) &&
(i && (ED_view3d_autodist_depth_seg(p->ar, mval, mval_prev, depth_margin + 1, depth_arr + i) == 0)))
(i && (ED_view3d_autodist_depth_seg(p->ar, mval, mval_prev, depth_margin + 1, depth_arr + i) == 0)))
{
interp_depth = true;
}
else {
found_depth = true;
}
copy_v2_v2_int(mval_prev, mval);
}
if (found_depth == false) {
/* eeh... not much we can do.. :/, ignore depth in this case, use the 3D cursor */
for (i = gpd->sbuffer_size - 1; i >= 0; i--)
@ -840,54 +915,54 @@ static void gp_stroke_newfrombuffer(tGPsdata *p)
/* remove all info between the valid endpoints */
int first_valid = 0;
int last_valid = 0;
for (i = 0; i < gpd->sbuffer_size; i++) {
if (depth_arr[i] != FLT_MAX)
break;
}
first_valid = i;
for (i = gpd->sbuffer_size - 1; i >= 0; i--) {
if (depth_arr[i] != FLT_MAX)
break;
}
last_valid = i;
/* invalidate non-endpoints, so only blend between first and last */
for (i = first_valid + 1; i < last_valid; i++)
depth_arr[i] = FLT_MAX;
interp_depth = true;
}
if (interp_depth) {
interp_sparse_array(depth_arr, gpd->sbuffer_size, FLT_MAX);
}
}
}
pt = gps->points;
/* convert all points (normal behavior) */
for (i = 0, ptc = gpd->sbuffer; i < gpd->sbuffer_size && ptc; i++, ptc++, pt++) {
/* convert screen-coordinates to appropriate coordinates (and store them) */
gp_stroke_convertcoords(p, &ptc->x, &pt->x, depth_arr ? depth_arr + i : NULL);
/* copy pressure and time */
pt->pressure = ptc->pressure;
pt->strength = ptc->strength;
CLAMP(pt->strength, GPENCIL_STRENGTH_MIN, 1.0f);
pt->time = ptc->time;
}
/* subdivide the stroke */
if (sublevel > 0) {
int totpoints = gps->totpoints;
for (i = 0; i < sublevel; i++) {
/* we're adding one new point between each pair of verts on each step */
totpoints += totpoints - 1;
gp_subdivide_stroke(gps, totpoints);
}
}
@ -896,8 +971,8 @@ static void gp_stroke_newfrombuffer(tGPsdata *p)
gp_randomize_stroke(gps, brush);
}
/* smooth stroke after subdiv - only if there's something to do
* for each iteration, the factor is reduced to get a better smoothing without changing too much
/* smooth stroke after subdiv - only if there's something to do
* for each iteration, the factor is reduced to get a better smoothing without changing too much
* the original stroke
*/
if (brush->draw_smoothfac > 0.0f) {
@ -910,6 +985,11 @@ static void gp_stroke_newfrombuffer(tGPsdata *p)
reduce += 0.25f; // reduce the factor
}
}
/* if axis locked, reproject to plane locked (only in 3d space) */
if (p->lock_axis > GP_LOCKAXIS_NONE) {
gp_reproject_toplane(p, gps);
}
/* if parented change position relative to parent object */
if (gpl->parent != NULL) {
gp_apply_parent(gpl, gps);
@ -1469,6 +1549,8 @@ static bool gp_session_initdata(bContext *C, tGPsdata *p)
copy_v4_v4(pdata->scolor, palcolor->color);
copy_v4_v4(pdata->sfill, palcolor->fill);
pdata->sflag = palcolor->flag;
/* lock axis */
p->lock_axis = ts->gp_sculpt.lock_axis;
return 1;
}

View File

@ -1574,7 +1574,7 @@ static int make_links_data_exec(bContext *C, wmOperator *op)
Material *ma = give_current_material(ob_src, a + 1);
assign_material(ob_dst, ma, a + 1, BKE_MAT_ASSIGN_USERPREF); /* also works with ma==NULL */
}
DAG_id_tag_update(&ob_dst->id, 0);
DAG_id_tag_update(&ob_dst->id, OB_RECALC_DATA);
break;
case MAKE_LINKS_ANIMDATA:
BKE_animdata_copy_id((ID *)ob_dst, (ID *)ob_src, false);

View File

@ -2086,7 +2086,13 @@ void ED_screen_animation_timer(bContext *C, int redraws, int refresh, int sync,
sad->flag |= (sync == 0) ? ANIMPLAY_FLAG_NO_SYNC : (sync == 1) ? ANIMPLAY_FLAG_SYNC : 0;
ScrArea *sa = CTX_wm_area(C);
sad->from_anim_edit = (ELEM(sa->spacetype, SPACE_IPO, SPACE_ACTION, SPACE_NLA, SPACE_TIME));
char spacetype = -1;
if (sa)
spacetype = sa->spacetype;
sad->from_anim_edit = (ELEM(spacetype, SPACE_IPO, SPACE_ACTION, SPACE_NLA, SPACE_TIME));
screen->animtimer->customdata = sad;

View File

@ -4197,7 +4197,7 @@ static int space_context_cycle_poll(bContext *C)
/**
* Helper to get the correct RNA pointer/property pair for changing
* the display context of active space type in \sa.
* the display context of active space type in \a sa.
*/
static void context_cycle_prop_get(
bScreen *screen, const ScrArea *sa,
@ -4214,6 +4214,9 @@ static void context_cycle_prop_get(
RNA_pointer_create(NULL, &RNA_UserPreferences, &U, r_ptr);
propname = "active_section";
break;
default:
BLI_assert(0);
propname = "";
}
*r_prop = RNA_struct_find_property(r_ptr, propname);

View File

@ -383,7 +383,7 @@ typedef struct DisplaceModifierData {
int direction;
char defgrp_name[64]; /* MAX_VGROUP_NAME */
float midlevel;
int pad;
int space;
} DisplaceModifierData;
/* DisplaceModifierData->direction */
@ -404,6 +404,12 @@ enum {
MOD_DISP_MAP_UV = 3,
};
/* DisplaceModifierData->space */
enum {
MOD_DISP_SPACE_LOCAL = 0,
MOD_DISP_SPACE_GLOBAL = 1,
};
typedef struct UVProjectModifierData {
ModifierData modifier;

View File

@ -1127,6 +1127,14 @@ typedef enum eGP_EditBrush_Types {
TOT_GP_EDITBRUSH_TYPES
} eGP_EditBrush_Types;
/* Lock axis options */
typedef enum eGP_Lockaxis_Types {
GP_LOCKAXIS_NONE = 0,
GP_LOCKAXIS_X = 1,
GP_LOCKAXIS_Y = 2,
GP_LOCKAXIS_Z = 3
} eGP_Lockaxis_Types;
/* Settings for a GPencil Stroke Sculpting Brush */
typedef struct GP_EditBrush_Data {
short size; /* radius of brush */
@ -1157,7 +1165,7 @@ typedef struct GP_BrushEdit_Settings {
int brushtype; /* eGP_EditBrush_Types */
int flag; /* eGP_BrushEdit_SettingsFlag */
char pad[4];
int lock_axis; /* lock drawing to one axis */
float alpha; /* alpha factor for selection color */
} GP_BrushEdit_Settings;

View File

@ -2080,6 +2080,12 @@ static void rna_def_modifier_displace(BlenderRNA *brna)
{0, NULL, 0, NULL, NULL}
};
static EnumPropertyItem prop_space_items[] = {
{MOD_DISP_SPACE_LOCAL, "LOCAL", 0, "Local", "Direction is defined in local coordinates"},
{MOD_DISP_SPACE_GLOBAL, "GLOBAL", 0, "Global", "Direction is defined in global coordinates"},
{0, NULL, 0, NULL, NULL}
};
srna = RNA_def_struct(brna, "DisplaceModifier", "Modifier");
RNA_def_struct_ui_text(srna, "Displace Modifier", "Displacement modifier");
RNA_def_struct_sdna(srna, "DisplaceModifierData");
@ -2110,6 +2116,11 @@ static void rna_def_modifier_displace(BlenderRNA *brna)
RNA_def_property_ui_text(prop, "Direction", "");
RNA_def_property_update(prop, 0, "rna_Modifier_update");
prop = RNA_def_property(srna, "space", PROP_ENUM, PROP_NONE);
RNA_def_property_enum_items(prop, prop_space_items);
RNA_def_property_ui_text(prop, "Space", "");
RNA_def_property_update(prop, 0, "rna_Modifier_dependency_update");
rna_def_modifier_generic_map_info(srna);
}

View File

@ -64,6 +64,14 @@ EnumPropertyItem rna_enum_gpencil_sculpt_brush_items[] = {
{ 0, NULL, 0, NULL, NULL }
};
EnumPropertyItem rna_enum_gpencil_lockaxis_items[] = {
{ GP_LOCKAXIS_NONE, "GP_LOCKAXIS_NONE", 0, "None", "" },
{ GP_LOCKAXIS_X, "GP_LOCKAXIS_X", 0, "X", "Project strokes to plane locked to X" },
{ GP_LOCKAXIS_Y, "GP_LOCKAXIS_Y", 0, "Y", "Project strokes to plane locked to Y" },
{ GP_LOCKAXIS_Z, "GP_LOCKAXIS_Z", 0, "Z", "Project strokes to plane locked to Z" },
{ 0, NULL, 0, NULL, NULL }
};
EnumPropertyItem rna_enum_symmetrize_direction_items[] = {
{BMO_SYMMETRIZE_NEGATIVE_X, "NEGATIVE_X", 0, "-X to +X", ""},
{BMO_SYMMETRIZE_POSITIVE_X, "POSITIVE_X", 0, "+X to -X", ""},
@ -734,6 +742,13 @@ static void rna_def_gpencil_sculpt(BlenderRNA *brna)
RNA_def_property_ui_text(prop, "Alpha", "Alpha value for selected vertices");
RNA_def_property_update(prop, NC_SCENE | ND_TOOLSETTINGS, "rna_GPencil_update");
/* lock axis */
prop = RNA_def_property(srna, "lockaxis", PROP_ENUM, PROP_NONE);
RNA_def_property_enum_sdna(prop, NULL, "lock_axis");
RNA_def_property_enum_items(prop, rna_enum_gpencil_lockaxis_items);
RNA_def_property_ui_text(prop, "Lock", "");
RNA_def_property_update(prop, NC_SCENE | ND_TOOLSETTINGS, NULL);
/* brush */
srna = RNA_def_struct(brna, "GPencilSculptBrush", NULL);
RNA_def_struct_sdna(srna, "GP_EditBrush_Data");

View File

@ -46,6 +46,7 @@
#include "BKE_modifier.h"
#include "BKE_texture.h"
#include "BKE_deform.h"
#include "BKE_object.h"
#include "depsgraph_private.h"
#include "MEM_guardedalloc.h"
@ -65,6 +66,7 @@ static void initData(ModifierData *md)
dmd->strength = 1;
dmd->direction = MOD_DISP_DIR_NOR;
dmd->midlevel = 0.5;
dmd->space = MOD_DISP_SPACE_LOCAL;
}
static void copyData(ModifierData *md, ModifierData *target)
@ -171,10 +173,13 @@ static void updateDepgraph(ModifierData *md, DagForest *forest,
}
if (dmd->texmapping == MOD_DISP_MAP_GLOBAL)
if (dmd->texmapping == MOD_DISP_MAP_GLOBAL ||
(ELEM(dmd->direction, MOD_DISP_DIR_X, MOD_DISP_DIR_Y, MOD_DISP_DIR_Z, MOD_DISP_DIR_RGB_XYZ) &&
dmd->space == MOD_DISP_SPACE_GLOBAL))
{
dag_add_relation(forest, obNode, obNode,
DAG_RL_DATA_DATA | DAG_RL_OB_DATA, "Displace Modifier");
}
}
static void updateDepsgraph(ModifierData *md,
@ -187,7 +192,10 @@ static void updateDepsgraph(ModifierData *md,
if (dmd->map_object != NULL && dmd->texmapping == MOD_DISP_MAP_OBJECT) {
DEG_add_object_relation(node, dmd->map_object, DEG_OB_COMP_TRANSFORM, "Displace Modifier");
}
if (dmd->texmapping == MOD_DISP_MAP_GLOBAL) {
if (dmd->texmapping == MOD_DISP_MAP_GLOBAL ||
(ELEM(dmd->direction, MOD_DISP_DIR_X, MOD_DISP_DIR_Y, MOD_DISP_DIR_Z, MOD_DISP_DIR_RGB_XYZ) &&
dmd->space == MOD_DISP_SPACE_GLOBAL))
{
DEG_add_object_relation(node, ob, DEG_OB_COMP_TRANSFORM, "Displace Modifier");
}
}
@ -206,6 +214,8 @@ static void displaceModifier_do(
float weight = 1.0f; /* init value unused but some compilers may complain */
const float delta_fixed = 1.0f - dmd->midlevel;  /* when no texture is used, we fall back to white */
float (*vert_clnors)[3] = NULL;
float local_mat[4][4];
const bool use_global_direction = dmd->space == MOD_DISP_SPACE_GLOBAL;
if (!dmd->texture && dmd->direction == MOD_DISP_DIR_RGB_XYZ) return;
if (dmd->strength == 0.0f) return;
@ -243,11 +253,17 @@ static void displaceModifier_do(
direction = MOD_DISP_DIR_NOR;
}
}
else if (ELEM(direction, MOD_DISP_DIR_X, MOD_DISP_DIR_Y, MOD_DISP_DIR_Z, MOD_DISP_DIR_RGB_XYZ) &&
use_global_direction)
{
copy_m4_m4(local_mat, ob->obmat);
}
for (i = 0; i < numVerts; i++) {
TexResult texres;
float strength = dmd->strength;
float delta;
float local_vec[3];
if (dvert) {
weight = defvert_find_weight(dvert + i, defgrp_index);
@ -270,18 +286,44 @@ static void displaceModifier_do(
switch (direction) {
case MOD_DISP_DIR_X:
vertexCos[i][0] += delta;
if (use_global_direction) {
vertexCos[i][0] += delta * local_mat[0][0];
vertexCos[i][1] += delta * local_mat[1][0];
vertexCos[i][2] += delta * local_mat[2][0];
}
else {
vertexCos[i][0] += delta;
}
break;
case MOD_DISP_DIR_Y:
vertexCos[i][1] += delta;
if (use_global_direction) {
vertexCos[i][0] += delta * local_mat[0][1];
vertexCos[i][1] += delta * local_mat[1][1];
vertexCos[i][2] += delta * local_mat[2][1];
}
else {
vertexCos[i][1] += delta;
}
break;
case MOD_DISP_DIR_Z:
vertexCos[i][2] += delta;
if (use_global_direction) {
vertexCos[i][0] += delta * local_mat[0][2];
vertexCos[i][1] += delta * local_mat[1][2];
vertexCos[i][2] += delta * local_mat[2][2];
}
else {
vertexCos[i][2] += delta;
}
break;
case MOD_DISP_DIR_RGB_XYZ:
vertexCos[i][0] += (texres.tr - dmd->midlevel) * strength;
vertexCos[i][1] += (texres.tg - dmd->midlevel) * strength;
vertexCos[i][2] += (texres.tb - dmd->midlevel) * strength;
local_vec[0] = texres.tr - dmd->midlevel;
local_vec[1] = texres.tg - dmd->midlevel;
local_vec[2] = texres.tb - dmd->midlevel;
if (use_global_direction) {
mul_transposed_mat3_m4_v3(local_mat, local_vec);
}
mul_v3_fl(local_vec, strength);
add_v3_v3(vertexCos[i], local_vec);
break;
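In the global-space cases above, the vertex coordinates being displaced live in the object's local space, so a world-space displacement direction has to be mapped back into local space before it is added; when the rotation part of the object matrix is orthonormal, its inverse is its transpose, which is what the transposed multiply achieves. A minimal sketch of that mapping, mirroring the index pattern used above (hypothetical helper, assuming no shear or non-uniform scale):

/* map a world-space direction into object-local space, assuming the
 * upper-left 3x3 of the object matrix is orthonormal so that the
 * inverse transform is simply the transpose */
static void world_dir_to_local(const float obmat[4][4], const float world[3], float r_local[3])
{
    for (int i = 0; i < 3; i++) {
        r_local[i] = obmat[i][0] * world[0] +
                     obmat[i][1] * world[1] +
                     obmat[i][2] * world[2];
    }
}

With world set to (delta, 0, 0) this reduces to the three per-axis terms added in the MOD_DISP_DIR_X case above.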
case MOD_DISP_DIR_NOR:
vertexCos[i][0] += delta * (mvert[i].no[0] / 32767.0f);