Cycles: support arbitrary number of motion blur steps for objects.

This commit is contained in:
Brecht Van Lommel 2018-03-08 04:04:52 +01:00
parent 78c2063685
commit db333d9ea4
Notes: blender-bot 2023-02-14 05:26:49 +01:00
Referenced by issue #56396, Cycles - Non-Deformation Motion Blur is inconsistant
14 changed files with 188 additions and 140 deletions

View File

@ -1088,7 +1088,7 @@ class CyclesObjectSettings(bpy.types.PropertyGroup):
cls.motion_steps = IntProperty(
name="Motion Steps",
description="Control accuracy of deformation motion blur, more steps gives more memory usage (actual number of steps is 2^(steps - 1))",
description="Control accuracy of motion blur, more steps gives more memory usage (actual number of steps is 2^(steps - 1))",
min=1, soft_max=8,
default=1,
)

View File

@ -229,9 +229,7 @@ static void blender_camera_from_object(BlenderCamera *bcam,
else
bcam->sensor_fit = BlenderCamera::VERTICAL;
if(object_use_motion(b_ob, b_ob)) {
bcam->motion_steps = object_motion_steps(b_ob);
}
bcam->motion_steps = object_motion_steps(b_ob, b_ob);
}
else {
/* from lamp not implemented yet */

View File

@ -327,22 +327,11 @@ Object *BlenderSync::sync_object(BL::Object& b_parent,
if(motion) {
object = object_map.find(key);
if(object && (scene->need_motion() == Scene::MOTION_PASS ||
object_use_motion(b_parent, b_ob)))
{
/* object transformation */
if(tfm != object->tfm) {
VLOG(1) << "Object " << b_ob.name() << " motion detected.";
if(motion_time == -1.0f || motion_time == 1.0f) {
object->use_motion = true;
}
}
if(motion_time == -1.0f) {
object->motion.pre = tfm;
}
else if(motion_time == 1.0f) {
object->motion.post = tfm;
if(object && object->use_motion()) {
/* Set transform at matching motion time step. */
int time_index = object->motion_step(motion_time);
if(time_index >= 0) {
object->motion[time_index] = tfm;
}
/* mesh deformation */
@ -389,24 +378,34 @@ Object *BlenderSync::sync_object(BL::Object& b_parent,
object->name = b_ob.name().c_str();
object->pass_id = b_ob.pass_index();
object->tfm = tfm;
object->motion.pre = transform_empty();
object->motion.post = transform_empty();
object->use_motion = false;
object->motion.clear();
/* motion blur */
if(scene->need_motion() == Scene::MOTION_BLUR && object->mesh) {
Scene::MotionType need_motion = scene->need_motion();
if(need_motion != Scene::MOTION_NONE && object->mesh) {
Mesh *mesh = object->mesh;
mesh->use_motion_blur = false;
mesh->motion_steps = 0;
if(object_use_motion(b_parent, b_ob)) {
uint motion_steps;
if(scene->need_motion() == Scene::MOTION_BLUR) {
motion_steps = object_motion_steps(b_parent, b_ob);
if(object_use_deform_motion(b_parent, b_ob)) {
mesh->motion_steps = object_motion_steps(b_ob);
mesh->motion_steps = motion_steps;
mesh->use_motion_blur = true;
}
}
else {
motion_steps = 3;
mesh->motion_steps = motion_steps;
}
for(size_t step = 0; step < mesh->motion_steps - 1; step++) {
motion_times.insert(mesh->motion_time(step));
}
object->motion.resize(motion_steps, transform_empty());
object->motion[motion_steps/2] = tfm;
for(size_t step = 0; step < motion_steps; step++) {
motion_times.insert(object->motion_time(step));
}
}

View File

@ -484,33 +484,34 @@ static inline void mesh_texture_space(BL::Mesh& b_mesh,
loc = loc*size - make_float3(0.5f, 0.5f, 0.5f);
}
/* object used for motion blur */
static inline bool object_use_motion(BL::Object& b_parent, BL::Object& b_ob)
/* Object motion steps, returns 0 if no motion blur needed. */
static inline uint object_motion_steps(BL::Object& b_parent, BL::Object& b_ob)
{
/* Get motion enabled and steps from object itself. */
PointerRNA cobject = RNA_pointer_get(&b_ob.ptr, "cycles");
bool use_motion = get_boolean(cobject, "use_motion_blur");
/* If motion blur is enabled for the object we also check
* whether it's enabled for the parent object as well.
*
* This way we can control motion blur from the dupligroup
* duplicator much easier.
*/
if(use_motion && b_parent.ptr.data != b_ob.ptr.data) {
if(!use_motion) {
return 0;
}
uint steps = max(1, get_int(cobject, "motion_steps"));
/* Also check parent object, so motion blur and steps can be
* controlled by dupligroup duplicator for linked groups. */
if(b_parent.ptr.data != b_ob.ptr.data) {
PointerRNA parent_cobject = RNA_pointer_get(&b_parent.ptr, "cycles");
use_motion &= get_boolean(parent_cobject, "use_motion_blur");
if(!use_motion) {
return 0;
}
steps = max(steps, get_int(parent_cobject, "motion_steps"));
}
return use_motion;
}
/* object motion steps */
static inline uint object_motion_steps(BL::Object& b_ob)
{
PointerRNA cobject = RNA_pointer_get(&b_ob.ptr, "cycles");
uint steps = get_int(cobject, "motion_steps");
/* use uneven number of steps so we get one keyframe at the current frame,
* and ue 2^(steps - 1) so objects with more/fewer steps still have samples
* at the same times, to avoid sampling at many different times */
/* Use uneven number of steps so we get one keyframe at the current frame,
* and use 2^(steps - 1) so objects with more/fewer steps still have samples
* at the same times, to avoid sampling at many different times. */
return (2 << (steps - 1)) + 1;
}

View File

@ -40,18 +40,12 @@ enum ObjectVectorTransform {
ccl_device_inline Transform object_fetch_transform(KernelGlobals *kg, int object, enum ObjectTransform type)
{
Transform tfm;
if(type == OBJECT_INVERSE_TRANSFORM) {
tfm.x = kernel_tex_fetch(__objects, object).tfm.mid.x;
tfm.y = kernel_tex_fetch(__objects, object).tfm.mid.y;
tfm.z = kernel_tex_fetch(__objects, object).tfm.mid.z;
return kernel_tex_fetch(__objects, object).itfm;
}
else {
tfm.x = kernel_tex_fetch(__objects, object).tfm.pre.x;
tfm.y = kernel_tex_fetch(__objects, object).tfm.pre.y;
tfm.z = kernel_tex_fetch(__objects, object).tfm.pre.z;
return kernel_tex_fetch(__objects, object).tfm;
}
return tfm;
}
/* Lamp to world space transformation */
@ -79,10 +73,12 @@ ccl_device_inline Transform object_fetch_motion_pass_transform(KernelGlobals *kg
#ifdef __OBJECT_MOTION__
ccl_device_inline Transform object_fetch_transform_motion(KernelGlobals *kg, int object, float time)
{
const ccl_global DecomposedMotionTransform *motion = &kernel_tex_fetch(__objects, object).tfm;
const uint motion_offset = kernel_tex_fetch(__objects, object).motion_offset;
const ccl_global DecomposedTransform *motion = &kernel_tex_fetch(__object_motion, motion_offset);
const uint num_steps = kernel_tex_fetch(__objects, object).numsteps * 2 + 1;
Transform tfm;
transform_motion_array_interpolate(&tfm, &motion->pre, 3, time);
transform_motion_array_interpolate(&tfm, motion, num_steps, time);
return tfm;
}

View File

@ -792,7 +792,8 @@ ccl_device_inline bool triangle_world_space_vertices(KernelGlobals *kg, int obje
#ifdef __INSTANCING__
if(!(object_flag & SD_OBJECT_TRANSFORM_APPLIED)) {
# ifdef __OBJECT_MOTION__
Transform tfm = object_fetch_transform_motion_test(kg, object, time, NULL);
float object_time = (time >= 0.0f) ? time : 0.5f;
Transform tfm = object_fetch_transform_motion_test(kg, object, object_time, NULL);
# else
Transform tfm = object_fetch_transform(kg, object, OBJECT_TRANSFORM);
# endif

View File

@ -33,6 +33,7 @@ KERNEL_TEX(float2, __prim_time)
/* objects */
KERNEL_TEX(KernelObject, __objects)
KERNEL_TEX(Transform, __object_motion_pass)
KERNEL_TEX(DecomposedTransform, __object_motion)
KERNEL_TEX(uint, __object_flag)
/* cameras */

View File

@ -1433,7 +1433,8 @@ static_assert_align(KernelData, 16);
/* Kernel data structures. */
typedef struct KernelObject {
DecomposedMotionTransform tfm;
Transform tfm;
Transform itfm;
float surface_area;
float pass_id;
@ -1449,7 +1450,8 @@ typedef struct KernelObject {
uint patch_map_offset;
uint attribute_map_offset;
uint pad1, pad2;
uint motion_offset;
uint pad;
} KernelObject;
static_assert_align(KernelObject, 16);

View File

@ -54,6 +54,9 @@ struct UpdateObjectTransformState {
*/
map<Mesh*, float> surface_area_map;
/* Motion offsets for each object. */
array<uint> motion_offset;
/* Packed object arrays. Those will be filled in. */
uint *object_flag;
KernelObject *objects;
@ -91,6 +94,7 @@ NODE_DEFINE(Object)
SOCKET_BOOLEAN(hide_on_missing_motion, "Hide on Missing Motion", false);
SOCKET_POINT(dupli_generated, "Dupli Generated", make_float3(0.0f, 0.0f, 0.0f));
SOCKET_POINT2(dupli_uv, "Dupli UV", make_float2(0.0f, 0.0f));
SOCKET_TRANSFORM_ARRAY(motion, "Motion", array<Transform>());
SOCKET_BOOLEAN(is_shadow_catcher, "Shadow Catcher", false);
@ -103,46 +107,54 @@ Object::Object()
particle_system = NULL;
particle_index = 0;
bounds = BoundBox::empty;
motion.pre = transform_empty();
motion.mid = transform_empty();
motion.post = transform_empty();
use_motion = false;
}
/* Default destructor: members release their resources automatically. */
Object::~Object()
{
}
void Object::update_motion()
{
if(!use_motion()) {
return;
}
bool have_motion = false;
for(size_t i = 0; i < motion.size(); i++) {
if(motion[i] == transform_empty()) {
if(hide_on_missing_motion) {
/* Hide objects that have no valid previous or next
* transform, for example particle that stop existing. It
* would be better to handle this in the kernel and make
* objects invisible outside certain motion steps. */
tfm = transform_empty();
motion.clear();
return;
}
else {
/* Otherwise just copy center motion. */
motion[i] = tfm;
}
}
/* Test if any of the transforms are actually different. */
have_motion = have_motion || motion[i] != tfm;
}
/* Clear motion array if there is no actual motion. */
if(!have_motion) {
motion.clear();
}
}
void Object::compute_bounds(bool motion_blur)
{
BoundBox mbounds = mesh->bounds;
if(motion_blur && use_motion) {
MotionTransform mtfm = motion;
if(hide_on_missing_motion) {
/* Hide objects that have no valid previous or next transform, for
* example particle that stop existing. TODO: add support for this
* case in the kernel so we don't get render artifacts. */
if(mtfm.pre == transform_empty() ||
mtfm.post == transform_empty()) {
bounds = BoundBox::empty;
return;
}
}
/* In case of missing motion information for previous/next frame,
* assume there is no motion. */
if(mtfm.pre == transform_empty()) {
mtfm.pre = tfm;
}
if(mtfm.post == transform_empty()) {
mtfm.post = tfm;
}
mtfm.mid = tfm;
DecomposedMotionTransform decomp;
transform_motion_decompose(&decomp.pre, &mtfm.pre, 3);
if(motion_blur && use_motion()) {
array<DecomposedTransform> decomp(motion.size());
transform_motion_decompose(decomp.data(), motion.data(), motion.size());
bounds = BoundBox::empty;
@ -152,11 +164,12 @@ void Object::compute_bounds(bool motion_blur)
for(float t = 0.0f; t < 1.0f; t += (1.0f/128.0f)) {
Transform ttfm;
transform_motion_array_interpolate(&ttfm, &decomp.pre, 3, t);
transform_motion_array_interpolate(&ttfm, decomp.data(), motion.size(), t);
bounds.grow(mbounds.transformed(&ttfm));
}
}
else {
/* No motion blur case. */
if(mesh->transform_applied) {
bounds = mbounds;
}
@ -276,6 +289,29 @@ void Object::tag_update(Scene *scene)
scene->object_manager->need_update = true;
}
/* Motion blur applies when the object has more than one motion step. */
bool Object::use_motion() const
{
	return motion.size() > 1;
}
/* Convert a motion step index into a normalized -1..1 motion time,
 * with 0.0 being the center step. Returns 0.0 when motion is unused. */
float Object::motion_time(int step) const
{
	if(!use_motion()) {
		return 0.0f;
	}
	return 2.0f * step / (motion.size() - 1) - 1.0f;
}
/* Convert a normalized -1..1 motion time to the matching index in the
 * motion array, or -1 if there is no exact match.
 *
 * Exact float comparison is intentional here: the candidate times come
 * from motion_time() and callers pass back those same computed values,
 * so matching steps compare bitwise equal. */
int Object::motion_step(float time) const
{
	if(use_motion()) {
		/* Use a signed index to avoid an implicit size_t -> int narrowing
		 * conversion on return. */
		const int num_steps = (int)motion.size();
		for(int step = 0; step < num_steps; step++) {
			if(time == motion_time(step)) {
				return step;
			}
		}
	}
	return -1;
}
bool Object::is_traceable() const
{
/* Mesh itself can be empty, and we can skip all such objects. */
@ -381,12 +417,13 @@ void ObjectManager::device_update_object_transform(UpdateObjectTransformState *s
}
}
memcpy(&kobject.tfm.pre, &tfm, sizeof(tfm));
memcpy(&kobject.tfm.mid, &itfm, sizeof(itfm));
kobject.tfm = tfm;
kobject.itfm = itfm;
kobject.surface_area = surface_area;
kobject.pass_id = pass_id;
kobject.random_number = random_number;
kobject.particle_index = particle_index;
kobject.motion_offset = 0;
if(mesh->use_motion_blur) {
state->have_motion = true;
@ -396,38 +433,39 @@ void ObjectManager::device_update_object_transform(UpdateObjectTransformState *s
}
if(state->need_motion == Scene::MOTION_PASS) {
/* Clear motion array if there is no actual motion. */
ob->update_motion();
/* Compute motion transforms. */
Transform tfm_pre, tfm_post;
if(ob->use_motion()) {
tfm_pre = ob->motion[0];
tfm_post = ob->motion[ob->motion.size() - 1];
}
else {
tfm_pre = tfm;
tfm_post = tfm;
}
/* Motion transformations, is world/object space depending if mesh
* comes with deformed position in object space, or if we transform
* the shading point in world space.
*/
MotionTransform mtfm = ob->motion;
/* In case of missing motion information for previous/next frame,
* assume there is no motion. */
if(!ob->use_motion || mtfm.pre == transform_empty()) {
mtfm.pre = ob->tfm;
}
if(!ob->use_motion || mtfm.post == transform_empty()) {
mtfm.post = ob->tfm;
}
* the shading point in world space. */
if(!mesh->attributes.find(ATTR_STD_MOTION_VERTEX_POSITION)) {
mtfm.pre = mtfm.pre * itfm;
mtfm.post = mtfm.post * itfm;
tfm_pre = tfm_pre * itfm;
tfm_post = tfm_post * itfm;
}
object_motion_pass[object_index*OBJECT_MOTION_PASS_SIZE+0] = mtfm.pre;
object_motion_pass[object_index*OBJECT_MOTION_PASS_SIZE+1] = mtfm.post;
int motion_pass_offset = object_index*OBJECT_MOTION_PASS_SIZE;
object_motion_pass[motion_pass_offset + 0] = tfm_pre;
object_motion_pass[motion_pass_offset + 1] = tfm_post;
}
else if(state->need_motion == Scene::MOTION_BLUR) {
if(ob->use_motion) {
/* decompose transformations for interpolation. */
DecomposedMotionTransform decomp;
MotionTransform mtfm = ob->motion;
mtfm.mid = tfm;
if(ob->use_motion()) {
kobject.motion_offset = state->motion_offset[object_index];
transform_motion_decompose(&decomp.pre, &mtfm.pre, 3);
kobject.tfm = decomp;
/* Decompose transforms for interpolation. */
DecomposedTransform *decomp = state->object_motion + kobject.motion_offset;
transform_motion_decompose(decomp, ob->motion.data(), ob->motion.size());
flag |= SD_OBJECT_MOTION;
state->have_motion = true;
}
@ -512,11 +550,28 @@ void ObjectManager::device_update_transforms(DeviceScene *dscene,
state.objects = dscene->objects.alloc(scene->objects.size());
state.object_flag = dscene->object_flag.alloc(scene->objects.size());
state.object_motion = NULL;
state.object_motion_pass = NULL;
if(state.need_motion == Scene::MOTION_PASS) {
state.object_motion_pass = dscene->object_motion_pass.alloc(OBJECT_MOTION_PASS_SIZE*scene->objects.size());
}
else if(state.need_motion == Scene::MOTION_BLUR) {
/* Set object offsets into global object motion array. */
uint *motion_offsets = state.motion_offset.resize(scene->objects.size());
uint motion_offset = 0;
foreach(Object *ob, scene->objects) {
*motion_offsets = motion_offset;
motion_offsets++;
/* Clear motion array if there is no actual motion. */
ob->update_motion();
motion_offset += ob->motion.size();
}
state.object_motion = dscene->object_motion.alloc(motion_offset);
}
/* Particle system device offsets
* 0 is dummy particle, index starts at 1.
@ -560,6 +615,9 @@ void ObjectManager::device_update_transforms(DeviceScene *dscene,
if(state.need_motion == Scene::MOTION_PASS) {
dscene->object_motion_pass.copy_to_device();
}
else if(state.need_motion == Scene::MOTION_BLUR) {
dscene->object_motion.copy_to_device();
}
dscene->data.bvh.have_motion = state.have_motion;
dscene->data.bvh.have_curves = state.have_curves;
@ -709,6 +767,7 @@ void ObjectManager::device_free(Device *, DeviceScene *dscene)
{
dscene->objects.free();
dscene->object_motion_pass.free();
dscene->object_motion.free();
dscene->object_flag.free();
}
@ -748,7 +807,7 @@ void ObjectManager::apply_static_transforms(DeviceScene *dscene, Scene *scene, P
if((mesh_users[object->mesh] == 1 && !object->mesh->has_surface_bssrdf) &&
!object->mesh->has_true_displacement() && object->mesh->subdivision_type == Mesh::SUBDIVISION_NONE)
{
if(!(motion_blur && object->use_motion)) {
if(!(motion_blur && object->use_motion())) {
if(!object->mesh->transform_applied) {
object->apply_transform(apply_to_motion);
object->mesh->transform_applied = true;

View File

@ -50,8 +50,7 @@ public:
int pass_id;
vector<ParamValue> attributes;
uint visibility;
MotionTransform motion;
bool use_motion;
array<Transform> motion;
bool hide_on_missing_motion;
bool use_holdout;
bool is_shadow_catcher;
@ -70,6 +69,13 @@ public:
void compute_bounds(bool motion_blur);
void apply_transform(bool apply_to_motion);
/* Convert between normalized -1..1 motion time and index
* in the motion array. */
bool use_motion() const;
float motion_time(int step) const;
int motion_step(float time) const;
void update_motion();
/* Check whether object is traceable and it worth adding it to
* kernel scene.
*/

View File

@ -61,6 +61,7 @@ DeviceScene::DeviceScene(Device *device)
patches(device, "__patches", MEM_TEXTURE),
objects(device, "__objects", MEM_TEXTURE),
object_motion_pass(device, "__object_motion_pass", MEM_TEXTURE),
object_motion(device, "__object_motion", MEM_TEXTURE),
object_flag(device, "__object_flag", MEM_TEXTURE),
camera_motion(device, "__camera_motion", MEM_TEXTURE),
attributes_map(device, "__attributes_map", MEM_TEXTURE),

View File

@ -88,6 +88,7 @@ public:
/* objects */
device_vector<KernelObject> objects;
device_vector<Transform> object_motion_pass;
device_vector<DecomposedTransform> object_motion;
device_vector<uint> object_flag;
/* cameras */

View File

@ -662,7 +662,7 @@ DeviceRequestedFeatures Session::get_requested_device_features()
if(mesh->num_curves()) {
requested_features.use_hair = true;
}
requested_features.use_object_motion |= object->use_motion | mesh->use_motion_blur;
requested_features.use_object_motion |= object->use_motion() | mesh->use_motion_blur;
requested_features.use_camera_motion |= mesh->use_motion_blur;
#ifdef WITH_OPENSUBDIV
if(mesh->subdivision_type != Mesh::SUBDIVISION_NONE) {

View File

@ -37,12 +37,6 @@ typedef struct Transform {
#endif
} Transform;
typedef struct ccl_may_alias MotionTransform {
Transform pre;
Transform mid;
Transform post;
} MotionTransform;
/* Transform decomposed in rotation/translation/scale. we use the same data
* structure as Transform, and tightly pack decomposition into it. first the
* rotation (4), then translation (3), then 3x3 scale matrix (9). */
@ -51,12 +45,6 @@ typedef struct DecomposedTransform {
float4 x, y, z, w;
} DecomposedTransform;
typedef struct ccl_may_alias DecomposedMotionTransform {
DecomposedTransform pre;
DecomposedTransform mid;
DecomposedTransform post;
} DecomposedMotionTransform;
/* Functions */
ccl_device_inline float3 transform_point(const Transform *t, const float3 a)
@ -443,11 +431,6 @@ ccl_device_inline bool operator==(const DecomposedTransform& A, const Decomposed
return memcmp(&A, &B, sizeof(DecomposedTransform)) == 0;
}
ccl_device_inline bool operator==(const MotionTransform& A, const MotionTransform& B)
{
return (A.pre == B.pre && A.post == B.post);
}
float4 transform_to_quat(const Transform& tfm);
void transform_motion_decompose(DecomposedTransform *decomp, const Transform *motion, size_t size);
Transform transform_from_viewplane(BoundBox2D& viewplane);