Cycles: support arbitrary number of motion blur steps for cameras.

This commit is contained in:
Brecht Van Lommel 2018-03-10 01:36:09 +01:00
parent 267d892326
commit 78c2063685
Notes: blender-bot 2023-02-14 10:29:32 +01:00
Referenced by issue #72889, Camera motion blur bug
Referenced by issue #56396, Cycles - Non-Deformation Motion Blur is inconsistent
Referenced by issue #55626, Cycles - Motion blur seems to be additive when rendering animations
Referenced by issue #55106, Motion Blur after changing active camera
18 changed files with 175 additions and 132 deletions

View File

@ -809,7 +809,7 @@ class CYCLES_OBJECT_PT_motion_blur(CyclesButtonsPanel, Panel):
def poll(cls, context):
ob = context.object
if CyclesButtonsPanel.poll(context) and ob:
if ob.type in {'MESH', 'CURVE', 'CURVE', 'SURFACE', 'FONT', 'META'}:
if ob.type in {'MESH', 'CURVE', 'CURVE', 'SURFACE', 'FONT', 'META', 'CAMERA'}:
return True
if ob.dupli_type == 'GROUP' and ob.dupli_group:
return True
@ -841,11 +841,9 @@ class CYCLES_OBJECT_PT_motion_blur(CyclesButtonsPanel, Panel):
layout.active = (rd.use_motion_blur and cob.use_motion_blur)
row = layout.row()
row.prop(cob, "use_deform_motion", text="Deformation")
sub = row.row()
sub.active = cob.use_deform_motion
sub.prop(cob, "motion_steps", text="Steps")
if ob.type != 'CAMERA':
row.prop(cob, "use_deform_motion", text="Deformation")
row.prop(cob, "motion_steps", text="Steps")
class CYCLES_OBJECT_PT_cycles_settings(CyclesButtonsPanel, Panel):

View File

@ -83,6 +83,8 @@ struct BlenderCamera {
Transform matrix;
float offscreen_dicing_scale;
int motion_steps;
};
static void blender_camera_init(BlenderCamera *bcam,
@ -226,6 +228,10 @@ static void blender_camera_from_object(BlenderCamera *bcam,
bcam->sensor_fit = BlenderCamera::HORIZONTAL;
else
bcam->sensor_fit = BlenderCamera::VERTICAL;
if(object_use_motion(b_ob, b_ob)) {
bcam->motion_steps = object_motion_steps(b_ob);
}
}
else {
/* from lamp not implemented yet */
@ -453,9 +459,7 @@ static void blender_camera_sync(Camera *cam,
cam->matrix = blender_camera_matrix(bcam->matrix,
bcam->type,
bcam->panorama_type);
cam->motion.pre = cam->matrix;
cam->motion.post = cam->matrix;
cam->use_motion = false;
cam->motion.resize(bcam->motion_steps, cam->matrix);
cam->use_perspective_motion = false;
cam->shuttertime = bcam->shuttertime;
cam->fov_pre = cam->fov;
@ -564,20 +568,15 @@ void BlenderSync::sync_camera_motion(BL::RenderSettings& b_render,
Transform tfm = get_transform(b_ob_matrix);
tfm = blender_camera_matrix(tfm, cam->type, cam->panorama_type);
if(tfm != cam->matrix) {
VLOG(1) << "Camera " << b_ob.name() << " motion detected.";
if(motion_time == 0.0f) {
/* When motion blur is not centered in frame, cam->matrix gets reset. */
cam->matrix = tfm;
}
else if(motion_time == -1.0f) {
cam->motion.pre = tfm;
cam->use_motion = true;
}
else if(motion_time == 1.0f) {
cam->motion.post = tfm;
cam->use_motion = true;
}
if(motion_time == 0.0f) {
/* When motion blur is not centered in frame, cam->matrix gets reset. */
cam->matrix = tfm;
}
/* Set transform in motion array. */
int motion_step = cam->motion_step(motion_time);
if(motion_step >= 0) {
cam->motion[motion_step] = tfm;
}
if(cam->type == CAMERA_PERSPECTIVE) {

View File

@ -82,7 +82,7 @@ ccl_device_inline Transform object_fetch_transform_motion(KernelGlobals *kg, int
const ccl_global DecomposedMotionTransform *motion = &kernel_tex_fetch(__objects, object).tfm;
Transform tfm;
transform_motion_interpolate(&tfm, motion, time);
transform_motion_array_interpolate(&tfm, &motion->pre, 3, time);
return tfm;
}

View File

@ -91,11 +91,12 @@ ccl_device void camera_sample_perspective(KernelGlobals *kg, float raster_x, flo
Transform cameratoworld = kernel_data.cam.cameratoworld;
#ifdef __CAMERA_MOTION__
if(kernel_data.cam.have_motion) {
ccl_constant DecomposedMotionTransform *motion = &kernel_data.cam.motion;
transform_motion_interpolate_constant(&cameratoworld,
motion,
ray->time);
if(kernel_data.cam.num_motion_steps) {
transform_motion_array_interpolate(
&cameratoworld,
kernel_tex_array(__camera_motion),
kernel_data.cam.num_motion_steps,
ray->time);
}
#endif
@ -197,11 +198,12 @@ ccl_device void camera_sample_orthographic(KernelGlobals *kg, float raster_x, fl
Transform cameratoworld = kernel_data.cam.cameratoworld;
#ifdef __CAMERA_MOTION__
if(kernel_data.cam.have_motion) {
ccl_constant DecomposedMotionTransform *motion = &kernel_data.cam.motion;
transform_motion_interpolate_constant(&cameratoworld,
motion,
ray->time);
if(kernel_data.cam.num_motion_steps) {
transform_motion_array_interpolate(
&cameratoworld,
kernel_tex_array(__camera_motion),
kernel_data.cam.num_motion_steps,
ray->time);
}
#endif
@ -227,6 +229,7 @@ ccl_device void camera_sample_orthographic(KernelGlobals *kg, float raster_x, fl
/* Panorama Camera */
ccl_device_inline void camera_sample_panorama(ccl_constant KernelCamera *cam,
const ccl_global DecomposedTransform *cam_motion,
float raster_x, float raster_y,
float lens_u, float lens_v,
ccl_addr_space Ray *ray)
@ -269,11 +272,12 @@ ccl_device_inline void camera_sample_panorama(ccl_constant KernelCamera *cam,
Transform cameratoworld = cam->cameratoworld;
#ifdef __CAMERA_MOTION__
if(cam->have_motion) {
ccl_constant DecomposedMotionTransform *motion = &cam->motion;
transform_motion_interpolate_constant(&cameratoworld,
motion,
ray->time);
if(cam->num_motion_steps) {
transform_motion_array_interpolate(
&cameratoworld,
cam_motion,
cam->num_motion_steps,
ray->time);
}
#endif
@ -392,12 +396,16 @@ ccl_device_inline void camera_sample(KernelGlobals *kg,
#endif
/* sample */
if(kernel_data.cam.type == CAMERA_PERSPECTIVE)
if(kernel_data.cam.type == CAMERA_PERSPECTIVE) {
camera_sample_perspective(kg, raster_x, raster_y, lens_u, lens_v, ray);
else if(kernel_data.cam.type == CAMERA_ORTHOGRAPHIC)
}
else if(kernel_data.cam.type == CAMERA_ORTHOGRAPHIC) {
camera_sample_orthographic(kg, raster_x, raster_y, lens_u, lens_v, ray);
else
camera_sample_panorama(&kernel_data.cam, raster_x, raster_y, lens_u, lens_v, ray);
}
else {
const ccl_global DecomposedTransform *cam_motion = kernel_tex_array(__camera_motion);
camera_sample_panorama(&kernel_data.cam, cam_motion, raster_x, raster_y, lens_u, lens_v, ray);
}
}
/* Utilities */

View File

@ -118,6 +118,7 @@ template<typename T> struct texture {
#define kernel_tex_fetch_ssef(tex, index) (kg->tex.fetch_ssef(index))
#define kernel_tex_fetch_ssei(tex, index) (kg->tex.fetch_ssei(index))
#define kernel_tex_lookup(tex, t, offset, size) (kg->tex.lookup(t, offset, size))
#define kernel_tex_array(tex) (kg->tex.data)
#define kernel_data (kg->__data)

View File

@ -137,6 +137,7 @@ ccl_device_inline uint ccl_num_groups(uint d)
/* Use arrays for regular data. */
#define kernel_tex_fetch(t, index) t[(index)]
#define kernel_tex_array(t) (t)
#define kernel_data __data

View File

@ -144,7 +144,8 @@
/* data lookup defines */
#define kernel_data (*kg->data)
#define kernel_tex_fetch(tex, index) ((const ccl_global tex##_t*)(kg->buffers[kg->tex.cl_buffer] + kg->tex.data))[(index)]
#define kernel_tex_array(tex) ((const ccl_global tex##_t*)(kg->buffers[kg->tex.cl_buffer] + kg->tex.data))
#define kernel_tex_fetch(tex, index) kernel_tex_array(tex)[(index)]
/* define NULL */
#define NULL 0

View File

@ -35,6 +35,9 @@ KERNEL_TEX(KernelObject, __objects)
KERNEL_TEX(Transform, __object_motion_pass)
KERNEL_TEX(uint, __object_flag)
/* cameras */
KERNEL_TEX(DecomposedTransform, __camera_motion)
/* triangles */
KERNEL_TEX(uint, __tri_shader)
KERNEL_TEX(float4, __tri_vnormal)

View File

@ -1173,7 +1173,7 @@ typedef struct KernelCamera {
/* motion blur */
float shuttertime;
int have_motion, have_perspective_motion;
int num_motion_steps, have_perspective_motion;
/* clipping */
float nearclip;
@ -1201,8 +1201,6 @@ typedef struct KernelCamera {
ProjectionTransform worldtondc;
Transform worldtocamera;
DecomposedMotionTransform motion;
/* Stores changes in the projection matrix. Used for camera zoom motion
* blur and motion pass output for perspective camera. */
ProjectionTransform perspective_pre;

View File

@ -82,6 +82,7 @@ NODE_DEFINE(Camera)
SOCKET_FLOAT(bladesrotation, "Blades Rotation", 0.0f);
SOCKET_TRANSFORM(matrix, "Matrix", transform_identity());
SOCKET_TRANSFORM_ARRAY(motion, "Motion", array<Transform>());
SOCKET_FLOAT(aperture_ratio, "Aperture Ratio", 1.0f);
@ -151,9 +152,6 @@ Camera::Camera()
height = 512;
resolution = 1;
motion.pre = transform_identity();
motion.post = transform_identity();
use_motion = false;
use_perspective_motion = false;
shutter_curve.resize(RAMP_TABLE_SIZE);
@ -317,15 +315,22 @@ void Camera::update(Scene *scene)
kcam->ndctoworld = ndctoworld;
/* camera motion */
kcam->have_motion = 0;
kcam->num_motion_steps = 0;
kcam->have_perspective_motion = 0;
kernel_camera_motion.clear();
/* Test if any of the transforms are actually different. */
bool have_motion = false;
for(size_t i = 0; i < motion.size(); i++) {
have_motion = have_motion || motion[i] != matrix;
}
if(need_motion == Scene::MOTION_PASS) {
/* TODO(sergey): Support perspective (zoom, fov) motion. */
if(type == CAMERA_PANORAMA) {
if(use_motion) {
kcam->motion_pass_pre = transform_inverse(motion.pre);
kcam->motion_pass_post = transform_inverse(motion.post);
if(have_motion) {
kcam->motion_pass_pre = transform_inverse(motion[0]);
kcam->motion_pass_post = transform_inverse(motion[motion.size()-1]);
}
else {
kcam->motion_pass_pre = kcam->worldtocamera;
@ -333,9 +338,9 @@ void Camera::update(Scene *scene)
}
}
else {
if(use_motion) {
kcam->perspective_pre = cameratoraster * transform_inverse(motion.pre);
kcam->perspective_post = cameratoraster * transform_inverse(motion.post);
if(have_motion) {
kcam->perspective_pre = cameratoraster * transform_inverse(motion[0]);
kcam->perspective_post = cameratoraster * transform_inverse(motion[motion.size()-1]);
}
else {
kcam->perspective_pre = worldtoraster;
@ -344,9 +349,10 @@ void Camera::update(Scene *scene)
}
}
else if(need_motion == Scene::MOTION_BLUR) {
if(use_motion) {
transform_motion_decompose(&kcam->motion, &motion, &matrix);
kcam->have_motion = 1;
if(have_motion) {
kernel_camera_motion.resize(motion.size());
transform_motion_decompose(kernel_camera_motion.data(), motion.data(), motion.size());
kcam->num_motion_steps = motion.size();
}
/* TODO(sergey): Support other types of camera. */
@ -469,6 +475,16 @@ void Camera::device_update(Device * /* device */,
}
dscene->data.cam = kernel_camera;
size_t num_motion_steps = kernel_camera_motion.size();
if(num_motion_steps) {
DecomposedTransform *camera_motion = dscene->camera_motion.alloc(num_motion_steps);
memcpy(camera_motion, kernel_camera_motion.data(), sizeof(*camera_motion) * num_motion_steps);
dscene->camera_motion.copy_to_device();
}
else {
dscene->camera_motion.free();
}
}
void Camera::device_update_volume(Device * /*device*/,
@ -495,10 +511,11 @@ void Camera::device_update_volume(Device * /*device*/,
}
void Camera::device_free(Device * /*device*/,
DeviceScene * /*dscene*/,
DeviceScene *dscene,
Scene *scene)
{
scene->lookup_tables->remove_table(&shutter_table_offset);
dscene->camera_motion.free();
}
bool Camera::modified(const Camera& cam)
@ -509,7 +526,6 @@ bool Camera::modified(const Camera& cam)
bool Camera::motion_modified(const Camera& cam)
{
return !((motion == cam.motion) &&
(use_motion == cam.use_motion) &&
(use_perspective_motion == cam.use_perspective_motion));
}
@ -706,17 +722,17 @@ float Camera::world_to_raster_size(float3 P)
* may be a better way to do this, but calculating differentials from the
* point directly ahead seems to produce good enough results. */
#if 0
float2 dir = direction_to_panorama(&kernel_camera, normalize(D));
float2 dir = direction_to_panorama(&kernel_camera, kernel_camera_motion.data(), normalize(D));
float3 raster = transform_perspective(&cameratoraster, make_float3(dir.x, dir.y, 0.0f));
ray.t = 1.0f;
camera_sample_panorama(&kernel_camera, raster.x, raster.y, 0.0f, 0.0f, &ray);
camera_sample_panorama(&kernel_camera, kernel_camera_motion.data(), raster.x, raster.y, 0.0f, 0.0f, &ray);
if(ray.t == 0.0f) {
/* No differentials, just use from directly ahead. */
camera_sample_panorama(&kernel_camera, 0.5f*width, 0.5f*height, 0.0f, 0.0f, &ray);
camera_sample_panorama(&kernel_camera, kernel_camera_motion.data(), 0.5f*width, 0.5f*height, 0.0f, 0.0f, &ray);
}
#else
camera_sample_panorama(&kernel_camera, 0.5f*width, 0.5f*height, 0.0f, 0.0f, &ray);
camera_sample_panorama(&kernel_camera, kernel_camera_motion.data(), 0.5f*width, 0.5f*height, 0.0f, 0.0f, &ray);
#endif
differential_transfer(&ray.dP, ray.dP, ray.D, ray.dD, ray.D, dist);
@ -728,4 +744,27 @@ float Camera::world_to_raster_size(float3 P)
return res;
}
bool Camera::use_motion() const
{
	/* Motion blur needs at least two motion steps to interpolate between. */
	return motion.size() >= 2;
}
float Camera::motion_time(int step) const
{
	/* Without motion blur there is only the center time. */
	if(!use_motion()) {
		return 0.0f;
	}
	/* Map step index onto the [-1, 1] shutter interval, with the first
	 * step at -1 and the last step at 1. */
	return 2.0f * step / (motion.size() - 1) - 1.0f;
}
int Camera::motion_step(float time) const
{
	/* Find the motion step whose time exactly matches the given time,
	 * or -1 when there is no match (or no motion blur at all). */
	if(!use_motion()) {
		return -1;
	}
	const int num_steps = motion.size();
	for(int i = 0; i < num_steps; i++) {
		if(motion_time(i) == time) {
			return i;
		}
	}
	return -1;
}
CCL_NAMESPACE_END

View File

@ -141,8 +141,8 @@ public:
Transform matrix;
/* motion */
MotionTransform motion;
bool use_motion, use_perspective_motion;
array<Transform> motion;
bool use_perspective_motion;
float fov_pre, fov_post;
/* computed camera parameters */
@ -176,6 +176,7 @@ public:
/* Kernel camera data, copied here for dicing. */
KernelCamera kernel_camera;
array<DecomposedTransform> kernel_camera_motion;
/* functions */
Camera();
@ -199,6 +200,11 @@ public:
/* Calculates the width of a pixel at point in world space. */
float world_to_raster_size(float3 P);
/* Motion blur. */
float motion_time(int step) const;
int motion_step(float time) const;
bool use_motion() const;
private:
/* Private utility functions. */
float3 transform_raster_to_world(float raster_x, float raster_y);

View File

@ -139,9 +139,10 @@ void Object::compute_bounds(bool motion_blur)
if(mtfm.post == transform_empty()) {
mtfm.post = tfm;
}
mtfm.mid = tfm;
DecomposedMotionTransform decomp;
transform_motion_decompose(&decomp, &mtfm, &tfm);
transform_motion_decompose(&decomp.pre, &mtfm.pre, 3);
bounds = BoundBox::empty;
@ -151,7 +152,7 @@ void Object::compute_bounds(bool motion_blur)
for(float t = 0.0f; t < 1.0f; t += (1.0f/128.0f)) {
Transform ttfm;
transform_motion_interpolate(&ttfm, &decomp, t);
transform_motion_array_interpolate(&ttfm, &decomp.pre, 3, t);
bounds.grow(mbounds.transformed(&ttfm));
}
}
@ -422,8 +423,10 @@ void ObjectManager::device_update_object_transform(UpdateObjectTransformState *s
if(ob->use_motion) {
/* decompose transformations for interpolation. */
DecomposedMotionTransform decomp;
MotionTransform mtfm = ob->motion;
mtfm.mid = tfm;
transform_motion_decompose(&decomp, &ob->motion, &ob->tfm);
transform_motion_decompose(&decomp.pre, &mtfm.pre, 3);
kobject.tfm = decomp;
flag |= SD_OBJECT_MOTION;
state->have_motion = true;

View File

@ -62,6 +62,7 @@ DeviceScene::DeviceScene(Device *device)
objects(device, "__objects", MEM_TEXTURE),
object_motion_pass(device, "__object_motion_pass", MEM_TEXTURE),
object_flag(device, "__object_flag", MEM_TEXTURE),
camera_motion(device, "__camera_motion", MEM_TEXTURE),
attributes_map(device, "__attributes_map", MEM_TEXTURE),
attributes_float(device, "__attributes_float", MEM_TEXTURE),
attributes_float3(device, "__attributes_float3", MEM_TEXTURE),

View File

@ -90,6 +90,9 @@ public:
device_vector<Transform> object_motion_pass;
device_vector<uint> object_flag;
/* cameras */
device_vector<DecomposedTransform> camera_motion;
/* attributes */
device_vector<uint4> attributes_map;
device_vector<float> attributes_float;

View File

@ -656,7 +656,7 @@ DeviceRequestedFeatures Session::get_requested_device_features()
*/
requested_features.use_hair = false;
requested_features.use_object_motion = false;
requested_features.use_camera_motion = scene->camera->use_motion;
requested_features.use_camera_motion = scene->camera->use_motion();
foreach(Object *object, scene->objects) {
Mesh *mesh = object->mesh;
if(mesh->num_curves()) {

View File

@ -261,18 +261,18 @@ static void transform_decompose(DecomposedTransform *decomp, const Transform *tf
decomp->w = make_float4(scale.y.z, scale.z.x, scale.z.y, scale.z.z);
}
void transform_motion_decompose(DecomposedMotionTransform *decomp, const MotionTransform *motion, const Transform *mid)
void transform_motion_decompose(DecomposedTransform *decomp, const Transform *motion, size_t size)
{
transform_decompose(&decomp->pre, &motion->pre);
transform_decompose(&decomp->mid, mid);
transform_decompose(&decomp->post, &motion->post);
for(size_t i = 0; i < size; i++) {
transform_decompose(decomp + i, motion + i);
/* ensure rotation around shortest angle, negated quaternions are the same
* but this means we don't have to do the check in quat_interpolate */
if(dot(decomp->pre.x, decomp->mid.x) < 0.0f)
decomp->pre.x = -decomp->pre.x;
if(dot(decomp->mid.x, decomp->post.x) < 0.0f)
decomp->mid.x = -decomp->mid.x;
if(i > 0) {
/* Ensure rotation around shortest angle, negated quaternions are the same
* but this means we don't have to do the check in quat_interpolate */
if(dot(decomp[i-1].x, decomp[i].x) < 0.0f)
decomp[i-1].x = -decomp[i-1].x;
}
}
}
Transform transform_from_viewplane(BoundBox2D& viewplane)

View File

@ -409,58 +409,28 @@ ccl_device_inline void transform_compose(Transform *tfm, const DecomposedTransfo
tfm->z = make_float4(dot(rotation_z, scale_x), dot(rotation_z, scale_y), dot(rotation_z, scale_z), decomp->y.z);
}
ccl_device void transform_motion_interpolate(Transform *tfm, const ccl_global DecomposedMotionTransform *motion, float t)
/* Interpolate from array of decomposed transforms. */
ccl_device void transform_motion_array_interpolate(Transform *tfm,
const ccl_global DecomposedTransform *motion,
uint numsteps,
float time)
{
/* Figure out which steps we need to interpolate. */
int maxstep = numsteps-1;
int step = min((int)(time*maxstep), maxstep-1);
float t = time*maxstep - step;
const ccl_global DecomposedTransform *a = motion + step;
const ccl_global DecomposedTransform *b = motion + step + 1;
/* Interpolate rotation, translation and scale. */
DecomposedTransform decomp;
decomp.x = quat_interpolate(a->x, b->x, t);
decomp.y = (1.0f - t)*a->y + t*b->y;
decomp.z = (1.0f - t)*a->z + t*b->z;
decomp.w = (1.0f - t)*a->w + t*b->w;
/* linear interpolation for rotation and scale */
if(t < 0.5f) {
t *= 2.0f;
decomp.x = quat_interpolate(motion->pre.x, motion->mid.x, t);
decomp.y = (1.0f - t)*motion->pre.y + t*motion->mid.y;
decomp.z = (1.0f - t)*motion->pre.z + t*motion->mid.z;
decomp.w = (1.0f - t)*motion->pre.w + t*motion->mid.w;
}
else {
t = (t - 0.5f)*2.0f;
decomp.x = quat_interpolate(motion->mid.x, motion->post.x, t);
decomp.y = (1.0f - t)*motion->mid.y + t*motion->post.y;
decomp.z = (1.0f - t)*motion->mid.z + t*motion->post.z;
decomp.w = (1.0f - t)*motion->mid.w + t*motion->post.w;
}
/* compose rotation, translation, scale into matrix */
transform_compose(tfm, &decomp);
}
ccl_device void transform_motion_interpolate_constant(Transform *tfm, ccl_constant DecomposedMotionTransform *motion, float t)
{
/* possible optimization: is it worth it adding a check to skip scaling?
* it's probably quite uncommon to have scaling objects. or can we skip
* just shearing perhaps? */
DecomposedTransform decomp;
/* linear interpolation for rotation and scale */
if(t < 0.5f) {
t *= 2.0f;
decomp.x = quat_interpolate(motion->pre.x, motion->mid.x, t);
decomp.y = (1.0f - t)*motion->pre.y + t*motion->mid.y;
decomp.z = (1.0f - t)*motion->pre.z + t*motion->mid.z;
decomp.w = (1.0f - t)*motion->pre.w + t*motion->mid.w;
}
else {
t = (t - 0.5f)*2.0f;
decomp.x = quat_interpolate(motion->mid.x, motion->post.x, t);
decomp.y = (1.0f - t)*motion->mid.y + t*motion->post.y;
decomp.z = (1.0f - t)*motion->mid.z + t*motion->post.z;
decomp.w = (1.0f - t)*motion->mid.w + t*motion->post.w;
}
/* compose rotation, translation, scale into matrix */
/* Compose rotation, translation, scale into matrix. */
transform_compose(tfm, &decomp);
}
@ -479,7 +449,7 @@ ccl_device_inline bool operator==(const MotionTransform& A, const MotionTransfor
}
float4 transform_to_quat(const Transform& tfm);
void transform_motion_decompose(DecomposedMotionTransform *decomp, const MotionTransform *motion, const Transform *mid);
void transform_motion_decompose(DecomposedTransform *decomp, const Transform *motion, size_t size);
Transform transform_from_viewplane(BoundBox2D& viewplane);
#endif

View File

@ -215,6 +215,18 @@ public:
return data_;
}
T* resize(size_t newsize, const T& value)
{
	/* Resize the array, initializing any newly appended elements to the
	 * given value; elements that already existed are left untouched. */
	const size_t oldsize = size();
	resize(newsize);
	size_t i = oldsize;
	while(i < size()) {
		data_[i] = value;
		i++;
	}
	return data_;
}
void clear()
{
if(data_ != NULL) {