Cycles: Add support for cameras inside volume

Basically the title says it all, volume stack initialization now is aware that
camera might be inside of the volume. This gives quite noticeable render time
regressions in cases where the camera is inside a volume (didn't measure them yet),
because this requires quite a few ray-casts per camera ray in order to check which
objects we're inside. Not quite sure whether this can be optimized.

But the good thing is that we can do quite a good job on detecting whether
camera is outside of any of the volumes and in this case there should be no
time penalty at all (apart from some extra checks during the sync state).

For now we're only doing rather simple AABB checks between the viewplane and
volume objects. This could give some false-positives, but this should be good
starting point.

Panoramic cameras need a mention here: for them the only check performed is whether
there are any volumes in the scene at all, which would lead to speed regressions even if
the camera is outside of the volumes. Would need to figure out proper check
for such cameras.

There are still quite a few TODOs in the code, but the patch is good enough
to start playing around with it checking whether there are some obvious mistakes
somewhere.

Currently the feature is only available in the Experimental feature set; need
to solve some of the TODOs and look into making things faster before considering
the feature is ready for the official feature set. This would still likely
happen in current release cycle.

Reviewers: brecht, juicyfruit, dingto

Differential Revision: https://developer.blender.org/D794
This commit is contained in:
Sergey Sharybin 2014-09-16 23:49:59 +06:00
parent ccc5983e2b
commit fe731686fb
10 changed files with 235 additions and 18 deletions

View File

@ -390,6 +390,7 @@ void BlenderSync::sync_camera(BL::RenderSettings b_render, BL::Object b_override
/* sync */
Camera *cam = scene->camera;
blender_camera_sync(cam, &bcam, width, height);
scene->camera->use_camera_in_volume = experimental;
}
void BlenderSync::sync_camera_motion(BL::Object b_ob, float motion_time)
@ -554,6 +555,7 @@ void BlenderSync::sync_view(BL::SpaceView3D b_v3d, BL::RegionView3D b_rv3d, int
blender_camera_border(&bcam, b_scene.render(), b_scene, b_v3d, b_rv3d, width, height);
blender_camera_sync(scene->camera, &bcam, width, height);
scene->camera->use_camera_in_volume = experimental;
}
BufferParams BlenderSync::get_buffer_params(BL::RenderSettings b_render, BL::Scene b_scene, BL::SpaceView3D b_v3d, BL::RegionView3D b_rv3d, Camera *cam, int width, int height)

View File

@ -33,7 +33,7 @@ ccl_device void compute_light_pass(KernelGlobals *kg, ShaderData *sd, PathRadian
path_radiance_init(&L_sample, kernel_data.film.use_light_pass);
/* init path state */
path_state_init(kg, &state, &rng, sample);
path_state_init(kg, &state, &rng, sample, NULL);
/* evaluate surface shader */
float rbsdf = path_state_rng_1D(kg, &rng, &state, PRNG_BSDF);

View File

@ -469,7 +469,7 @@ ccl_device float4 kernel_path_integrate(KernelGlobals *kg, RNG *rng, int sample,
path_radiance_init(&L, kernel_data.film.use_light_pass);
PathState state;
path_state_init(kg, &state, rng, sample);
path_state_init(kg, &state, rng, sample, &ray);
/* path iteration */
for(;;) {
@ -858,7 +858,7 @@ ccl_device float4 kernel_branched_path_integrate(KernelGlobals *kg, RNG *rng, in
path_radiance_init(&L, kernel_data.film.use_light_pass);
PathState state;
path_state_init(kg, &state, rng, sample);
path_state_init(kg, &state, rng, sample, &ray);
for(;;) {
/* intersect scene */

View File

@ -16,7 +16,7 @@
CCL_NAMESPACE_BEGIN
ccl_device_inline void path_state_init(KernelGlobals *kg, PathState *state, RNG *rng, int sample)
ccl_device_inline void path_state_init(KernelGlobals *kg, PathState *state, RNG *rng, int sample, Ray *ray)
{
state->flag = PATH_RAY_CAMERA|PATH_RAY_MIS_SKIP;
@ -41,7 +41,7 @@ ccl_device_inline void path_state_init(KernelGlobals *kg, PathState *state, RNG
if(kernel_data.integrator.use_volumes) {
/* initialize volume stack with volume we are inside of */
kernel_volume_stack_init(kg, state->volume_stack);
kernel_volume_stack_init(kg, ray, state->volume_stack);
/* seed RNG for cases where we can't use stratified samples */
state->rng_congruential = lcg_init(*rng + sample*0x51633e2d);
}

View File

@ -784,7 +784,7 @@ typedef struct KernelCamera {
/* anamorphic lens bokeh */
float inv_aperture_ratio;
int pad1;
int is_inside_volume;
int pad2;
/* more matrices */

View File

@ -954,17 +954,99 @@ ccl_device bool kernel_volume_use_decoupled(KernelGlobals *kg, bool heterogeneou
* This is an array of object/shared ID's that the current segment of the path
* is inside of. */
/* Initialize the volume stack for a camera/baking ray with the volumes the ray
 * origin is inside of.
 *
 * When the camera is known to be in the air (or when there is no ray, as in the
 * baker) only the background volume needs to be considered. Otherwise the ray
 * is traced through the scene, and every volume boundary that is exited without
 * ever having been entered means the origin is inside that volume.
 */
ccl_device void kernel_volume_stack_init(KernelGlobals *kg,
                                         Ray *ray,
                                         VolumeStack *stack)
{
	/* NULL ray happens in the baker, does it need proper initialization of
	 * camera in volume?
	 */
	if(!kernel_data.cam.is_inside_volume || ray == NULL) {
		/* Camera is guaranteed to be in the air, only take background volume
		 * into account in this case.
		 */
		if(kernel_data.background.volume_shader != SHADER_NONE) {
			stack[0].shader = kernel_data.background.volume_shader;
			stack[0].object = PRIM_NONE;
			stack[1].shader = SHADER_NONE;
		}
		else {
			stack[0].shader = SHADER_NONE;
		}
		return;
	}
	else {
		const float3 Pend = ray->P + ray->D*ray->t;
		Ray volume_ray = *ray;
		int stack_index = 0, enclosed_index = 0;
		int enclosed_volumes[VOLUME_STACK_SIZE];
		while(stack_index < VOLUME_STACK_SIZE - 1 &&
		      enclosed_index < VOLUME_STACK_SIZE - 1)
		{
			Intersection isect;
			bool hit = scene_intersect(kg, &volume_ray, PATH_RAY_ALL_VISIBILITY,
			                           &isect,
			                           NULL, 0.0f, 0.0f);
			if(!hit) {
				break;
			}
			ShaderData sd;
			shader_setup_from_ray(kg, &sd, &isect, &volume_ray, 0, 0);
			if(sd.flag & SD_HAS_VOLUME) {
				if(sd.flag & SD_BACKFACING) {
					/* If the ray exited a volume it never entered, the camera
					 * must be inside that volume.
					 */
					bool is_enclosed = false;
					for(int i = 0; i < enclosed_index; ++i) {
						if(enclosed_volumes[i] == sd.object) {
							is_enclosed = true;
							break;
						}
					}
					if(is_enclosed == false) {
						stack[stack_index].object = sd.object;
						stack[stack_index].shader = sd.shader;
						++stack_index;
					}
				}
				else {
					/* If the ray from the camera enters the volume, this volume
					 * shouldn't be added to the stack on exit.
					 */
					enclosed_volumes[enclosed_index++] = sd.object;
				}
			}
			/* Move ray forward, past the surface we just hit. */
			volume_ray.P = ray_offset(sd.P, -sd.Ng);
			if(volume_ray.t != FLT_MAX) {
				volume_ray.D = normalize_len(Pend - volume_ray.P, &volume_ray.t);
				/* TODO(sergey): Find a faster way of detecting that ray_offset
				 * moved us past the end point.
				 */
				if(dot(ray->D, volume_ray.D) < 0.0f) {
					break;
				}
			}
		}
		/* stack_index of 0 means quick checks outside of the kernel gave a false
		 * positive, nothing to worry about, just we've wasted quite a few of
		 * ticks just to come to the conclusion that the camera is in the air.
		 *
		 * In this case we do the same as above -- check whether the background
		 * has a volume.
		 *
		 * NOTE: the check used to be `== SHADER_NONE`, which dropped the
		 * background volume exactly when one was present; it must be
		 * `!= SHADER_NONE` to push the background volume onto the stack.
		 */
		if(stack_index == 0 &&
		   kernel_data.background.volume_shader != SHADER_NONE)
		{
			stack[0].shader = kernel_data.background.volume_shader;
			stack[0].object = PRIM_NONE;
			stack[1].shader = SHADER_NONE;
		}
		else {
			stack[stack_index].shader = SHADER_NONE;
		}
	}
}
ccl_device void kernel_volume_stack_enter_exit(KernelGlobals *kg, ShaderData *sd, VolumeStack *stack)
@ -1013,4 +1095,3 @@ ccl_device void kernel_volume_stack_enter_exit(KernelGlobals *kg, ShaderData *sd
}
CCL_NAMESPACE_END

View File

@ -15,14 +15,43 @@
*/
#include "camera.h"
#include "mesh.h"
#include "object.h"
#include "scene.h"
#include "device.h"
#include "util_foreach.h"
#include "util_vector.h"
CCL_NAMESPACE_BEGIN
namespace {
/* Returns true when any shader used by the object's mesh has a volume
 * closure attached. */
bool object_has_volume(Scene *scene, Object *object)
{
	const vector<uint>& used_shaders = object->mesh->used_shaders;
	for(size_t i = 0; i < used_shaders.size(); ++i) {
		if(scene->shaders[used_shaders[i]]->has_volume)
			return true;
	}
	return false;
}
/* Returns true when at least one object in the scene has a volume shader. */
bool scene_has_volume(Scene *scene)
{
	foreach(Object *object, scene->objects) {
		if(object_has_volume(scene, object))
			return true;
	}
	return false;
}
} // namespace
Camera::Camera()
{
shuttertime = 1.0f;
@ -270,6 +299,40 @@ void Camera::device_update(Device *device, DeviceScene *dscene, Scene *scene)
need_device_update = false;
previous_need_motion = need_motion;
/* Camera in volume. */
kcam->is_inside_volume = 0;
if(use_camera_in_volume) {
if(type == CAMERA_PANORAMA) {
/* It's not clear how to do viewplace->object intersection for
* panoramic cameras, for now let's just check for whether there
* are any volumes in the scene.
*/
kcam->is_inside_volume = scene_has_volume(scene);
}
else {
/* TODO(sergey): Whole bunch of stuff here actually:
* - We do rather stupid check with object AABB to camera viewplane
* AABB intersection, which is quite fast to perform, but which
* could give some false-positives checks here, More grained check
* would help avoiding time wasted n the kernel to initialize the
* volume stack.
* - We could cache has_volume in the cache, would save quite a few
* CPU ticks when having loads of instanced meshes.
*/
BoundBox viewplane_boundbox = viewplane_bounds_get();
for(size_t i = 0; i < scene->objects.size(); ++i) {
Object *object = scene->objects[i];
if(object_has_volume(scene, object)) {
if(viewplane_boundbox.intersects(object->bounds)) {
/* TODO(sergey): Consider adding more grained check. */
kcam->is_inside_volume = 1;
break;
}
}
}
}
}
}
void Camera::device_free(Device *device, DeviceScene *dscene)
@ -313,5 +376,56 @@ void Camera::tag_update()
need_update = true;
}
/* Map a raster-space point (in pixels) to its world-space position on the
 * camera's viewplane. Used by viewplane_bounds_get() for the camera-in-volume
 * detection. Only perspective and orthographic cameras are supported. */
float3 Camera::transform_raster_to_world(float raster_x, float raster_y)
{
	float3 D, P;
	if(type == CAMERA_PERSPECTIVE) {
		D = transform_perspective(&rastertocamera,
		                          make_float3(raster_x, raster_y, 0.0f));
		P = make_float3(0.0f, 0.0f, 0.0f);
		/* TODO(sergey): Aperture support? */
		P = transform_point(&cameratoworld, P);
		D = normalize(transform_direction(&cameratoworld, D));
		/* TODO(sergey): Clipping is conditional in kernel, and hence there
		 * could be mistakes in here, currently leading to wrong
		 * camera-in-volume detection.
		 */
		P += nearclip * D;
	}
	else if(type == CAMERA_ORTHOGRAPHIC) {
		D = make_float3(0.0f, 0.0f, 1.0f);
		/* TODO(sergey): Aperture support? */
		P = transform_perspective(&rastertocamera,
		                          make_float3(raster_x, raster_y, 0.0f));
		P = transform_point(&cameratoworld, P);
		D = normalize(transform_direction(&cameratoworld, D));
	}
	else {
		assert(!"unsupported camera type");
		/* Avoid returning uninitialized memory in release builds where the
		 * assert above is compiled out. */
		P = make_float3(0.0f, 0.0f, 0.0f);
	}
	return P;
}
/* Compute a world-space bounding box of the camera viewplane, grown from the
 * four raster corners (and, for perspective cameras, the center point).
 * Used as a cheap conservative test for whether the camera may be inside a
 * volume object. Not valid for panoramic cameras. */
BoundBox Camera::viewplane_bounds_get()
{
	assert(type != CAMERA_PANORAMA);
	/* TODO(sergey): This is all rather stupid, but is there a way to perform
	 * the checks we need in a more clear and smart fashion?
	 */
	BoundBox bounds = BoundBox::empty;
	bounds.grow(transform_raster_to_world(0.0f, 0.0f));
	bounds.grow(transform_raster_to_world(0.0f, (float)height));
	bounds.grow(transform_raster_to_world((float)width, (float)height));
	bounds.grow(transform_raster_to_world((float)width, 0.0f));
	if(type == CAMERA_PERSPECTIVE) {
		/* The center point has the largest distance along the local Z axis,
		 * use it to construct the bounding box.
		 */
		bounds.grow(transform_raster_to_world(0.5f*width, 0.5f*height));
	}
	return bounds;
}
CCL_NAMESPACE_END

View File

@ -102,6 +102,13 @@ public:
bool need_device_update;
int previous_need_motion;
/* Camera in volume. */
/* TODO(sergey): Get rid of this argument once
* cameras in volume considered fast enough for
* the regular kernel.
*/
bool use_camera_in_volume;
/* functions */
Camera();
~Camera();
@ -116,6 +123,9 @@ public:
bool modified(const Camera& cam);
bool motion_modified(const Camera& cam);
void tag_update();
BoundBox viewplane_bounds_get();
float3 transform_raster_to_world(float raster_x, float raster_y);
};
CCL_NAMESPACE_END

View File

@ -165,13 +165,13 @@ void Scene::device_update(Device *device_, Progress& progress)
if(progress.get_cancel()) return;
progress.set_status("Updating Camera");
camera->device_update(device, &dscene, this);
progress.set_status("Updating Objects");
object_manager->device_update(device, &dscene, this, progress);
if(progress.get_cancel()) return;
progress.set_status("Updating Objects");
object_manager->device_update(device, &dscene, this, progress);
progress.set_status("Updating Meshes");
mesh_manager->device_update(device, &dscene, this, progress);
if(progress.get_cancel()) return;
@ -185,8 +185,9 @@ void Scene::device_update(Device *device_, Progress& progress)
if(progress.get_cancel()) return;
progress.set_status("Updating Meshes");
mesh_manager->device_update(device, &dscene, this, progress);
/* TODO(sergey): Make sure camera is not needed above. */
progress.set_status("Updating Camera");
camera->device_update(device, &dscene, this);
if(progress.get_cancel()) return;

View File

@ -167,6 +167,15 @@ public:
return result;
}
/* AABB overlap test in center/half-extent form: two boxes overlap iff the
 * distance between their centers is no greater than the sum of their
 * half-extents along every axis. */
__forceinline bool intersects(const BoundBox& other)
{
	const float3 center_delta = center() - other.center();
	const float3 half_extents = (size() + other.size()) * 0.5f;
	if(fabsf(center_delta.x) > half_extents.x)
		return false;
	if(fabsf(center_delta.y) > half_extents.y)
		return false;
	return fabsf(center_delta.z) <= half_extents.z;
}
};
__forceinline BoundBox merge(const BoundBox& bbox, const float3& pt)