Libmv: Fix crash solving when having negative frames

Don't use linear array with frame as an index since it has the
following disadvantages:

- Requires every application to take care of frame remapping, which
  could be way more annoying than it sounds.

- Inefficient from a memory point of view when solving only a part of the
  footage which is closer to the end of the frame range.

Using a map is technically slower from a performance point of view, but
no difference could be felt in practice, as the actual computation is far
more complex than the access of a camera on individual frames.

Solves crash aspect of T72009
This commit is contained in:
Sergey Sharybin 2020-05-15 14:54:30 +02:00
parent 86580c437b
commit 2b36cf3118
Notes: blender-bot 2023-02-14 06:45:14 +01:00
Referenced by issue #72009, Blender crashes when solving motion tracking with negative frame positions.
4 changed files with 65 additions and 71 deletions

View File

@ -23,6 +23,7 @@
#ifndef LIBMV_AUTOTRACK_RECONSTRUCTION_H_
#define LIBMV_AUTOTRACK_RECONSTRUCTION_H_
#include "libmv/base/map.h"
#include "libmv/base/vector.h"
#include "libmv/numeric/numeric.h"
#include "libmv/simple_pipeline/camera_intrinsics.h"
@ -75,7 +76,7 @@ class Reconstruction {
vector<CameraIntrinsics*> camera_intrinsics_;
// Indexed by Marker::clip then by Marker::frame.
vector<vector<CameraPose> > camera_poses_;
vector<map<int, CameraPose>> camera_poses_;
// Indexed by Marker::track.
vector<Point> points_;

View File

@ -24,6 +24,7 @@
#include "ceres/ceres.h"
#include "ceres/rotation.h"
#include "libmv/base/map.h"
#include "libmv/base/vector.h"
#include "libmv/logging/logging.h"
#include "libmv/multiview/fundamental.h"
@ -407,47 +408,39 @@ void UnpackIntrinsicsFromArray(const double intrinsics_block[OFFSET_MAX],
// Get a vector of camera's rotations denoted by angle axis
// conjuncted with translations into single block
//
// Element with index i matches to a rotation+translation for
// Element with key i matches to a rotation+translation for
// camera at image i.
vector<Vec6> PackCamerasRotationAndTranslation(
const Tracks &tracks,
map<int, Vec6> PackCamerasRotationAndTranslation(
const EuclideanReconstruction &reconstruction) {
vector<Vec6> all_cameras_R_t;
int max_image = tracks.MaxImage();
map<int, Vec6> all_cameras_R_t;
all_cameras_R_t.resize(max_image + 1);
for (int i = 0; i <= max_image; i++) {
const EuclideanCamera *camera = reconstruction.CameraForImage(i);
if (!camera) {
continue;
}
ceres::RotationMatrixToAngleAxis(&camera->R(0, 0),
&all_cameras_R_t[i](0));
all_cameras_R_t[i].tail<3>() = camera->t;
vector<EuclideanCamera> all_cameras = reconstruction.AllCameras();
for (const EuclideanCamera& camera : all_cameras) {
Vec6 camera_R_t;
ceres::RotationMatrixToAngleAxis(&camera.R(0, 0), &camera_R_t(0));
camera_R_t.tail<3>() = camera.t;
all_cameras_R_t.insert(make_pair(camera.image, camera_R_t));
}
return all_cameras_R_t;
}
// Convert camera rotations from angle axis back to rotation matrix.
void UnpackCamerasRotationAndTranslation(
const Tracks &tracks,
const vector<Vec6> &all_cameras_R_t,
const map<int, Vec6> &all_cameras_R_t,
EuclideanReconstruction *reconstruction) {
int max_image = tracks.MaxImage();
for (int i = 0; i <= max_image; i++) {
EuclideanCamera *camera = reconstruction->CameraForImage(i);
for (map<int, Vec6>::value_type image_and_camera_R_T : all_cameras_R_t) {
const int image = image_and_camera_R_T.first;
const Vec6& camera_R_t = image_and_camera_R_T.second;
EuclideanCamera *camera = reconstruction->CameraForImage(image);
if (!camera) {
continue;
}
ceres::AngleAxisToRotationMatrix(&all_cameras_R_t[i](0),
&camera->R(0, 0));
camera->t = all_cameras_R_t[i].tail<3>();
ceres::AngleAxisToRotationMatrix(&camera_R_t(0), &camera->R(0, 0));
camera->t = camera_R_t.tail<3>();
}
}
@ -476,7 +469,7 @@ void CRSMatrixToEigenMatrix(const ceres::CRSMatrix &crs_matrix,
void EuclideanBundlerPerformEvaluation(const Tracks &tracks,
EuclideanReconstruction *reconstruction,
vector<Vec6> *all_cameras_R_t,
map<int, Vec6> *all_cameras_R_t,
ceres::Problem *problem,
BundleEvaluation *evaluation) {
int max_track = tracks.MaxTrack();
@ -603,7 +596,7 @@ void AddResidualBlockToProblem(const CameraIntrinsics *invariant_intrinsics,
// are to be totally still here.
void EuclideanBundlePointsOnly(const CameraIntrinsics *invariant_intrinsics,
const vector<Marker> &markers,
vector<Vec6> &all_cameras_R_t,
map<int, Vec6> &all_cameras_R_t,
double intrinsics_block[OFFSET_MAX],
EuclideanReconstruction *reconstruction) {
ceres::Problem::Options problem_options;
@ -699,8 +692,8 @@ void EuclideanBundleCommonIntrinsics(
//
// Block for minimization has got the following structure:
// <3 elements for angle-axis> <3 elements for translation>
vector<Vec6> all_cameras_R_t =
PackCamerasRotationAndTranslation(tracks, *reconstruction);
map<int, Vec6> all_cameras_R_t =
PackCamerasRotationAndTranslation(*reconstruction);
// Parameterization used to restrict camera motion for modal solvers.
ceres::SubsetParameterization *constant_translation_parameterization = NULL;
@ -827,9 +820,7 @@ void EuclideanBundleCommonIntrinsics(
LG << "Final report:\n" << summary.FullReport();
// Copy rotations and translations back.
UnpackCamerasRotationAndTranslation(tracks,
all_cameras_R_t,
reconstruction);
UnpackCamerasRotationAndTranslation(all_cameras_R_t, reconstruction);
// Copy intrinsics back.
if (bundle_intrinsics != BUNDLE_NO_INTRINSICS)

View File

@ -27,14 +27,14 @@ namespace libmv {
EuclideanReconstruction::EuclideanReconstruction() {}
EuclideanReconstruction::EuclideanReconstruction(
const EuclideanReconstruction &other) {
cameras_ = other.cameras_;
image_to_cameras_map_ = other.image_to_cameras_map_;
points_ = other.points_;
}
EuclideanReconstruction &EuclideanReconstruction::operator=(
const EuclideanReconstruction &other) {
if (&other != this) {
cameras_ = other.cameras_;
image_to_cameras_map_ = other.image_to_cameras_map_;
points_ = other.points_;
}
return *this;
@ -44,12 +44,13 @@ void EuclideanReconstruction::InsertCamera(int image,
const Mat3 &R,
const Vec3 &t) {
LG << "InsertCamera " << image << ":\nR:\n"<< R << "\nt:\n" << t;
if (image >= cameras_.size()) {
cameras_.resize(image + 1);
}
cameras_[image].image = image;
cameras_[image].R = R;
cameras_[image].t = t;
EuclideanCamera camera;
camera.image = image;
camera.R = R;
camera.t = t;
image_to_cameras_map_.insert(make_pair(image, camera));
}
void EuclideanReconstruction::InsertPoint(int track, const Vec3 &X) {
@ -69,22 +70,18 @@ EuclideanCamera *EuclideanReconstruction::CameraForImage(int image) {
const EuclideanCamera *EuclideanReconstruction::CameraForImage(
int image) const {
if (image < 0 || image >= cameras_.size()) {
ImageToCameraMap::const_iterator it = image_to_cameras_map_.find(image);
if (it == image_to_cameras_map_.end()) {
return NULL;
}
const EuclideanCamera *camera = &cameras_[image];
if (camera->image == -1) {
return NULL;
}
return camera;
return &it->second;
}
vector<EuclideanCamera> EuclideanReconstruction::AllCameras() const {
vector<EuclideanCamera> cameras;
for (int i = 0; i < cameras_.size(); ++i) {
if (cameras_[i].image != -1) {
cameras.push_back(cameras_[i]);
}
for (const ImageToCameraMap::value_type& image_and_camera :
image_to_cameras_map_) {
cameras.push_back(image_and_camera.second);
}
return cameras;
}
@ -115,14 +112,14 @@ vector<EuclideanPoint> EuclideanReconstruction::AllPoints() const {
return points;
}
void ProjectiveReconstruction::InsertCamera(int image,
const Mat34 &P) {
void ProjectiveReconstruction::InsertCamera(int image, const Mat34 &P) {
LG << "InsertCamera " << image << ":\nP:\n"<< P;
if (image >= cameras_.size()) {
cameras_.resize(image + 1);
}
cameras_[image].image = image;
cameras_[image].P = P;
ProjectiveCamera camera;
camera.image = image;
camera.P = P;
image_to_cameras_map_.insert(make_pair(image, camera));
}
void ProjectiveReconstruction::InsertPoint(int track, const Vec4 &X) {
@ -142,22 +139,18 @@ ProjectiveCamera *ProjectiveReconstruction::CameraForImage(int image) {
const ProjectiveCamera *ProjectiveReconstruction::CameraForImage(
int image) const {
if (image < 0 || image >= cameras_.size()) {
return NULL;
ImageToCameraMap::const_iterator it = image_to_cameras_map_.find(image);
if (it == image_to_cameras_map_.end()) {
return NULL;
}
const ProjectiveCamera *camera = &cameras_[image];
if (camera->image == -1) {
return NULL;
}
return camera;
return &it->second;
}
vector<ProjectiveCamera> ProjectiveReconstruction::AllCameras() const {
vector<ProjectiveCamera> cameras;
for (int i = 0; i < cameras_.size(); ++i) {
if (cameras_[i].image != -1) {
cameras.push_back(cameras_[i]);
}
for (const ImageToCameraMap::value_type& image_and_camera :
image_to_cameras_map_) {
cameras.push_back(image_and_camera.second);
}
return cameras;
}

View File

@ -22,6 +22,7 @@
#define LIBMV_SIMPLE_PIPELINE_RECONSTRUCTION_H_
#include "libmv/base/vector.h"
#include "libmv/base/map.h"
#include "libmv/numeric/numeric.h"
namespace libmv {
@ -120,7 +121,11 @@ class EuclideanReconstruction {
vector<EuclideanPoint> AllPoints() const;
private:
vector<EuclideanCamera> cameras_;
// Indexed by frame number.
typedef map<int, EuclideanCamera> ImageToCameraMap;
ImageToCameraMap image_to_cameras_map_;
// Indexed by track.
vector<EuclideanPoint> points_;
};
@ -208,7 +213,11 @@ class ProjectiveReconstruction {
vector<ProjectivePoint> AllPoints() const;
private:
vector<ProjectiveCamera> cameras_;
// Indexed by frame number.
typedef map<int, ProjectiveCamera> ImageToCameraMap;
ImageToCameraMap image_to_cameras_map_;
// Indexed by track.
vector<ProjectivePoint> points_;
};