Ceres: Update to the latest version

This brings in all the performance improvements, bug fixes and stability work
from the last year of Ceres development.
Sergey Sharybin 2015-10-28 18:55:04 +05:00
parent f10db730bc
commit ced1c34f74
268 changed files with 4065 additions and 2697 deletions

View File

@ -80,7 +80,6 @@ set(SRC
internal/ceres/gradient_problem.cc
internal/ceres/gradient_problem_solver.cc
internal/ceres/implicit_schur_complement.cc
internal/ceres/incomplete_lq_factorization.cc
internal/ceres/iterative_schur_complement_solver.cc
internal/ceres/lapack.cc
internal/ceres/levenberg_marquardt_strategy.cc
@ -209,7 +208,6 @@ set(SRC
internal/ceres/graph_algorithms.h
internal/ceres/graph.h
internal/ceres/implicit_schur_complement.h
internal/ceres/incomplete_lq_factorization.h
internal/ceres/integral_types.h
internal/ceres/iterative_schur_complement_solver.h
internal/ceres/lapack.h
@ -270,6 +268,7 @@ if(WITH_LIBMV_SCHUR_SPECIALIZATIONS)
internal/ceres/generated/partitioned_matrix_view_2_2_d.cc
internal/ceres/generated/partitioned_matrix_view_2_3_3.cc
internal/ceres/generated/partitioned_matrix_view_2_3_4.cc
internal/ceres/generated/partitioned_matrix_view_2_3_6.cc
internal/ceres/generated/partitioned_matrix_view_2_3_9.cc
internal/ceres/generated/partitioned_matrix_view_2_3_d.cc
internal/ceres/generated/partitioned_matrix_view_2_4_3.cc
@ -288,6 +287,7 @@ if(WITH_LIBMV_SCHUR_SPECIALIZATIONS)
internal/ceres/generated/schur_eliminator_2_2_d.cc
internal/ceres/generated/schur_eliminator_2_3_3.cc
internal/ceres/generated/schur_eliminator_2_3_4.cc
internal/ceres/generated/schur_eliminator_2_3_6.cc
internal/ceres/generated/schur_eliminator_2_3_9.cc
internal/ceres/generated/schur_eliminator_2_3_d.cc
internal/ceres/generated/schur_eliminator_2_4_3.cc

File diff suppressed because it is too large.

View File

@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/env python
# NOTE: This file is automatically generated by the bundle.sh script
# If you're making changes to this file, please update the template

View File

@ -129,7 +129,7 @@ set(INC
)
set(INC_SYS
${EIGEN3_INCLUDE_DIRS}
\${EIGEN3_INCLUDE_DIRS}
)
set(SRC

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2014 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -30,13 +30,16 @@
// Default (empty) configuration options for Ceres.
//
// IMPORTANT: Most users of Ceres will not use this file, when compiling Ceres
// with CMake, CMake will configure a new config.h with the currently
// selected Ceres compile options and copy it into the source
// directory before compilation. However, for some users of Ceres
// who compile without CMake, this file ensures that Ceres will
// compile, with the user either specifying manually the Ceres
// compile options, or passing them directly through the compiler.
// IMPORTANT: Most users of Ceres will not use this file; when
// compiling Ceres with CMake, CMake will configure a new
// config.h with the currently selected Ceres compile
// options in <BUILD_DIR>/config, which will be added to
// the include path for compilation, and installed with the
// public Ceres headers. However, for some users of Ceres
// who compile without CMake (Android), this file ensures
// that Ceres will compile, with the user either specifying
// manually the Ceres compile options, or passing them
// directly through the compiler.
#ifndef CERES_PUBLIC_INTERNAL_CONFIG_H_
#define CERES_PUBLIC_INTERNAL_CONFIG_H_
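As a point of reference for the no-CMake path the new comment describes, a minimal sketch: the option macros that CMake would otherwise write into the generated config.h (for example CERES_NO_SUITESPARSE, CERES_NO_LAPACK or CERES_NO_THREADS) are either added to this file by hand or passed straight to the compiler. The selection below is purely illustrative; use the macros matching the dependencies actually available.

// Sketch: options placed directly in config.h when building without CMake.
#define CERES_NO_SUITESPARSE
#define CERES_NO_LAPACK
#define CERES_NO_THREADS

// Or, equivalently, passed through the compiler when building the sources:
//   g++ -DCERES_NO_SUITESPARSE -DCERES_NO_LAPACK -DCERES_NO_THREADS -c ...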

View File

@ -157,8 +157,6 @@ internal/ceres/graph_algorithms.h
internal/ceres/graph.h
internal/ceres/implicit_schur_complement.cc
internal/ceres/implicit_schur_complement.h
internal/ceres/incomplete_lq_factorization.cc
internal/ceres/incomplete_lq_factorization.h
internal/ceres/integral_types.h
internal/ceres/iterative_schur_complement_solver.cc
internal/ceres/iterative_schur_complement_solver.h

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -33,9 +33,9 @@
#ifndef CERES_PUBLIC_AUTODIFF_LOCAL_PARAMETERIZATION_H_
#define CERES_PUBLIC_AUTODIFF_LOCAL_PARAMETERIZATION_H_
#include "ceres/local_parameterization.h"
#include "ceres/internal/autodiff.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/local_parameterization.h"
namespace ceres {

View File

@ -1,6 +1,6 @@
/* Ceres Solver - A fast non-linear least squares minimizer
* Copyright 2013 Google Inc. All rights reserved.
* http://code.google.com/p/ceres-solver/
* Copyright 2015 Google Inc. All rights reserved.
* http://ceres-solver.org/
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2014 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -78,7 +78,7 @@ class CERES_EXPORT ConditionedCostFunction : public CostFunction {
// functions, or not, depending on the ownership parameter. Conditioners
// may be NULL, in which case the corresponding residual is not modified.
ConditionedCostFunction(CostFunction* wrapped_cost_function,
const vector<CostFunction*>& conditioners,
const std::vector<CostFunction*>& conditioners,
Ownership ownership);
virtual ~ConditionedCostFunction();
@ -88,7 +88,7 @@ class CERES_EXPORT ConditionedCostFunction : public CostFunction {
private:
internal::scoped_ptr<CostFunction> wrapped_cost_function_;
vector<CostFunction*> conditioners_;
std::vector<CostFunction*> conditioners_;
Ownership ownership_;
};

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -115,7 +115,7 @@ class CERES_EXPORT CostFunction {
double* residuals,
double** jacobians) const = 0;
const vector<int32>& parameter_block_sizes() const {
const std::vector<int32>& parameter_block_sizes() const {
return parameter_block_sizes_;
}
@ -124,7 +124,7 @@ class CERES_EXPORT CostFunction {
}
protected:
vector<int32>* mutable_parameter_block_sizes() {
std::vector<int32>* mutable_parameter_block_sizes() {
return &parameter_block_sizes_;
}
@ -135,7 +135,7 @@ class CERES_EXPORT CostFunction {
private:
// Cost function signature metadata: number of inputs & their sizes,
// number of outputs (residuals).
vector<int32> parameter_block_sizes_;
std::vector<int32> parameter_block_sizes_;
int num_residuals_;
CERES_DISALLOW_COPY_AND_ASSIGN(CostFunction);
};

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -29,7 +29,7 @@
// Author: sameeragarwal@google.com (Sameer Agarwal)
//
// CostFunctionToFunctor is an adapter class that allows users to use
// CostFunction objects in templated functors which are to be used for
// SizedCostFunction objects in templated functors which are to be used for
// automatic differentiation. This allows the user to seamlessly mix
// analytic, numeric and automatic differentiation.
//
@ -37,7 +37,7 @@
//
// class IntrinsicProjection : public SizedCostFunction<2, 5, 3> {
// public:
// IntrinsicProjection(const double* observations);
// IntrinsicProjection(const double* observation);
// virtual bool Evaluate(double const* const* parameters,
// double* residuals,
// double** jacobians) const;
@ -62,10 +62,8 @@
// Then we can now do the following,
//
// struct CameraProjection {
// CameraProjection(double* observation) {
// intrinsic_projection_.reset(
// new CostFunctionToFunctor<2, 5, 3>(
// new IntrinsicProjection(observation_)));
// CameraProjection(const double* observation)
// : intrinsic_projection_(new IntrinsicProjection(observation)) {
// }
// template <typename T>
// bool operator()(const T* rotation,
@ -79,11 +77,11 @@
// // Note that we call intrinsic_projection_, just like it was
// // any other templated functor.
//
// return (*intrinsic_projection_)(intrinsics, transformed_point, residual);
// return intrinsic_projection_(intrinsics, transformed_point, residual);
// }
//
// private:
// scoped_ptr<CostFunctionToFunctor<2,5,3> > intrinsic_projection_;
// CostFunctionToFunctor<2,5,3> intrinsic_projection_;
// };
#ifndef CERES_PUBLIC_COST_FUNCTION_TO_FUNCTOR_H_
@ -93,6 +91,7 @@
#include <vector>
#include "ceres/cost_function.h"
#include "ceres/dynamic_cost_function_to_functor.h"
#include "ceres/internal/fixed_array.h"
#include "ceres/internal/port.h"
#include "ceres/internal/scoped_ptr.h"
@ -104,28 +103,29 @@ template <int kNumResiduals,
int N5 = 0, int N6 = 0, int N7 = 0, int N8 = 0, int N9 = 0>
class CostFunctionToFunctor {
public:
// Takes ownership of cost_function.
explicit CostFunctionToFunctor(CostFunction* cost_function)
: cost_function_(cost_function) {
: cost_functor_(cost_function) {
CHECK_NOTNULL(cost_function);
CHECK(kNumResiduals > 0 || kNumResiduals == DYNAMIC);
// This block breaks the 80 column rule to keep it somewhat readable.
CHECK((!N1 && !N2 && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && !N2 && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && !N5 && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && !N7 && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && !N9) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && (N9 > 0)))
((N1 > 0) && (N2 > 0) && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && !N5 && !N6 && !N7 && !N8 && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && !N6 && !N7 && !N8 && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && !N7 && !N8 && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && !N8 && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && (N9 > 0))) // NOLINT
<< "Zero block cannot precede a non-zero block. Block sizes are "
<< "(ignore trailing 0s): " << N0 << ", " << N1 << ", " << N2 << ", "
<< N3 << ", " << N4 << ", " << N5 << ", " << N6 << ", " << N7 << ", "
<< N8 << ", " << N9;
const vector<int32>& parameter_block_sizes =
const std::vector<int32>& parameter_block_sizes =
cost_function->parameter_block_sizes();
const int num_parameter_blocks =
(N0 > 0) + (N1 > 0) + (N2 > 0) + (N3 > 0) + (N4 > 0) +
@ -160,7 +160,7 @@ class CostFunctionToFunctor {
CHECK_EQ(N8, 0);
CHECK_EQ(N9, 0);
return cost_function_->Evaluate(&x0, residuals, NULL);
return cost_functor_(&x0, residuals);
}
bool operator()(const double* x0,
@ -179,7 +179,7 @@ class CostFunctionToFunctor {
internal::FixedArray<const double*> parameter_blocks(2);
parameter_blocks[0] = x0;
parameter_blocks[1] = x1;
return cost_function_->Evaluate(parameter_blocks.get(), residuals, NULL);
return cost_functor_(parameter_blocks.get(), residuals);
}
bool operator()(const double* x0,
@ -200,7 +200,7 @@ class CostFunctionToFunctor {
parameter_blocks[0] = x0;
parameter_blocks[1] = x1;
parameter_blocks[2] = x2;
return cost_function_->Evaluate(parameter_blocks.get(), residuals, NULL);
return cost_functor_(parameter_blocks.get(), residuals);
}
bool operator()(const double* x0,
@ -223,7 +223,7 @@ class CostFunctionToFunctor {
parameter_blocks[1] = x1;
parameter_blocks[2] = x2;
parameter_blocks[3] = x3;
return cost_function_->Evaluate(parameter_blocks.get(), residuals, NULL);
return cost_functor_(parameter_blocks.get(), residuals);
}
bool operator()(const double* x0,
@ -248,7 +248,7 @@ class CostFunctionToFunctor {
parameter_blocks[2] = x2;
parameter_blocks[3] = x3;
parameter_blocks[4] = x4;
return cost_function_->Evaluate(parameter_blocks.get(), residuals, NULL);
return cost_functor_(parameter_blocks.get(), residuals);
}
bool operator()(const double* x0,
@ -275,7 +275,7 @@ class CostFunctionToFunctor {
parameter_blocks[3] = x3;
parameter_blocks[4] = x4;
parameter_blocks[5] = x5;
return cost_function_->Evaluate(parameter_blocks.get(), residuals, NULL);
return cost_functor_(parameter_blocks.get(), residuals);
}
bool operator()(const double* x0,
@ -304,7 +304,7 @@ class CostFunctionToFunctor {
parameter_blocks[4] = x4;
parameter_blocks[5] = x5;
parameter_blocks[6] = x6;
return cost_function_->Evaluate(parameter_blocks.get(), residuals, NULL);
return cost_functor_(parameter_blocks.get(), residuals);
}
bool operator()(const double* x0,
@ -335,7 +335,7 @@ class CostFunctionToFunctor {
parameter_blocks[5] = x5;
parameter_blocks[6] = x6;
parameter_blocks[7] = x7;
return cost_function_->Evaluate(parameter_blocks.get(), residuals, NULL);
return cost_functor_(parameter_blocks.get(), residuals);
}
bool operator()(const double* x0,
@ -368,7 +368,7 @@ class CostFunctionToFunctor {
parameter_blocks[6] = x6;
parameter_blocks[7] = x7;
parameter_blocks[8] = x8;
return cost_function_->Evaluate(parameter_blocks.get(), residuals, NULL);
return cost_functor_(parameter_blocks.get(), residuals);
}
bool operator()(const double* x0,
@ -403,7 +403,7 @@ class CostFunctionToFunctor {
parameter_blocks[7] = x7;
parameter_blocks[8] = x8;
parameter_blocks[9] = x9;
return cost_function_->Evaluate(parameter_blocks.get(), residuals, NULL);
return cost_functor_(parameter_blocks.get(), residuals);
}
template <typename JetT>
@ -418,7 +418,7 @@ class CostFunctionToFunctor {
CHECK_EQ(N7, 0);
CHECK_EQ(N8, 0);
CHECK_EQ(N9, 0);
return EvaluateWithJets(&x0, residuals);
return cost_functor_(&x0, residuals);
}
template <typename JetT>
@ -438,7 +438,7 @@ class CostFunctionToFunctor {
internal::FixedArray<const JetT*> jets(2);
jets[0] = x0;
jets[1] = x1;
return EvaluateWithJets(jets.get(), residuals);
return cost_functor_(jets.get(), residuals);
}
template <typename JetT>
@ -460,7 +460,7 @@ class CostFunctionToFunctor {
jets[0] = x0;
jets[1] = x1;
jets[2] = x2;
return EvaluateWithJets(jets.get(), residuals);
return cost_functor_(jets.get(), residuals);
}
template <typename JetT>
@ -484,7 +484,7 @@ class CostFunctionToFunctor {
jets[1] = x1;
jets[2] = x2;
jets[3] = x3;
return EvaluateWithJets(jets.get(), residuals);
return cost_functor_(jets.get(), residuals);
}
template <typename JetT>
@ -510,7 +510,7 @@ class CostFunctionToFunctor {
jets[2] = x2;
jets[3] = x3;
jets[4] = x4;
return EvaluateWithJets(jets.get(), residuals);
return cost_functor_(jets.get(), residuals);
}
template <typename JetT>
@ -538,7 +538,7 @@ class CostFunctionToFunctor {
jets[3] = x3;
jets[4] = x4;
jets[5] = x5;
return EvaluateWithJets(jets.get(), residuals);
return cost_functor_(jets.get(), residuals);
}
template <typename JetT>
@ -568,7 +568,7 @@ class CostFunctionToFunctor {
jets[4] = x4;
jets[5] = x5;
jets[6] = x6;
return EvaluateWithJets(jets.get(), residuals);
return cost_functor_(jets.get(), residuals);
}
template <typename JetT>
@ -600,7 +600,7 @@ class CostFunctionToFunctor {
jets[5] = x5;
jets[6] = x6;
jets[7] = x7;
return EvaluateWithJets(jets.get(), residuals);
return cost_functor_(jets.get(), residuals);
}
template <typename JetT>
@ -634,7 +634,7 @@ class CostFunctionToFunctor {
jets[6] = x6;
jets[7] = x7;
jets[8] = x8;
return EvaluateWithJets(jets.get(), residuals);
return cost_functor_(jets.get(), residuals);
}
template <typename JetT>
@ -670,79 +670,11 @@ class CostFunctionToFunctor {
jets[7] = x7;
jets[8] = x8;
jets[9] = x9;
return EvaluateWithJets(jets.get(), residuals);
return cost_functor_(jets.get(), residuals);
}
private:
template <typename JetT>
bool EvaluateWithJets(const JetT** inputs, JetT* output) const {
const int kNumParameters = N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7 + N8 + N9;
const vector<int32>& parameter_block_sizes =
cost_function_->parameter_block_sizes();
const int num_parameter_blocks = parameter_block_sizes.size();
const int num_residuals = cost_function_->num_residuals();
internal::FixedArray<double> parameters(kNumParameters);
internal::FixedArray<double*> parameter_blocks(num_parameter_blocks);
internal::FixedArray<double> jacobians(num_residuals * kNumParameters);
internal::FixedArray<double*> jacobian_blocks(num_parameter_blocks);
internal::FixedArray<double> residuals(num_residuals);
// Build a set of arrays to get the residuals and jacobians from
// the CostFunction wrapped by this functor.
double* parameter_ptr = parameters.get();
double* jacobian_ptr = jacobians.get();
for (int i = 0; i < num_parameter_blocks; ++i) {
parameter_blocks[i] = parameter_ptr;
jacobian_blocks[i] = jacobian_ptr;
for (int j = 0; j < parameter_block_sizes[i]; ++j) {
*parameter_ptr++ = inputs[i][j].a;
}
jacobian_ptr += num_residuals * parameter_block_sizes[i];
}
if (!cost_function_->Evaluate(parameter_blocks.get(),
residuals.get(),
jacobian_blocks.get())) {
return false;
}
// Now that we have the incoming Jets, which are carrying the
// partial derivatives of each of the inputs w.r.t to some other
// underlying parameters. The derivative of the outputs of the
// cost function w.r.t to the same underlying parameters can now
// be computed by applying the chain rule.
//
// d output[i] d output[i] d input[j]
// -------------- = sum_j ----------- * ------------
// d parameter[k] d input[j] d parameter[k]
//
// d input[j]
// -------------- = inputs[j], so
// d parameter[k]
//
// outputJet[i] = sum_k jacobian[i][k] * inputJet[k]
//
// The following loop, iterates over the residuals, computing one
// output jet at a time.
for (int i = 0; i < num_residuals; ++i) {
output[i].a = residuals[i];
output[i].v.setZero();
for (int j = 0; j < num_parameter_blocks; ++j) {
const int32 block_size = parameter_block_sizes[j];
for (int k = 0; k < parameter_block_sizes[j]; ++k) {
output[i].v +=
jacobian_blocks[j][i * block_size + k] * inputs[j][k].v;
}
}
}
return true;
}
private:
internal::scoped_ptr<CostFunction> cost_function_;
DynamicCostFunctionToFunctor cost_functor_;
};
}  // namespace ceres
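Collected in one place, the new usage pattern from the updated header comment looks as follows. IntrinsicProjection, CameraProjection and RotateAndTranslatePoint are the same hypothetical names used in that comment, and the block sizes are illustrative; the point of the change is that CostFunctionToFunctor is now held by value and called directly, instead of being held in a scoped_ptr and dereferenced.

// Sketch of the updated usage described in the comment above.
struct CameraProjection {
  explicit CameraProjection(const double* observation)
      : intrinsic_projection_(new IntrinsicProjection(observation)) {}

  template <typename T>
  bool operator()(const T* rotation, const T* translation,
                  const T* intrinsics, const T* point, T* residual) const {
    T transformed_point[3];
    RotateAndTranslatePoint(rotation, translation, point, transformed_point);
    // The wrapped CostFunction is invoked like any other templated functor.
    return intrinsic_projection_(intrinsics, transformed_point, residual);
  }

 private:
  CostFunctionToFunctor<2, 5, 3> intrinsic_projection_;  // held by value now
};

As with any cost functor, this can then be handed to AutoDiffCostFunction, with the residual and parameter block sizes mirroring the ones chosen above.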

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -183,7 +183,7 @@ class CovarianceImpl;
// Covariance::Options options;
// Covariance covariance(options);
//
// vector<pair<const double*, const double*> > covariance_blocks;
// std::vector<std::pair<const double*, const double*> > covariance_blocks;
// covariance_blocks.push_back(make_pair(x, x));
// covariance_blocks.push_back(make_pair(y, y));
// covariance_blocks.push_back(make_pair(x, y));
@ -353,10 +353,11 @@ class CERES_EXPORT Covariance {
// Covariance::Options for more on the conditions under which this
// function returns false.
bool Compute(
const vector<pair<const double*, const double*> >& covariance_blocks,
const std::vector<std::pair<const double*,
const double*> >& covariance_blocks,
Problem* problem);
// Return the block of the covariance matrix corresponding to
// Return the block of the cross-covariance matrix corresponding to
// parameter_block1 and parameter_block2.
//
// Compute must be called before the first call to
@ -373,6 +374,26 @@ class CERES_EXPORT Covariance {
const double* parameter_block2,
double* covariance_block) const;
// Return the block of the cross-covariance matrix corresponding to
// parameter_block1 and parameter_block2.
// Returns cross-covariance in the tangent space if a local
// parameterization is associated with either parameter block;
// else returns cross-covariance in the ambient space.
//
// Compute must be called before the first call to
// GetCovarianceBlock and the pair <parameter_block1,
// parameter_block2> OR the pair <parameter_block2,
// parameter_block1> must have been present in the vector
// covariance_blocks when Compute was called. Otherwise
// GetCovarianceBlock will return false.
//
// covariance_block must point to a memory location that can store a
// parameter_block1_local_size x parameter_block2_local_size matrix. The
// returned covariance will be a row-major matrix.
bool GetCovarianceBlockInTangentSpace(const double* parameter_block1,
const double* parameter_block2,
double* covariance_block) const;
private:
internal::scoped_ptr<internal::CovarianceImpl> impl_;
};
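A short sketch of the new accessor in use, following the Compute example earlier in this header. x and problem are the same hypothetical parameter block and Problem from that example; the local size of 3 is illustrative.

// Sketch: querying a covariance block in the tangent space.
Covariance::Options options;
Covariance covariance(options);

std::vector<std::pair<const double*, const double*> > covariance_blocks;
covariance_blocks.push_back(std::make_pair(x, x));
CHECK(covariance.Compute(covariance_blocks, &problem));

// If x has a local parameterization with local size 3, the buffer must be
// able to hold a 3x3 row-major matrix.
double covariance_xx[3 * 3];
covariance.GetCovarianceBlockInTangentSpace(x, x, covariance_xx);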

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -74,9 +74,9 @@ struct CERES_EXPORT CRSMatrix {
// cols = [ 1, 3, 1, 2, 3, 0, 1]
// values = [10, 4, 2, -3, 2, 1, 2]
vector<int> cols;
vector<int> rows;
vector<double> values;
std::vector<int> cols;
std::vector<int> rows;
std::vector<double> values;
};
} // namespace ceres

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -120,18 +120,18 @@ class DynamicAutoDiffCostFunction : public CostFunction {
0);
// Allocate scratch space for the strided evaluation.
vector<Jet<double, Stride> > input_jets(num_parameters);
vector<Jet<double, Stride> > output_jets(num_residuals());
std::vector<Jet<double, Stride> > input_jets(num_parameters);
std::vector<Jet<double, Stride> > output_jets(num_residuals());
// Make the parameter pack that is sent to the functor (reused).
vector<Jet<double, Stride>* > jet_parameters(num_parameter_blocks,
std::vector<Jet<double, Stride>* > jet_parameters(num_parameter_blocks,
static_cast<Jet<double, Stride>* >(NULL));
int num_active_parameters = 0;
// To handle constant parameters between non-constant parameter blocks, the
// start position --- a raw parameter index --- of each contiguous block of
// non-constant parameters is recorded in start_derivative_section.
vector<int> start_derivative_section;
std::vector<int> start_derivative_section;
bool in_derivative_section = false;
int parameter_cursor = 0;

View File

@ -0,0 +1,190 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
// dgossow@google.com (David Gossow)
//
// DynamicCostFunctionToFunctor allows users to use CostFunction
// objects in templated functors which are to be used for automatic
// differentiation. It works similar to CostFunctionToFunctor, with the
// difference that it allows you to wrap a cost function with dynamic numbers
// of parameters and residuals.
//
// For example, let us assume that
//
// class IntrinsicProjection : public CostFunction {
// public:
// IntrinsicProjection(const double* observation);
// virtual bool Evaluate(double const* const* parameters,
// double* residuals,
// double** jacobians) const;
// };
//
// is a cost function that implements the projection of a point in its
// local coordinate system onto its image plane and subtracts it from
// the observed point projection. It can compute its residual and
// either via analytic or numerical differentiation can compute its
// jacobians. The intrinsics are passed in as parameters[0] and the point as
// parameters[1].
//
// Now we would like to compose the action of this CostFunction with
// the action of camera extrinsics, i.e., rotation and
// translation. Say we have a templated function
//
// template<typename T>
// void RotateAndTranslatePoint(const T* rotation, const T* translation,
// const T* point, T* result);
//
// Then we can now do the following,
//
// struct CameraProjection {
// CameraProjection(const double* observation)
// : intrinsic_projection_(new IntrinsicProjection(observation)) {
// }
// template <typename T>
// bool operator()(T const* const* parameters,
// T* residual) const {
// const T* rotation = parameters[0];
// const T* translation = parameters[1];
// const T* intrinsics = parameters[2];
// const T* point = parameters[3];
// T transformed_point[3];
// RotateAndTranslatePoint(rotation, translation, point, transformed_point);
//
// // Note that we call intrinsic_projection_, just like it was
// // any other templated functor.
// const T* projection_parameters[2];
// projection_parameters[0] = intrinsics;
// projection_parameters[1] = transformed_point;
// return intrinsic_projection_(projection_parameters, residual);
// }
//
// private:
// DynamicCostFunctionToFunctor intrinsic_projection_;
// };
#ifndef CERES_PUBLIC_DYNAMIC_COST_FUNCTION_TO_FUNCTOR_H_
#define CERES_PUBLIC_DYNAMIC_COST_FUNCTION_TO_FUNCTOR_H_
#include <numeric>
#include <vector>
#include "ceres/cost_function.h"
#include "ceres/internal/fixed_array.h"
#include "ceres/internal/port.h"
#include "ceres/internal/scoped_ptr.h"
namespace ceres {
class DynamicCostFunctionToFunctor {
public:
// Takes ownership of cost_function.
explicit DynamicCostFunctionToFunctor(CostFunction* cost_function)
: cost_function_(cost_function) {
CHECK_NOTNULL(cost_function);
}
bool operator()(double const* const* parameters, double* residuals) const {
return cost_function_->Evaluate(parameters, residuals, NULL);
}
template <typename JetT>
bool operator()(JetT const* const* inputs, JetT* output) const {
const std::vector<int32>& parameter_block_sizes =
cost_function_->parameter_block_sizes();
const int num_parameter_blocks = parameter_block_sizes.size();
const int num_residuals = cost_function_->num_residuals();
const int num_parameters = std::accumulate(parameter_block_sizes.begin(),
parameter_block_sizes.end(), 0);
internal::FixedArray<double> parameters(num_parameters);
internal::FixedArray<double*> parameter_blocks(num_parameter_blocks);
internal::FixedArray<double> jacobians(num_residuals * num_parameters);
internal::FixedArray<double*> jacobian_blocks(num_parameter_blocks);
internal::FixedArray<double> residuals(num_residuals);
// Build a set of arrays to get the residuals and jacobians from
// the CostFunction wrapped by this functor.
double* parameter_ptr = parameters.get();
double* jacobian_ptr = jacobians.get();
for (int i = 0; i < num_parameter_blocks; ++i) {
parameter_blocks[i] = parameter_ptr;
jacobian_blocks[i] = jacobian_ptr;
for (int j = 0; j < parameter_block_sizes[i]; ++j) {
*parameter_ptr++ = inputs[i][j].a;
}
jacobian_ptr += num_residuals * parameter_block_sizes[i];
}
if (!cost_function_->Evaluate(parameter_blocks.get(),
residuals.get(),
jacobian_blocks.get())) {
return false;
}
// Now that we have the incoming Jets, which are carrying the
// partial derivatives of each of the inputs w.r.t to some other
// underlying parameters. The derivative of the outputs of the
// cost function w.r.t to the same underlying parameters can now
// be computed by applying the chain rule.
//
// d output[i] d output[i] d input[j]
// -------------- = sum_j ----------- * ------------
// d parameter[k] d input[j] d parameter[k]
//
// d input[j]
// -------------- = inputs[j], so
// d parameter[k]
//
// outputJet[i] = sum_k jacobian[i][k] * inputJet[k]
//
// The following loop, iterates over the residuals, computing one
// output jet at a time.
for (int i = 0; i < num_residuals; ++i) {
output[i].a = residuals[i];
output[i].v.setZero();
for (int j = 0; j < num_parameter_blocks; ++j) {
const int32 block_size = parameter_block_sizes[j];
for (int k = 0; k < parameter_block_sizes[j]; ++k) {
output[i].v +=
jacobian_blocks[j][i * block_size + k] * inputs[j][k].v;
}
}
}
return true;
}
private:
internal::scoped_ptr<CostFunction> cost_function_;
};
} // namespace ceres
#endif // CERES_PUBLIC_DYNAMIC_COST_FUNCTION_TO_FUNCTOR_H_
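A brief sketch of how a functor built on DynamicCostFunctionToFunctor, such as the CameraProjection example in the comments above, would typically be combined with DynamicAutoDiffCostFunction from this same release. The stride of 4 and the parameter block sizes are illustrative.

// Sketch: dynamic automatic differentiation over the wrapping functor.
DynamicAutoDiffCostFunction<CameraProjection, 4>* cost_function =
    new DynamicAutoDiffCostFunction<CameraProjection, 4>(
        new CameraProjection(observation));
cost_function->AddParameterBlock(4);  // rotation
cost_function->AddParameterBlock(3);  // translation
cost_function->AddParameterBlock(5);  // intrinsics
cost_function->AddParameterBlock(3);  // point
cost_function->SetNumResiduals(2);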

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -29,6 +29,7 @@
// Author: mierle@gmail.com (Keir Mierle)
// sameeragarwal@google.com (Sameer Agarwal)
// thadh@gmail.com (Thad Hughes)
// tbennun@gmail.com (Tal Ben-Nun)
//
// This numeric diff implementation differs from the one found in
// numeric_diff_cost_function.h by supporting numericdiff on cost
@ -41,7 +42,6 @@
// numeric diff; the expected interface for the cost functors is:
//
// struct MyCostFunctor {
// template<typename T>
// bool operator()(double const* const* parameters, double* residuals) const {
// // Use parameters[i] to access the i'th parameter block.
// }
@ -68,19 +68,37 @@
#include "ceres/internal/scoped_ptr.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/numeric_diff.h"
#include "ceres/numeric_diff_options.h"
#include "glog/logging.h"
namespace ceres {
template <typename CostFunctor, NumericDiffMethod method = CENTRAL>
template <typename CostFunctor, NumericDiffMethodType method = CENTRAL>
class DynamicNumericDiffCostFunction : public CostFunction {
public:
explicit DynamicNumericDiffCostFunction(const CostFunctor* functor,
Ownership ownership = TAKE_OWNERSHIP,
double relative_step_size = 1e-6)
explicit DynamicNumericDiffCostFunction(
const CostFunctor* functor,
Ownership ownership = TAKE_OWNERSHIP,
const NumericDiffOptions& options = NumericDiffOptions())
: functor_(functor),
ownership_(ownership),
relative_step_size_(relative_step_size) {
options_(options) {
}
// Deprecated. New users should avoid using this constructor. Instead, use the
// constructor with NumericDiffOptions.
DynamicNumericDiffCostFunction(
const CostFunctor* functor,
Ownership ownership,
double relative_step_size)
: functor_(functor),
ownership_(ownership),
options_() {
LOG(WARNING) << "This constructor is deprecated and will be removed in "
"a future version. Please use the NumericDiffOptions "
"constructor instead.";
options_.relative_step_size = relative_step_size;
}
virtual ~DynamicNumericDiffCostFunction() {
@ -100,11 +118,12 @@ class DynamicNumericDiffCostFunction : public CostFunction {
virtual bool Evaluate(double const* const* parameters,
double* residuals,
double** jacobians) const {
using internal::NumericDiff;
CHECK_GT(num_residuals(), 0)
<< "You must call DynamicNumericDiffCostFunction::SetNumResiduals() "
<< "before DynamicNumericDiffCostFunction::Evaluate().";
const vector<int32>& block_sizes = parameter_block_sizes();
const std::vector<int32>& block_sizes = parameter_block_sizes();
CHECK(!block_sizes.empty())
<< "You must call DynamicNumericDiffCostFunction::AddParameterBlock() "
<< "before DynamicNumericDiffCostFunction::Evaluate().";
@ -116,8 +135,8 @@ class DynamicNumericDiffCostFunction : public CostFunction {
// Create local space for a copy of the parameters which will get mutated.
int parameters_size = accumulate(block_sizes.begin(), block_sizes.end(), 0);
vector<double> parameters_copy(parameters_size);
vector<double*> parameters_references_copy(block_sizes.size());
std::vector<double> parameters_copy(parameters_size);
std::vector<double*> parameters_references_copy(block_sizes.size());
parameters_references_copy[0] = &parameters_copy[0];
for (int block = 1; block < block_sizes.size(); ++block) {
parameters_references_copy[block] = parameters_references_copy[block - 1]
@ -133,12 +152,18 @@ class DynamicNumericDiffCostFunction : public CostFunction {
for (int block = 0; block < block_sizes.size(); ++block) {
if (jacobians[block] != NULL &&
!EvaluateJacobianForParameterBlock(block_sizes[block],
block,
relative_step_size_,
!NumericDiff<CostFunctor, method, DYNAMIC,
DYNAMIC, DYNAMIC, DYNAMIC, DYNAMIC, DYNAMIC,
DYNAMIC, DYNAMIC, DYNAMIC, DYNAMIC, DYNAMIC,
DYNAMIC, DYNAMIC>::EvaluateJacobianForParameterBlock(
functor_.get(),
residuals,
options_,
this->num_residuals(),
block,
block_sizes[block],
&parameters_references_copy[0],
jacobians)) {
jacobians[block])) {
return false;
}
}
@ -146,91 +171,6 @@ class DynamicNumericDiffCostFunction : public CostFunction {
}
private:
bool EvaluateJacobianForParameterBlock(const int parameter_block_size,
const int parameter_block,
const double relative_step_size,
double const* residuals_at_eval_point,
double** parameters,
double** jacobians) const {
using Eigen::Map;
using Eigen::Matrix;
using Eigen::Dynamic;
using Eigen::RowMajor;
typedef Matrix<double, Dynamic, 1> ResidualVector;
typedef Matrix<double, Dynamic, 1> ParameterVector;
typedef Matrix<double, Dynamic, Dynamic, RowMajor> JacobianMatrix;
int num_residuals = this->num_residuals();
Map<JacobianMatrix> parameter_jacobian(jacobians[parameter_block],
num_residuals,
parameter_block_size);
// Mutate one element at a time and then restore.
Map<ParameterVector> x_plus_delta(parameters[parameter_block],
parameter_block_size);
ParameterVector x(x_plus_delta);
ParameterVector step_size = x.array().abs() * relative_step_size;
// To handle cases where a parameter is exactly zero, instead use
// the mean step_size for the other dimensions.
double fallback_step_size = step_size.sum() / step_size.rows();
if (fallback_step_size == 0.0) {
// If all the parameters are zero, there's no good answer. Use the given
// relative step_size as absolute step_size and hope for the best.
fallback_step_size = relative_step_size;
}
// For each parameter in the parameter block, use finite
// differences to compute the derivative for that parameter.
for (int j = 0; j < parameter_block_size; ++j) {
if (step_size(j) == 0.0) {
// The parameter is exactly zero, so compromise and use the
// mean step_size from the other parameters. This can break in
// many cases, but it's hard to pick a good number without
// problem specific knowledge.
step_size(j) = fallback_step_size;
}
x_plus_delta(j) = x(j) + step_size(j);
ResidualVector residuals(num_residuals);
if (!EvaluateCostFunctor(parameters, &residuals[0])) {
// Something went wrong; bail.
return false;
}
// Compute this column of the jacobian in 3 steps:
// 1. Store residuals for the forward part.
// 2. Subtract residuals for the backward (or 0) part.
// 3. Divide out the run.
parameter_jacobian.col(j).matrix() = residuals;
double one_over_h = 1 / step_size(j);
if (method == CENTRAL) {
// Compute the function on the other side of x(j).
x_plus_delta(j) = x(j) - step_size(j);
if (!EvaluateCostFunctor(parameters, &residuals[0])) {
// Something went wrong; bail.
return false;
}
parameter_jacobian.col(j) -= residuals;
one_over_h /= 2;
} else {
// Forward difference only; reuse existing residuals evaluation.
parameter_jacobian.col(j) -=
Map<const ResidualVector>(residuals_at_eval_point, num_residuals);
}
x_plus_delta(j) = x(j); // Restore x_plus_delta.
// Divide out the run to get slope.
parameter_jacobian.col(j) *= one_over_h;
}
return true;
}
bool EvaluateCostFunctor(double const* const* parameters,
double* residuals) const {
return EvaluateCostFunctorImpl(functor_.get(),
@ -257,7 +197,7 @@ class DynamicNumericDiffCostFunction : public CostFunction {
internal::scoped_ptr<const CostFunctor> functor_;
Ownership ownership_;
const double relative_step_size_;
NumericDiffOptions options_;
};
} // namespace ceres
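To make the constructor change above concrete, a minimal sketch. MyCostFunctor is a placeholder functor with the double const* const* interface shown earlier in this header, and the block sizes are illustrative.

// Preferred: configure the finite differencing through NumericDiffOptions.
NumericDiffOptions options;
options.relative_step_size = 1e-6;
DynamicNumericDiffCostFunction<MyCostFunctor>* cost_function =
    new DynamicNumericDiffCostFunction<MyCostFunctor>(
        new MyCostFunctor, TAKE_OWNERSHIP, options);
cost_function->AddParameterBlock(5);
cost_function->SetNumResiduals(3);

// Deprecated: passing relative_step_size directly still compiles, but logs
// the warning emitted by the constructor above.
// new DynamicNumericDiffCostFunction<MyCostFunctor>(
//     new MyCostFunctor, TAKE_OWNERSHIP, 1e-6);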

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -50,26 +50,9 @@ namespace ceres {
inline bool IsFinite (double x) { return _finite(x) != 0; }
inline bool IsInfinite(double x) { return _finite(x) == 0 && _isnan(x) == 0; }
inline bool IsNaN (double x) { return _isnan(x) != 0; }
inline bool IsNormal (double x) {
int classification = _fpclass(x);
return classification == _FPCLASS_NN ||
classification == _FPCLASS_PN;
}
#elif defined(ANDROID) && defined(_STLPORT_VERSION)
// On Android, when using the STLPort, the C++ isnan and isnormal functions
// are defined as macros.
inline bool IsNaN (double x) { return isnan(x); }
inline bool IsNormal (double x) { return isnormal(x); }
// On Android NDK r6, when using STLPort, the isinf and isfinite functions are
// not available, so reimplement them.
inline bool IsInfinite(double x) {
return x == std::numeric_limits<double>::infinity() ||
x == -std::numeric_limits<double>::infinity();
}
inline bool IsFinite(double x) {
return !isnan(x) && !IsInfinite(x);
inline bool IsNormal (double x) { // NOLINT
const int classification = _fpclass(x);
return (classification == _FPCLASS_NN || classification == _FPCLASS_PN);
}
# else

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -78,10 +78,10 @@ class GradientChecker {
// block.
// Derivatives as computed by the cost function.
vector<Matrix> term_jacobians;
std::vector<Matrix> term_jacobians;
// Derivatives as computed by finite differencing.
vector<Matrix> finite_difference_jacobians;
std::vector<Matrix> finite_difference_jacobians;
// Infinity-norm of term_jacobians - finite_difference_jacobians.
double error_jacobians;
@ -119,7 +119,7 @@ class GradientChecker {
// Do a consistency check between the term and the template parameters.
CHECK_EQ(M, term->num_residuals());
const int num_residuals = M;
const vector<int32>& block_sizes = term->parameter_block_sizes();
const std::vector<int32>& block_sizes = term->parameter_block_sizes();
const int num_blocks = block_sizes.size();
CHECK_LE(num_blocks, 5) << "Unable to test functions that take more "

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2014 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2014 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -81,7 +81,7 @@ class CERES_EXPORT GradientProblemSolver {
// Returns true if the options struct has a valid
// configuration. Returns false otherwise, and fills in *error
// with a message describing the problem.
bool IsValid(string* error) const;
bool IsValid(std::string* error) const;
// Minimizer options ----------------------------------------
LineSearchDirectionType line_search_direction_type;
@ -261,7 +261,7 @@ class CERES_EXPORT GradientProblemSolver {
// executed, then set update_state_every_iteration to true.
//
// The solver does NOT take ownership of these pointers.
vector<IterationCallback*> callbacks;
std::vector<IterationCallback*> callbacks;
};
struct CERES_EXPORT Summary {
@ -269,11 +269,11 @@ class CERES_EXPORT GradientProblemSolver {
// A brief one line description of the state of the solver after
// termination.
string BriefReport() const;
std::string BriefReport() const;
// A full multiline description of the state of the solver after
// termination.
string FullReport() const;
std::string FullReport() const;
bool IsSolutionUsable() const;
@ -281,7 +281,7 @@ class CERES_EXPORT GradientProblemSolver {
TerminationType termination_type;
// Reason why the solver terminated.
string message;
std::string message;
// Cost of the problem (value of the objective function) before
// the optimization.
@ -292,7 +292,7 @@ class CERES_EXPORT GradientProblemSolver {
double final_cost;
// IterationSummary for each minimizer iteration in order.
vector<IterationSummary> iterations;
std::vector<IterationSummary> iterations;
// Sum total of all time spent inside Ceres when Solve is called.
double total_time_in_seconds;
@ -303,6 +303,10 @@ class CERES_EXPORT GradientProblemSolver {
// Time (in seconds) spent evaluating the gradient.
double gradient_evaluation_time_in_seconds;
// Time (in seconds) spent minimizing the interpolating polynomial
// to compute the next candidate step size as part of a line search.
double line_search_polynomial_minimization_time_in_seconds;
// Number of parameters in the problem.
int num_parameters;

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -214,15 +214,15 @@ struct AutoDiff {
// This block breaks the 80 column rule to keep it somewhat readable.
DCHECK_GT(num_outputs, 0);
DCHECK((!N1 && !N2 && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && !N2 && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && !N5 && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && !N7 && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && !N9) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && (N9 > 0)))
((N1 > 0) && !N2 && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && !N5 && !N6 && !N7 && !N8 && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && !N6 && !N7 && !N8 && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && !N7 && !N8 && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && !N8 && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && (N9 > 0))) // NOLINT
<< "Zero block cannot precede a non-zero block. Block sizes are "
<< "(ignore trailing 0s): " << N0 << ", " << N1 << ", " << N2 << ", "
<< N3 << ", " << N4 << ", " << N5 << ", " << N6 << ", " << N7 << ", "

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2014 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -28,8 +28,9 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
// mierle@gmail.com (Keir Mierle)
// tbennun@gmail.com (Tal Ben-Nun)
//
// Finite differencing routine used by NumericDiffCostFunction.
// Finite differencing routines used by NumericDiffCostFunction.
#ifndef CERES_PUBLIC_INTERNAL_NUMERIC_DIFF_H_
#define CERES_PUBLIC_INTERNAL_NUMERIC_DIFF_H_
@ -37,9 +38,12 @@
#include <cstring>
#include "Eigen/Dense"
#include "Eigen/StdVector"
#include "ceres/cost_function.h"
#include "ceres/internal/fixed_array.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/internal/variadic_evaluate.h"
#include "ceres/numeric_diff_options.h"
#include "ceres/types.h"
#include "glog/logging.h"
@ -78,7 +82,7 @@ bool EvaluateImpl(const CostFunctor* functor,
// specializations for member functions. The alternative is to repeat the main
// class for differing numbers of parameters, which is also unfortunate.
template <typename CostFunctor,
NumericDiffMethod kMethod,
NumericDiffMethodType kMethod,
int kNumResiduals,
int N0, int N1, int N2, int N3, int N4,
int N5, int N6, int N7, int N8, int N9,
@ -88,9 +92,11 @@ struct NumericDiff {
// Mutates parameters but must restore them before return.
static bool EvaluateJacobianForParameterBlock(
const CostFunctor* functor,
double const* residuals_at_eval_point,
const double relative_step_size,
const double* residuals_at_eval_point,
const NumericDiffOptions& options,
int num_residuals,
int parameter_block_index,
int parameter_block_size,
double **parameters,
double *jacobian) {
using Eigen::Map;
@ -98,8 +104,14 @@ struct NumericDiff {
using Eigen::RowMajor;
using Eigen::ColMajor;
const int NUM_RESIDUALS =
const int num_residuals_internal =
(kNumResiduals != ceres::DYNAMIC ? kNumResiduals : num_residuals);
const int parameter_block_index_internal =
(kParameterBlock != ceres::DYNAMIC ? kParameterBlock :
parameter_block_index);
const int parameter_block_size_internal =
(kParameterBlockSize != ceres::DYNAMIC ? kParameterBlockSize :
parameter_block_size);
typedef Matrix<double, kNumResiduals, 1> ResidualVector;
typedef Matrix<double, kParameterBlockSize, 1> ParameterVector;
@ -115,73 +127,288 @@ struct NumericDiff {
JacobianMatrix;
Map<JacobianMatrix> parameter_jacobian(jacobian,
NUM_RESIDUALS,
kParameterBlockSize);
num_residuals_internal,
parameter_block_size_internal);
// Mutate 1 element at a time and then restore.
Map<ParameterVector> x_plus_delta(parameters[kParameterBlock],
kParameterBlockSize);
Map<ParameterVector> x_plus_delta(
parameters[parameter_block_index_internal],
parameter_block_size_internal);
ParameterVector x(x_plus_delta);
ParameterVector step_size = x.array().abs() * relative_step_size;
ParameterVector step_size = x.array().abs() *
((kMethod == RIDDERS) ? options.ridders_relative_initial_step_size :
options.relative_step_size);
// To handle cases where a parameter is exactly zero, instead use
// the mean step_size for the other dimensions. If all the
// parameters are zero, there's no good answer. Take
// relative_step_size as a guess and hope for the best.
const double fallback_step_size =
(step_size.sum() == 0)
? relative_step_size
: step_size.sum() / step_size.rows();
// It is not a good idea to make the step size arbitrarily
// small. This will lead to problems with round off and numerical
// instability when dividing by the step size. The general
// recommendation is to not go down below sqrt(epsilon).
double min_step_size = std::sqrt(std::numeric_limits<double>::epsilon());
// For Ridders' method, the initial step size is required to be large,
// thus ridders_relative_initial_step_size is used.
if (kMethod == RIDDERS) {
min_step_size = std::max(min_step_size,
options.ridders_relative_initial_step_size);
}
// For each parameter in the parameter block, use finite differences to
// compute the derivative for that parameter.
FixedArray<double> temp_residual_array(num_residuals_internal);
FixedArray<double> residual_array(num_residuals_internal);
Map<ResidualVector> residuals(residual_array.get(),
num_residuals_internal);
ResidualVector residuals(NUM_RESIDUALS);
for (int j = 0; j < kParameterBlockSize; ++j) {
const double delta =
(step_size(j) == 0.0) ? fallback_step_size : step_size(j);
for (int j = 0; j < parameter_block_size_internal; ++j) {
const double delta = std::max(min_step_size, step_size(j));
x_plus_delta(j) = x(j) + delta;
if (kMethod == RIDDERS) {
if (!EvaluateRiddersJacobianColumn(functor, j, delta,
options,
num_residuals_internal,
parameter_block_size_internal,
x.data(),
residuals_at_eval_point,
parameters,
x_plus_delta.data(),
temp_residual_array.get(),
residual_array.get())) {
return false;
}
} else {
if (!EvaluateJacobianColumn(functor, j, delta,
num_residuals_internal,
parameter_block_size_internal,
x.data(),
residuals_at_eval_point,
parameters,
x_plus_delta.data(),
temp_residual_array.get(),
residual_array.get())) {
return false;
}
}
parameter_jacobian.col(j).matrix() = residuals;
}
return true;
}
static bool EvaluateJacobianColumn(const CostFunctor* functor,
int parameter_index,
double delta,
int num_residuals,
int parameter_block_size,
const double* x_ptr,
const double* residuals_at_eval_point,
double** parameters,
double* x_plus_delta_ptr,
double* temp_residuals_ptr,
double* residuals_ptr) {
using Eigen::Map;
using Eigen::Matrix;
typedef Matrix<double, kNumResiduals, 1> ResidualVector;
typedef Matrix<double, kParameterBlockSize, 1> ParameterVector;
Map<const ParameterVector> x(x_ptr, parameter_block_size);
Map<ParameterVector> x_plus_delta(x_plus_delta_ptr,
parameter_block_size);
Map<ResidualVector> residuals(residuals_ptr, num_residuals);
Map<ResidualVector> temp_residuals(temp_residuals_ptr, num_residuals);
// Mutate 1 element at a time and then restore.
x_plus_delta(parameter_index) = x(parameter_index) + delta;
if (!EvaluateImpl<CostFunctor, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>(
functor, parameters, residuals.data(), functor)) {
return false;
}
// Compute this column of the jacobian in 3 steps:
// 1. Store residuals for the forward part.
// 2. Subtract residuals for the backward (or 0) part.
// 3. Divide out the run.
double one_over_delta = 1.0 / delta;
if (kMethod == CENTRAL || kMethod == RIDDERS) {
// Compute the function on the other side of x(parameter_index).
x_plus_delta(parameter_index) = x(parameter_index) - delta;
if (!EvaluateImpl<CostFunctor, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>(
functor, parameters, temp_residuals.data(), functor)) {
return false;
}
residuals -= temp_residuals;
one_over_delta /= 2;
} else {
// Forward difference only; reuse existing residuals evaluation.
residuals -=
Map<const ResidualVector>(residuals_at_eval_point,
num_residuals);
}
// Restore x_plus_delta.
x_plus_delta(parameter_index) = x(parameter_index);
// Divide out the run to get slope.
residuals *= one_over_delta;
return true;
}
// This numeric difference implementation uses adaptive differentiation
// on the parameters to obtain the Jacobian matrix. The adaptive algorithm
// is based on Ridders' method for adaptive differentiation, which creates
// a Romberg tableau from varying step sizes and extrapolates the
// intermediate results to obtain the current computational error.
//
// References:
// C.J.F. Ridders, Accurate computation of F'(x) and F''(x), Advances
// in Engineering Software (1978), Volume 4, Issue 2, April 1982,
// Pages 75-76, ISSN 0141-1195,
// http://dx.doi.org/10.1016/S0141-1195(82)80057-0.
static bool EvaluateRiddersJacobianColumn(
const CostFunctor* functor,
int parameter_index,
double delta,
const NumericDiffOptions& options,
int num_residuals,
int parameter_block_size,
const double* x_ptr,
const double* residuals_at_eval_point,
double** parameters,
double* x_plus_delta_ptr,
double* temp_residuals_ptr,
double* residuals_ptr) {
using Eigen::Map;
using Eigen::Matrix;
using Eigen::aligned_allocator;
typedef Matrix<double, kNumResiduals, 1> ResidualVector;
typedef Matrix<double, kNumResiduals, Eigen::Dynamic> ResidualCandidateMatrix;
typedef Matrix<double, kParameterBlockSize, 1> ParameterVector;
Map<const ParameterVector> x(x_ptr, parameter_block_size);
Map<ParameterVector> x_plus_delta(x_plus_delta_ptr,
parameter_block_size);
Map<ResidualVector> residuals(residuals_ptr, num_residuals);
Map<ResidualVector> temp_residuals(temp_residuals_ptr, num_residuals);
// In order for the algorithm to converge, the step size should be
// initialized to a value that is large enough to produce a significant
// change in the function.
// As the derivative is estimated, the step size decreases.
// By default, the step sizes are chosen so that the middle column
// of the Romberg tableau uses the input delta.
double current_step_size = delta *
pow(options.ridders_step_shrink_factor,
options.max_num_ridders_extrapolations / 2);
// Double-buffering temporary differential candidate vectors
// from previous step size.
ResidualCandidateMatrix stepsize_candidates_a(
num_residuals,
options.max_num_ridders_extrapolations);
ResidualCandidateMatrix stepsize_candidates_b(
num_residuals,
options.max_num_ridders_extrapolations);
ResidualCandidateMatrix* current_candidates = &stepsize_candidates_a;
ResidualCandidateMatrix* previous_candidates = &stepsize_candidates_b;
// Represents the computational error of the derivative. This variable is
// initially set to a large value, and is set to the difference between
// current and previous finite difference extrapolations.
// norm_error is supposed to decrease as the finite difference tableau
// generation progresses, serving both as an estimate for differentiation
// error and as a measure of differentiation numerical stability.
double norm_error = std::numeric_limits<double>::max();
// Loop over decreasing step sizes until:
// 1. Error is smaller than a given value (ridders_epsilon),
// 2. Maximal order of extrapolation reached, or
// 3. Extrapolation becomes numerically unstable.
for (int i = 0; i < options.max_num_ridders_extrapolations; ++i) {
// Compute the numerical derivative at this step size.
if (!EvaluateJacobianColumn(functor, parameter_index, current_step_size,
num_residuals,
parameter_block_size,
x.data(),
residuals_at_eval_point,
parameters,
x_plus_delta.data(),
temp_residuals.data(),
current_candidates->col(0).data())) {
// Something went wrong; bail.
return false;
}
// Store initial results.
if (i == 0) {
residuals = current_candidates->col(0);
}
// Shrink differentiation step size.
current_step_size /= options.ridders_step_shrink_factor;
// Extrapolation factor for Richardson acceleration method (see below).
double richardson_factor = options.ridders_step_shrink_factor *
options.ridders_step_shrink_factor;
for (int k = 1; k <= i; ++k) {
// Extrapolate the various orders of finite differences using
// the Richardson acceleration method.
current_candidates->col(k) =
(richardson_factor * current_candidates->col(k - 1) -
previous_candidates->col(k - 1)) / (richardson_factor - 1.0);
richardson_factor *= options.ridders_step_shrink_factor *
options.ridders_step_shrink_factor;
// Compute the difference between the previous value and the current.
double candidate_error = std::max(
(current_candidates->col(k) -
current_candidates->col(k - 1)).norm(),
(current_candidates->col(k) -
previous_candidates->col(k - 1)).norm());
// If the error has decreased, update results.
if (candidate_error <= norm_error) {
norm_error = candidate_error;
residuals = current_candidates->col(k);
// If the error is small enough, stop.
if (norm_error < options.ridders_epsilon) {
break;
}
}
}
// After breaking out of the inner loop, declare convergence.
if (norm_error < options.ridders_epsilon) {
break;
}
// Check to see if the current gradient estimate is numerically unstable.
// If so, bail out and return the last stable result.
if (i > 0) {
double tableau_error = (current_candidates->col(i) -
previous_candidates->col(i - 1)).norm();
// Compare current error to the chosen candidate's error.
if (tableau_error >= 2 * norm_error) {
break;
}
}
std::swap(current_candidates, previous_candidates);
}
return true;
}
};
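// A standalone sketch (not part of Ceres) of the Ridders / Richardson
// extrapolation scheme described above, applied to a scalar function. The
// function F(), the shrink factor, the row count and main() are illustrative
// assumptions; the member function above does the same work per Jacobian
// column of a vector-valued residual.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <limits>

static double F(double x) { return std::exp(x) * std::sin(x); }

static double RiddersDerivative(double x, double h0) {
  const double shrink = 2.0;   // step shrink factor between tableau rows
  const int kMaxRows = 10;     // maximal number of extrapolations
  double tableau[kMaxRows][kMaxRows];
  double best = 0.0;
  double best_error = std::numeric_limits<double>::max();
  double h = h0;
  for (int i = 0; i < kMaxRows; ++i) {
    // Column 0: plain central difference at the current step size.
    tableau[i][0] = (F(x + h) - F(x - h)) / (2.0 * h);
    double factor = shrink * shrink;
    for (int k = 1; k <= i; ++k) {
      // Richardson extrapolation of successively higher orders.
      tableau[i][k] = (factor * tableau[i][k - 1] - tableau[i - 1][k - 1]) /
                      (factor - 1.0);
      factor *= shrink * shrink;
      const double error =
          std::max(std::fabs(tableau[i][k] - tableau[i][k - 1]),
                   std::fabs(tableau[i][k] - tableau[i - 1][k - 1]));
      if (error <= best_error) {  // keep the estimate with the smallest error
        best_error = error;
        best = tableau[i][k];
      }
    }
    // Stop when the highest-order entry becomes numerically unstable.
    if (i > 0 &&
        std::fabs(tableau[i][i] - tableau[i - 1][i - 1]) >= 2.0 * best_error) {
      break;
    }
    h /= shrink;
  }
  return best;
}

int main() {
  // Exact derivative of exp(x) * sin(x) is exp(x) * (sin(x) + cos(x)).
  std::printf("ridders: %.12f  exact: %.12f\n",
              RiddersDerivative(1.0, 0.1),
              std::exp(1.0) * (std::sin(1.0) + std::cos(1.0)));
  return 0;
}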
template <typename CostFunctor,
NumericDiffMethodType kMethod,
int kNumResiduals,
int N0, int N1, int N2, int N3, int N4,
int N5, int N6, int N7, int N8, int N9,
@ -192,11 +419,22 @@ struct NumericDiff<CostFunctor, kMethod, kNumResiduals,
// Mutates parameters but must restore them before return.
static bool EvaluateJacobianForParameterBlock(
const CostFunctor* functor,
const double* residuals_at_eval_point,
const NumericDiffOptions& options,
const int num_residuals,
const int parameter_block_index,
const int parameter_block_size,
double **parameters,
double *jacobian) {
// Silence unused parameter compiler warnings.
(void)functor;
(void)residuals_at_eval_point;
(void)options;
(void)num_residuals;
(void)parameter_block_index;
(void)parameter_block_size;
(void)parameters;
(void)jacobian;
LOG(FATAL) << "Control should never reach here.";
return true;
}

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -34,8 +34,6 @@
// This file needs to compile as c code.
#ifdef __cplusplus
#include <string>
#include "ceres/internal/config.h"
#if defined(CERES_TR1_MEMORY_HEADER)
@ -46,16 +44,6 @@
namespace ceres {
// It is unfortunate that this import of the entire standard namespace is
// necessary. The reasons are historical and won't be explained here, but
// suffice to say it is not a mistake and can't be removed without breaking
// things outside of the Ceres optimization package.
using namespace std;
// This is necessary to properly handle the case that there is a different
// "string" implementation in the global namespace.
using std::string;
#if defined(CERES_TR1_SHARED_PTR)
using std::tr1::shared_ptr;
#else

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2014 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -35,6 +35,7 @@
#include <stddef.h>
#include "ceres/jet.h"
#include "ceres/types.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/fixed_array.h"
#include "glog/logging.h"
@ -176,6 +177,17 @@ struct VariadicEvaluate<Functor, T, N0, 0, 0, 0, 0, 0, 0, 0, 0, 0> {
}
};
// Template instantiation for dynamically-sized functors.
template<typename Functor, typename T>
struct VariadicEvaluate<Functor, T, ceres::DYNAMIC, ceres::DYNAMIC,
ceres::DYNAMIC, ceres::DYNAMIC, ceres::DYNAMIC,
ceres::DYNAMIC, ceres::DYNAMIC, ceres::DYNAMIC,
ceres::DYNAMIC, ceres::DYNAMIC> {
static bool Call(const Functor& functor, T const *const *input, T* output) {
return functor(input, output);
}
};
} // namespace internal
} // namespace ceres

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -106,8 +106,8 @@
// Jet<double, 2> y(1); // Pick the 1st dual number for y.
// Jet<double, 2> z = f(x, y);
//
// LOG(INFO) << "df/dx = " << z.a[0]
// << "df/dy = " << z.a[1];
// LOG(INFO) << "df/dx = " << z.v[0]
// << "df/dy = " << z.v[1];
//
// Most users should not use Jet objects directly; a wrapper around Jet objects,
// which makes computing the derivative, gradient, or jacobian of templated
@ -482,6 +482,41 @@ Jet<T, N> tanh(const Jet<T, N>& f) {
return Jet<T, N>(tanh_a, tmp * f.v);
}
// Bessel functions of the first kind with integer order equal to 0, 1, n.
inline double BesselJ0(double x) { return j0(x); }
inline double BesselJ1(double x) { return j1(x); }
inline double BesselJn(int n, double x) { return jn(n, x); }
// For the formulae of the derivatives of the Bessel functions see the book:
// Olver, Lozier, Boisvert, Clark, NIST Handbook of Mathematical Functions,
// Cambridge University Press 2010.
//
// Formulae are also available at http://dlmf.nist.gov
// See formula http://dlmf.nist.gov/10.6#E3
// j0(a + h) ~= j0(a) - j1(a) h
template <typename T, int N> inline
Jet<T, N> BesselJ0(const Jet<T, N>& f) {
return Jet<T, N>(BesselJ0(f.a),
-BesselJ1(f.a) * f.v);
}
// See formula http://dlmf.nist.gov/10.6#E1
// j1(a + h) ~= j1(a) + 0.5 ( j0(a) - j2(a) ) h
template <typename T, int N> inline
Jet<T, N> BesselJ1(const Jet<T, N>& f) {
return Jet<T, N>(BesselJ1(f.a),
T(0.5) * (BesselJ0(f.a) - BesselJn(2, f.a)) * f.v);
}
// See formula http://dlmf.nist.gov/10.6#E1
// j_n(a + h) ~= j_n(a) + 0.5 ( j_{n-1}(a) - j_{n+1}(a) ) h
template <typename T, int N> inline
Jet<T, N> BesselJn(int n, const Jet<T, N>& f) {
return Jet<T, N>(BesselJn(n, f.a),
T(0.5) * (BesselJn(n - 1, f.a) - BesselJn(n + 1, f.a)) * f.v);
}
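// A small illustrative check (not part of the header) of the identity used
// above, d/dx j0(x) = -j1(x): evaluating BesselJ0 on a one-dimensional Jet
// recovers the derivative in the infinitesimal part. main() is an assumption
// made only for this example.
#include <cstdio>
#include "ceres/jet.h"

int main() {
  ceres::Jet<double, 1> x(2.0, 0);  // value 2.0, dual part seeded to 1
  ceres::Jet<double, 1> y = ceres::BesselJ0(x);
  // y.a == j0(2.0) and y.v[0] == -j1(2.0).
  std::printf("j0(2) = %.12f  d/dx j0(2) = %.12f  -j1(2) = %.12f\n",
              y.a, y.v[0], -ceres::BesselJ1(2.0));
  return 0;
}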
// Jet Classification. It is not clear what the appropriate semantics are for
// these classifications. This picks that IsFinite and isnormal are "all"
// operations, i.e. all elements of the jet must be finite for the jet itself
@ -573,22 +608,101 @@ Jet<T, N> pow(const Jet<T, N>& f, double g) {
}
// pow -- base is a constant, exponent is a differentiable function.
// (a)^(p+dp) ~= a^p + a^p log(a) dp
// We have various special cases, see the comment for pow(Jet, Jet) for
// analysis:
//
// 1. For f > 0 we have: (f)^(g + dg) ~= f^g + f^g log(f) dg
//
// 2. For f == 0 and g > 0 we have: (f)^(g + dg) ~= f^g
//
// 3. For f < 0 and integer g we have: (f)^(g + dg) ~= f^g but if dg
// != 0, the derivatives are not defined and we return NaN.
template <typename T, int N> inline
Jet<T, N> pow(double f, const Jet<T, N>& g) {
if (f == 0 && g.a > 0) {
// Handle case 2.
return Jet<T, N>(T(0.0));
}
if (f < 0 && g.a == floor(g.a)) {
// Handle case 3.
Jet<T, N> ret(pow(f, g.a));
for (int i = 0; i < N; i++) {
if (g.v[i] != T(0.0)) {
// Return a NaN when g.v != 0.
ret.v[i] = std::numeric_limits<T>::quiet_NaN();
}
}
return ret;
}
// Handle case 1.
T const tmp = pow(f, g.a);
return Jet<T, N>(tmp, log(f) * tmp * g.v);
}
// pow -- both base and exponent are differentiable functions. This has a
// variety of special cases that require careful handling.
//
// 1. For f > 0:
// (f + df)^(g + dg) ~= f^g + f^(g - 1) * (g * df + f * log(f) * dg)
// The numerical evaluation of f * log(f) for f > 0 is well behaved, even for
// extremely small values (e.g. 1e-99).
//
// 2. For f == 0 and g > 1: (f + df)^(g + dg) ~= 0
// This case is needed because log(0) cannot be evaluated in the f > 0
// expression. However the function f*log(f) is well behaved around f == 0
// and its limit as f-->0 is zero.
//
// 3. For f == 0 and g == 1: (f + df)^(g + dg) ~= 0 + df
//
// 4. For f == 0 and 0 < g < 1: The value is finite but the derivatives are not.
//
// 5. For f == 0 and g < 0: The value and derivatives of f^g are not finite.
//
// 6. For f == 0 and g == 0: The C standard incorrectly defines 0^0 to be 1
// "because there are applications that can exploit this definition". We
// (arbitrarily) decree that derivatives here will be nonfinite, since that
// is consistent with the behavior for f == 0, g < 0 and 0 < g < 1.
// Practically any definition could have been justified because mathematical
// consistency has been lost at this point.
//
// 7. For f < 0, g integer, dg == 0: (f + df)^(g + dg) ~= f^g + g * f^(g - 1) df
// This is equivalent to the case where f is a differentiable function and g
// is a constant (to first order).
//
// 8. For f < 0, g integer, dg != 0: The value is finite but the derivatives are
// not, because any change in the value of g moves us away from the point
// with a real-valued answer into the region with complex-valued answers.
//
// 9. For f < 0, g noninteger: The value and derivatives of f^g are not finite.
template <typename T, int N> inline
Jet<T, N> pow(const Jet<T, N>& f, const Jet<T, N>& g) {
if (f.a == 0 && g.a >= 1) {
// Handle cases 2 and 3.
if (g.a > 1) {
return Jet<T, N>(T(0.0));
}
return f;
}
if (f.a < 0 && g.a == floor(g.a)) {
// Handle cases 7 and 8.
T const tmp = g.a * pow(f.a, g.a - T(1.0));
Jet<T, N> ret(pow(f.a, g.a), tmp * f.v);
for (int i = 0; i < N; i++) {
if (g.v[i] != T(0.0)) {
// Return a NaN when g.v != 0.
ret.v[i] = std::numeric_limits<T>::quiet_NaN();
}
}
return ret;
}
// Handle the remaining cases. For cases 4,5,6,9 we allow the log() function
// to generate -HUGE_VAL or NaN, since those cases result in a nonfinite
// derivative.
T const tmp1 = pow(f.a, g.a);
T const tmp2 = g.a * pow(f.a, g.a - T(1.0));
T const tmp3 = tmp1 * log(f.a);
return Jet<T, N>(tmp1, tmp2 * f.v + tmp3 * g.v);
}
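// An illustrative sketch (not part of the header) of case 1 above. With
// f = 2 + df and g = 3 + dg, pow(f, g) = 8 with partials
// d/df = g * f^(g-1) = 12 and d/dg = f^g * log(f) = 8 log(2). The Jet seeds
// and main() are assumptions made only for this example.
#include <cmath>
#include <cstdio>
#include "ceres/jet.h"

int main() {
  typedef ceres::Jet<double, 2> J;
  J f(2.0, 0);  // partial derivative slot 0 tracks df
  J g(3.0, 1);  // partial derivative slot 1 tracks dg
  J h = ceres::pow(f, g);
  std::printf("value: %g  d/df: %g  d/dg: %g (expected 12 and %g)\n",
              h.a, h.v[0], h.v[1], 8.0 * std::log(2.0));
  return 0;
}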

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2014 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -34,6 +34,7 @@
#include <vector>
#include "ceres/internal/port.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/internal/disable_warnings.h"
namespace ceres {
@ -173,7 +174,7 @@ class CERES_EXPORT IdentityParameterization : public LocalParameterization {
class CERES_EXPORT SubsetParameterization : public LocalParameterization {
public:
explicit SubsetParameterization(int size,
const std::vector<int>& constant_parameters);
virtual ~SubsetParameterization() {}
virtual bool Plus(const double* x,
const double* delta,
@ -191,7 +192,7 @@ class CERES_EXPORT SubsetParameterization : public LocalParameterization {
private:
const int local_size_;
std::vector<char> constancy_mask_;
};
// Plus(x, delta) = [cos(|delta|), sin(|delta|) delta / |delta|] * x
@ -210,6 +211,89 @@ class CERES_EXPORT QuaternionParameterization : public LocalParameterization {
virtual int LocalSize() const { return 3; }
};
// This provides a parameterization for homogeneous vectors which are commonly
// used in Structure from Motion problems. One example where they are used is
// in representing points whose triangulation is ill-conditioned. Here
// it is advantageous to use an over-parameterization since homogeneous vectors
// can represent points at infinity.
//
// The plus operator is defined as
// Plus(x, delta) =
// [sin(0.5 * |delta|) * delta / |delta|, cos(0.5 * |delta|)] * x
// with * defined as an operator which applies the update orthogonal to x to
// remain on the sphere. We assume that the last element of x is the scalar
// component. The size of the homogeneous vector is required to be greater than
// 1.
class CERES_EXPORT HomogeneousVectorParameterization :
public LocalParameterization {
public:
explicit HomogeneousVectorParameterization(int size);
virtual ~HomogeneousVectorParameterization() {}
virtual bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const;
virtual bool ComputeJacobian(const double* x,
double* jacobian) const;
virtual int GlobalSize() const { return size_; }
virtual int LocalSize() const { return size_ - 1; }
private:
const int size_;
};
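// A usage sketch (not from the library) exercising the Plus() declared above
// on a 4-vector; the delta values and main() are illustrative assumptions.
#include <cstdio>
#include "ceres/local_parameterization.h"

int main() {
  ceres::HomogeneousVectorParameterization parameterization(4);
  const double x[4] = {0.0, 0.0, 0.0, 1.0};   // unit-norm homogeneous point
  const double delta[3] = {0.1, -0.2, 0.05};  // 3-dimensional tangent update
  double x_plus_delta[4] = {0.0, 0.0, 0.0, 0.0};
  parameterization.Plus(x, delta, x_plus_delta);
  // GlobalSize() == 4 and LocalSize() == 3 here; per the comment above, the
  // update is applied orthogonally to x so the iterate stays on the sphere.
  std::printf("%f %f %f %f\n", x_plus_delta[0], x_plus_delta[1],
              x_plus_delta[2], x_plus_delta[3]);
  return 0;
}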
// Construct a local parameterization by taking the Cartesian product
// of a number of other local parameterizations. This is useful, when
// a parameter block is the cartesian product of two or more
// manifolds. For example the parameters of a camera consist of a
// rotation and a translation, i.e., SO(3) x R^3.
//
// Currently this class supports taking the cartesian product of up to
// four local parameterizations.
//
// Example usage:
//
// ProductParameterization product_param(new QuaternionParameterization(),
// new IdentityParameterization(3));
//
// is the local parameterization for a rigid transformation, where the
// rotation is represented using a quaternion.
class CERES_EXPORT ProductParameterization : public LocalParameterization {
public:
//
// NOTE: All the constructors take ownership of the input local
// parameterizations.
//
ProductParameterization(LocalParameterization* local_param1,
LocalParameterization* local_param2);
ProductParameterization(LocalParameterization* local_param1,
LocalParameterization* local_param2,
LocalParameterization* local_param3);
ProductParameterization(LocalParameterization* local_param1,
LocalParameterization* local_param2,
LocalParameterization* local_param3,
LocalParameterization* local_param4);
virtual ~ProductParameterization();
virtual bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const;
virtual bool ComputeJacobian(const double* x,
double* jacobian) const;
virtual int GlobalSize() const { return global_size_; }
virtual int LocalSize() const { return local_size_; }
private:
void Init();
std::vector<LocalParameterization*> local_params_;
int local_size_;
int global_size_;
int buffer_size_;
};
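// A usage sketch following the comment above (assumptions: an existing
// ceres::Problem named "problem" and a 7-double camera block laid out as
// [quaternion(4), translation(3)]). SetParameterization and the two factor
// parameterizations are existing public API.
#include "ceres/local_parameterization.h"
#include "ceres/problem.h"

void UseRigidTransformParameterization(ceres::Problem* problem,
                                       double* camera /* size 7 */) {
  // GlobalSize() == 4 + 3 == 7, LocalSize() == 3 + 3 == 6. The product takes
  // ownership of both factors, and the problem takes ownership of the product.
  ceres::LocalParameterization* pose_parameterization =
      new ceres::ProductParameterization(
          new ceres::QuaternionParameterization(),
          new ceres::IdentityParameterization(3));
  problem->SetParameterization(camera, pose_parameterization);
}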
} // namespace ceres
#include "ceres/internal/reenable_warnings.h"

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -274,10 +274,28 @@ class CERES_EXPORT TolerantLoss : public LossFunction {
const double a_, b_, c_;
};
// This is the Tukey biweight loss function which aggressively
// attempts to suppress large errors.
//
// The term is computed as:
//
// rho(s) = a^2 / 6 * (1 - (1 - s / a^2)^3 ) for s <= a^2,
// rho(s) = a^2 / 6 for s > a^2.
//
// At s = 0: rho = [0, 0.5, -1 / a^2]
class CERES_EXPORT TukeyLoss : public ceres::LossFunction {
public:
explicit TukeyLoss(double a) : a_squared_(a * a) { }
virtual void Evaluate(double, double*) const;
private:
const double a_squared_;
};
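// A standalone numerical sketch of the rho(s) formula quoted above. This is
// not the library's TukeyLoss::Evaluate(); the function name and main() are
// assumptions made for illustration only.
#include <cstdio>

static double TukeyRho(double s, double a) {
  const double a_squared = a * a;
  if (s <= a_squared) {
    const double u = 1.0 - s / a_squared;
    return a_squared / 6.0 * (1.0 - u * u * u);
  }
  return a_squared / 6.0;  // beyond a^2 the cost saturates
}

int main() {
  // Near s = 0 the loss behaves like s / 2 (matching rho'(0) = 0.5 above);
  // for large s it is clamped to a^2 / 6, which suppresses gross outliers.
  std::printf("rho(0.01) = %f  rho(0.5) = %f  rho(10) = %f\n",
              TukeyRho(0.01, 1.0), TukeyRho(0.5, 1.0), TukeyRho(10.0, 1.0));
  return 0;
}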
// Composition of two loss functions. The error is the result of first
// evaluating g followed by f to yield the composition f(g(s)).
// The loss functions must not be NULL.
class CERES_EXPORT ComposedLoss : public LossFunction {
public:
explicit ComposedLoss(const LossFunction* f, Ownership ownership_f,
const LossFunction* g, Ownership ownership_g);
@ -340,6 +358,9 @@ class CERES_EXPORT ScaledLoss : public LossFunction {
// whose scale can be mutated after an optimization problem has been
// constructed.
//
// Since we treat a NULL loss function as the Identity loss
// function, rho = NULL is a valid input.
//
// Example usage
//
// Problem problem;
@ -376,8 +397,14 @@ class CERES_EXPORT LossFunctionWrapper : public LossFunction {
}
virtual void Evaluate(double sq_norm, double out[3]) const {
if (rho_.get() == NULL) {
out[0] = sq_norm;
out[1] = 1.0;
out[2] = 0.0;
}
else {
rho_->Evaluate(sq_norm, out);
}
}
void Reset(LossFunction* rho, Ownership ownership) {
@ -396,6 +423,6 @@ class CERES_EXPORT LossFunctionWrapper : public LossFunction {
} // namespace ceres
#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/reenable_warnings.h"
#endif // CERES_PUBLIC_LOSS_FUNCTION_H_
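// A usage sketch for LossFunctionWrapper (assumptions: an already constructed
// Problem, cost function and parameter block; "AttachMutableLoss" is a name
// invented for this example). Reset() lets the robustifier be swapped between
// solves without rebuilding the problem.
#include "ceres/cost_function.h"
#include "ceres/loss_function.h"
#include "ceres/problem.h"

void AttachMutableLoss(ceres::Problem* problem,
                       ceres::CostFunction* cost_function,
                       double* parameters) {
  ceres::LossFunctionWrapper* loss =
      new ceres::LossFunctionWrapper(new ceres::HuberLoss(1.0),
                                     ceres::TAKE_OWNERSHIP);
  problem->AddResidualBlock(cost_function, loss, parameters);
  // Later, e.g. after a first solve, tighten the robustification:
  loss->Reset(new ceres::HuberLoss(0.5), ceres::TAKE_OWNERSHIP);
}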

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -132,7 +132,7 @@
//
// ALTERNATE INTERFACE
//
// For a variety of reasons, including compatibility with legacy code,
// NumericDiffCostFunction can also take CostFunction objects as
// input. The following describes how.
//
@ -165,6 +165,7 @@
#include "ceres/cost_function.h"
#include "ceres/internal/numeric_diff.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/numeric_diff_options.h"
#include "ceres/sized_cost_function.h"
#include "ceres/types.h"
#include "glog/logging.h"
@ -172,7 +173,7 @@
namespace ceres {
template <typename CostFunctor,
NumericDiffMethodType method = CENTRAL,
int kNumResiduals = 0, // Number of residuals, or ceres::DYNAMIC
int N0 = 0, // Number of parameters in block 0.
int N1 = 0, // Number of parameters in block 1.
@ -189,13 +190,14 @@ class NumericDiffCostFunction
N0, N1, N2, N3, N4,
N5, N6, N7, N8, N9> {
public:
NumericDiffCostFunction(
CostFunctor* functor,
Ownership ownership = TAKE_OWNERSHIP,
int num_residuals = kNumResiduals,
const NumericDiffOptions& options = NumericDiffOptions())
: functor_(functor),
ownership_(ownership),
options_(options) {
if (kNumResiduals == DYNAMIC) {
SizedCostFunction<kNumResiduals,
N0, N1, N2, N3, N4,
@ -204,6 +206,29 @@ class NumericDiffCostFunction
}
}
// Deprecated. New users should avoid using this constructor. Instead, use the
// constructor with NumericDiffOptions.
NumericDiffCostFunction(CostFunctor* functor,
Ownership ownership,
int num_residuals,
const double relative_step_size)
:functor_(functor),
ownership_(ownership),
options_() {
LOG(WARNING) << "This constructor is deprecated and will be removed in "
"a future version. Please use the NumericDiffOptions "
"constructor instead.";
if (kNumResiduals == DYNAMIC) {
SizedCostFunction<kNumResiduals,
N0, N1, N2, N3, N4,
N5, N6, N7, N8, N9>
::set_num_residuals(num_residuals);
}
options_.relative_step_size = relative_step_size;
}
~NumericDiffCostFunction() {
if (ownership_ != TAKE_OWNERSHIP) {
functor_.release();
@ -250,25 +275,25 @@ class NumericDiffCostFunction
if (N8) parameters_reference_copy[8] = parameters_reference_copy[7] + N7;
if (N9) parameters_reference_copy[9] = parameters_reference_copy[8] + N8;
#define CERES_COPY_PARAMETER_BLOCK(block) \
if (N ## block) memcpy(parameters_reference_copy[block], \
parameters[block], \
sizeof(double) * N ## block); // NOLINT
CERES_COPY_PARAMETER_BLOCK(0);
CERES_COPY_PARAMETER_BLOCK(1);
CERES_COPY_PARAMETER_BLOCK(2);
CERES_COPY_PARAMETER_BLOCK(3);
CERES_COPY_PARAMETER_BLOCK(4);
CERES_COPY_PARAMETER_BLOCK(5);
CERES_COPY_PARAMETER_BLOCK(6);
CERES_COPY_PARAMETER_BLOCK(7);
CERES_COPY_PARAMETER_BLOCK(8);
CERES_COPY_PARAMETER_BLOCK(9);
#undef CERES_COPY_PARAMETER_BLOCK
#define CERES_EVALUATE_JACOBIAN_FOR_BLOCK(block) \
if (N ## block && jacobians[block] != NULL) { \
if (!NumericDiff<CostFunctor, \
method, \
@ -278,28 +303,30 @@ class NumericDiffCostFunction
N ## block >::EvaluateJacobianForParameterBlock( \
functor_.get(), \
residuals, \
options_, \
SizedCostFunction<kNumResiduals, \
N0, N1, N2, N3, N4, \
N5, N6, N7, N8, N9>::num_residuals(), \
block, \
N ## block, \
parameters_reference_copy.get(), \
jacobians[block])) { \
return false; \
} \
}
CERES_EVALUATE_JACOBIAN_FOR_BLOCK(0);
CERES_EVALUATE_JACOBIAN_FOR_BLOCK(1);
CERES_EVALUATE_JACOBIAN_FOR_BLOCK(2);
CERES_EVALUATE_JACOBIAN_FOR_BLOCK(3);
CERES_EVALUATE_JACOBIAN_FOR_BLOCK(4);
CERES_EVALUATE_JACOBIAN_FOR_BLOCK(5);
CERES_EVALUATE_JACOBIAN_FOR_BLOCK(6);
CERES_EVALUATE_JACOBIAN_FOR_BLOCK(7);
CERES_EVALUATE_JACOBIAN_FOR_BLOCK(8);
CERES_EVALUATE_JACOBIAN_FOR_BLOCK(9);
#undef CERES_EVALUATE_JACOBIAN_FOR_BLOCK
return true;
}
@ -307,7 +334,7 @@ class NumericDiffCostFunction
private:
internal::scoped_ptr<CostFunctor> functor_;
Ownership ownership_;
NumericDiffOptions options_;
};
} // namespace ceres

View File

@ -0,0 +1,79 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: tbennun@gmail.com (Tal Ben-Nun)
//
#ifndef CERES_PUBLIC_NUMERIC_DIFF_OPTIONS_H_
#define CERES_PUBLIC_NUMERIC_DIFF_OPTIONS_H_
namespace ceres {
// Options pertaining to numeric differentiation (e.g., convergence criteria,
// step sizes).
struct CERES_EXPORT NumericDiffOptions {
NumericDiffOptions() {
relative_step_size = 1e-6;
ridders_relative_initial_step_size = 1e-2;
max_num_ridders_extrapolations = 10;
ridders_epsilon = 1e-12;
ridders_step_shrink_factor = 2.0;
}
// Numeric differentiation step size (multiplied by parameter block's
// order of magnitude). If parameters are close to zero, the step size
// is set to sqrt(machine_epsilon).
double relative_step_size;
// Initial step size for Ridders adaptive numeric differentiation (multiplied
// by parameter block's order of magnitude).
// If parameters are close to zero, Ridders' method sets the step size
// directly to this value. This parameter is separate from
// "relative_step_size" in order to set a different default value.
//
// Note: For Ridders' method to converge, the step size should be initialized
// to a value that is large enough to produce a significant change in the
// function. As the derivative is estimated, the step size decreases.
double ridders_relative_initial_step_size;
// Maximal number of adaptive extrapolations (sampling) in Ridders' method.
int max_num_ridders_extrapolations;
// Convergence criterion on extrapolation error for Ridders adaptive
// differentiation. The available error estimation methods are defined in
// NumericDiffErrorType and set in the "ridders_error_method" field.
double ridders_epsilon;
// The factor in which to shrink the step size with each extrapolation in
// Ridders' method.
double ridders_step_shrink_factor;
};
} // namespace ceres
#endif // CERES_PUBLIC_NUMERIC_DIFF_OPTIONS_H_
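// A usage sketch combining these options with NumericDiffCostFunction. The
// functor "ExampleResidual", its sizes and the chosen option values are
// illustrative assumptions; RIDDERS and the constructor taking a
// NumericDiffOptions come from the headers in this change.
#include "ceres/numeric_diff_cost_function.h"
#include "ceres/numeric_diff_options.h"

struct ExampleResidual {
  bool operator()(const double* const x, double* residual) const {
    residual[0] = 10.0 - x[0];
    return true;
  }
};

ceres::CostFunction* MakeRiddersCostFunction() {
  ceres::NumericDiffOptions options;
  options.ridders_relative_initial_step_size = 1e-3;
  options.max_num_ridders_extrapolations = 8;
  // 1 residual, one parameter block of size 1, differentiated with Ridders'
  // adaptive scheme.
  return new ceres::NumericDiffCostFunction<ExampleResidual, ceres::RIDDERS,
                                            1, 1>(
      new ExampleResidual, ceres::TAKE_OWNERSHIP, 1, options);
}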

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -63,7 +63,8 @@ class OrderedGroups {
return false;
}
typename std::map<T, int>::const_iterator it =
element_to_group_.find(element);
if (it != element_to_group_.end()) {
if (it->second == group) {
// Element is already in the right group, nothing to do.
@ -107,7 +108,7 @@ class OrderedGroups {
// Bulk remove elements. The return value indicates the number of
// elements successfully removed.
int Remove(const std::vector<T>& elements) {
if (NumElements() == 0 || elements.size() == 0) {
return 0;
}
@ -121,14 +122,18 @@ class OrderedGroups {
// Reverse the order of the groups in place.
void Reverse() {
if (NumGroups() == 0) {
return;
}
typename std::map<int, std::set<T> >::reverse_iterator it =
group_to_elements_.rbegin();
std::map<int, std::set<T> > new_group_to_elements;
new_group_to_elements[it->first] = it->second;
int new_group_id = it->first + 1;
for (++it; it != group_to_elements_.rend(); ++it) {
for (typename std::set<T>::const_iterator element_it = it->second.begin();
element_it != it->second.end();
++element_it) {
element_to_group_[*element_it] = new_group_id;
@ -143,7 +148,8 @@ class OrderedGroups {
// Return the group id for the element. If the element is not a
// member of any group, return -1.
int GroupId(const T element) const {
typename std::map<T, int>::const_iterator it =
element_to_group_.find(element);
if (it == element_to_group_.end()) {
return -1;
}
@ -151,14 +157,15 @@ class OrderedGroups {
}
bool IsMember(const T element) const {
typename std::map<T, int>::const_iterator it =
element_to_group_.find(element);
return (it != element_to_group_.end());
}
// This function always succeeds, i.e., implicitly there exists a
// group for every integer.
int GroupSize(const int group) const {
typename std::map<int, std::set<T> >::const_iterator it =
group_to_elements_.find(group);
return (it == group_to_elements_.end()) ? 0 : it->second.size();
}
@ -180,17 +187,17 @@ class OrderedGroups {
return group_to_elements_.begin()->first;
}
const std::map<int, std::set<T> >& group_to_elements() const {
return group_to_elements_;
}
const std::map<T, int>& element_to_group() const {
return element_to_group_;
}
private:
std::map<int, std::set<T> > group_to_elements_;
std::map<T, int> element_to_group_;
};
// Typedef for the most commonly used version of OrderedGroups.

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -211,9 +211,10 @@ class CERES_EXPORT Problem {
// problem.AddResidualBlock(new MyUnaryCostFunction(...), NULL, x1);
// problem.AddResidualBlock(new MyBinaryCostFunction(...), NULL, x2, x1);
//
ResidualBlockId AddResidualBlock(
CostFunction* cost_function,
LossFunction* loss_function,
const std::vector<double*>& parameter_blocks);
// Convenience methods for adding residuals with a small number of
// parameters. This is the common case. Instead of specifying the
@ -356,17 +357,17 @@ class CERES_EXPORT Problem {
// Fills the passed parameter_blocks vector with pointers to the
// parameter blocks currently in the problem. After this call,
// parameter_block.size() == NumParameterBlocks.
void GetParameterBlocks(std::vector<double*>* parameter_blocks) const;
// Fills the passed residual_blocks vector with pointers to the
// residual blocks currently in the problem. After this call,
// residual_blocks.size() == NumResidualBlocks.
void GetResidualBlocks(std::vector<ResidualBlockId>* residual_blocks) const;
// Get all the parameter blocks that depend on the given residual block.
void GetParameterBlocksForResidualBlock(
const ResidualBlockId residual_block,
std::vector<double*>* parameter_blocks) const;
// Get the CostFunction for the given residual block.
const CostFunction* GetCostFunctionForResidualBlock(
@ -385,7 +386,7 @@ class CERES_EXPORT Problem {
// block will incur a scan of the entire Problem object.
void GetResidualBlocksForParameterBlock(
const double* values,
std::vector<ResidualBlockId>* residual_blocks) const;
// Options struct to control Problem::Evaluate.
struct EvaluateOptions {
@ -408,18 +409,17 @@ class CERES_EXPORT Problem {
// used to add parameter blocks to the Problem. These parameter
// block should NOT point to new memory locations. Bad things will
// happen otherwise.
std::vector<double*> parameter_blocks;
// The set of residual blocks to evaluate. This vector determines
// the order in which the residuals occur, and how the rows of the
// jacobian are ordered. If this vector is empty, then it is
// assumed to be equal to a vector containing ALL the residual
// blocks. Generally speaking the residual blocks will occur in
// the order in which they were added to the problem. But, this
// may change if the user removes any residual blocks from the
// problem.
std::vector<ResidualBlockId> residual_blocks;
// Even though the residual blocks in the problem may contain loss
// functions, setting apply_loss_function to false will turn off
@ -463,8 +463,8 @@ class CERES_EXPORT Problem {
// columns in the jacobian).
bool Evaluate(const EvaluateOptions& options,
double* cost,
std::vector<double>* residuals,
std::vector<double>* gradient,
CRSMatrix* jacobian);
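// A usage sketch for Evaluate() with default options (assumes an already
// populated Problem; "PrintCurrentCost" is a name made up for the example).
#include <cstdio>
#include <vector>
#include "ceres/problem.h"

void PrintCurrentCost(ceres::Problem* problem) {
  double cost = 0.0;
  std::vector<double> residuals;
  problem->Evaluate(ceres::Problem::EvaluateOptions(), &cost, &residuals,
                    NULL, NULL);  // gradient and jacobian are not requested
  std::printf("cost: %f over %d residual values\n", cost,
              static_cast<int>(residuals.size()));
}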
private:

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -47,6 +47,7 @@
#include <algorithm>
#include <cmath>
#include <limits>
#include "glog/logging.h"
namespace ceres {
@ -94,6 +95,17 @@ void AngleAxisToQuaternion(const T* angle_axis, T* quaternion);
template<typename T>
void QuaternionToAngleAxis(const T* quaternion, T* angle_axis);
// Conversions between 3x3 rotation matrix (in column major order) and
// quaternion rotation representations. Templated for use with
// autodifferentiation.
template <typename T>
void RotationMatrixToQuaternion(const T* R, T* quaternion);
template <typename T, int row_stride, int col_stride>
void RotationMatrixToQuaternion(
const MatrixAdapter<const T, row_stride, col_stride>& R,
T* quaternion);
// Conversions between 3x3 rotation matrix (in column major order) and
// axis-angle rotation representations. Templated for use with
// autodifferentiation.
@ -141,11 +153,11 @@ void EulerAnglesToRotationMatrix(
// the cross-product matrix of [a b c]. Together with the property that
// R(q1 * q2) = R(q1) * R(q2) this uniquely defines the mapping from q to R.
//
// The rotation matrix is row-major.
//
// No normalization of the quaternion is performed, i.e.
// R = ||q||^2 * Q, where Q is an orthonormal matrix
// such that det(Q) = 1 and Q*Q' = I
//
// WARNING: The rotation matrix is ROW MAJOR
template <typename T> inline
void QuaternionToScaledRotation(const T q[4], T R[3 * 3]);
@ -156,6 +168,8 @@ void QuaternionToScaledRotation(
// Same as above except that the rotation matrix is normalized by the
// Frobenius norm, so that R * R' = I (and det(R) = 1).
//
// WARNING: The rotation matrix is ROW MAJOR
template <typename T> inline
void QuaternionToRotation(const T q[4], T R[3 * 3]);
@ -294,6 +308,46 @@ inline void QuaternionToAngleAxis(const T* quaternion, T* angle_axis) {
}
}
template <typename T>
void RotationMatrixToQuaternion(const T* R, T* quaternion) {
RotationMatrixToQuaternion(ColumnMajorAdapter3x3(R), quaternion);
}
// This algorithm comes from "Quaternion Calculus and Fast Animation",
// Ken Shoemake, 1987 SIGGRAPH course notes
template <typename T, int row_stride, int col_stride>
void RotationMatrixToQuaternion(
const MatrixAdapter<const T, row_stride, col_stride>& R,
T* quaternion) {
const T trace = R(0, 0) + R(1, 1) + R(2, 2);
if (trace >= 0.0) {
T t = sqrt(trace + T(1.0));
quaternion[0] = T(0.5) * t;
t = T(0.5) / t;
quaternion[1] = (R(2, 1) - R(1, 2)) * t;
quaternion[2] = (R(0, 2) - R(2, 0)) * t;
quaternion[3] = (R(1, 0) - R(0, 1)) * t;
} else {
int i = 0;
if (R(1, 1) > R(0, 0)) {
i = 1;
}
if (R(2, 2) > R(i, i)) {
i = 2;
}
const int j = (i + 1) % 3;
const int k = (j + 1) % 3;
T t = sqrt(R(i, i) - R(j, j) - R(k, k) + T(1.0));
quaternion[i + 1] = T(0.5) * t;
t = T(0.5) / t;
quaternion[0] = (R(k, j) - R(j, k)) * t;
quaternion[j + 1] = (R(j, i) + R(i, j)) * t;
quaternion[k + 1] = (R(k, i) + R(i, k)) * t;
}
}
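// An illustrative check (not part of the header): a 90 degree rotation about
// the z axis, stored column-major as the raw-pointer overload expects, maps
// to the quaternion (cos(pi/4), 0, 0, sin(pi/4)). main() is an assumption.
#include <cstdio>
#include "ceres/rotation.h"

int main() {
  // Columns are the images of the basis vectors: x -> y, y -> -x, z -> z.
  const double R[9] = {0, 1, 0,  -1, 0, 0,  0, 0, 1};
  double q[4];
  ceres::RotationMatrixToQuaternion(R, q);
  std::printf("%f %f %f %f\n", q[0], q[1], q[2], q[3]);  // ~0.707 0 0 0.707
  return 0;
}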
// The conversion of a rotation matrix to the angle-axis form is
// numerically problematic when the rotation angle is close to zero
// or to Pi. The following implementation detects when these two cases
@ -308,80 +362,10 @@ template <typename T, int row_stride, int col_stride>
void RotationMatrixToAngleAxis(
const MatrixAdapter<const T, row_stride, col_stride>& R,
T* angle_axis) {
T quaternion[4];
RotationMatrixToQuaternion(R, quaternion);
QuaternionToAngleAxis(quaternion, angle_axis);
return;
}
template <typename T>

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -55,15 +55,15 @@ class SizedCostFunction : public CostFunction {
// This block breaks the 80 column rule to keep it somewhat readable.
CHECK((!N1 && !N2 && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && !N2 && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && !N5 && !N6 && !N7 && !N8 && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && !N6 && !N7 && !N8 && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && !N7 && !N8 && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && !N8 && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && !N9) || // NOLINT
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && (N9 > 0))) // NOLINT
<< "Zero block cannot precede a non-zero block. Block sizes are "
<< "(ignore trailing 0s): " << N0 << ", " << N1 << ", " << N2 << ", "
<< N3 << ", " << N4 << ", " << N5 << ", " << N6 << ", " << N7 << ", "
@ -71,19 +71,19 @@ class SizedCostFunction : public CostFunction {
set_num_residuals(kNumResiduals);
#define CERES_ADD_PARAMETER_BLOCK(N) \
if (N) mutable_parameter_block_sizes()->push_back(N);
CERES_ADD_PARAMETER_BLOCK(N0);
CERES_ADD_PARAMETER_BLOCK(N1);
CERES_ADD_PARAMETER_BLOCK(N2);
CERES_ADD_PARAMETER_BLOCK(N3);
CERES_ADD_PARAMETER_BLOCK(N4);
CERES_ADD_PARAMETER_BLOCK(N5);
CERES_ADD_PARAMETER_BLOCK(N6);
CERES_ADD_PARAMETER_BLOCK(N7);
CERES_ADD_PARAMETER_BLOCK(N8);
CERES_ADD_PARAMETER_BLOCK(N9);
#undef CERES_ADD_PARAMETER_BLOCK
}
virtual ~SizedCostFunction() { }

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2014 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -92,7 +92,7 @@ class CERES_EXPORT Solver {
gradient_tolerance = 1e-10;
parameter_tolerance = 1e-8;
#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE) && !defined(CERES_ENABLE_LGPL_CODE) // NOLINT
linear_solver_type = DENSE_QR;
#else
linear_solver_type = SPARSE_NORMAL_CHOLESKY;
@ -104,7 +104,8 @@ class CERES_EXPORT Solver {
// Choose a default sparse linear algebra library in the order:
//
// SUITE_SPARSE > CX_SPARSE > EIGEN_SPARSE > NO_SPARSE
sparse_linear_algebra_library_type = NO_SPARSE;
#if !defined(CERES_NO_SUITESPARSE)
sparse_linear_algebra_library_type = SUITE_SPARSE;
#else
@ -140,7 +141,7 @@ class CERES_EXPORT Solver {
// Returns true if the options struct has a valid
// configuration. Returns false otherwise, and fills in *error
// with a message describing the problem.
bool IsValid(std::string* error) const;
// Minimizer options ----------------------------------------
@ -676,13 +677,13 @@ class CERES_EXPORT Solver {
// List of iterations at which the minimizer should dump the trust
// region problem. Useful for testing and benchmarking. If empty
// (default), no problems are dumped.
std::vector<int> trust_region_minimizer_iterations_to_dump;
// Directory to which the problems should be written to. Should be
// non-empty if trust_region_minimizer_iterations_to_dump is
// non-empty and trust_region_problem_dump_format_type is not
// CONSOLE.
std::string trust_region_problem_dump_directory;
DumpFormatType trust_region_problem_dump_format_type;
// Finite differences options ----------------------------------------------
@ -746,7 +747,7 @@ class CERES_EXPORT Solver {
// executed, then set update_state_every_iteration to true.
//
// The solver does NOT take ownership of these pointers.
std::vector<IterationCallback*> callbacks;
};
struct CERES_EXPORT Summary {
@ -754,11 +755,11 @@ class CERES_EXPORT Solver {
// A brief one line description of the state of the solver after
// termination.
std::string BriefReport() const;
// A full multiline description of the state of the solver after
// termination.
std::string FullReport() const;
bool IsSolutionUsable() const;
@ -768,7 +769,7 @@ class CERES_EXPORT Solver {
TerminationType termination_type;
// Reason why the solver terminated.
std::string message;
// Cost of the problem (value of the objective function) before
// the optimization.
@ -784,7 +785,7 @@ class CERES_EXPORT Solver {
double fixed_cost;
// IterationSummary for each minimizer iteration in order.
std::vector<IterationSummary> iterations;
// Number of minimizer iterations in which the step was
// accepted. Unless use_non_monotonic_steps is true this is also
@ -832,6 +833,26 @@ class CERES_EXPORT Solver {
// Time (in seconds) spent doing inner iterations.
double inner_iteration_time_in_seconds;
// Cumulative timing information for line searches performed as part of the
// solve. Note that in addition to the case when the Line Search minimizer
// is used, the Trust Region minimizer also uses a line search when
// solving a constrained problem.
// Time (in seconds) spent evaluating the univariate cost function as part
// of a line search.
double line_search_cost_evaluation_time_in_seconds;
// Time (in seconds) spent evaluating the gradient of the univariate cost
// function as part of a line search.
double line_search_gradient_evaluation_time_in_seconds;
// Time (in seconds) spent minimizing the interpolating polynomial
// to compute the next candidate step size as part of a line search.
double line_search_polynomial_minimization_time_in_seconds;
// Total time (in seconds) spent performing line searches.
double line_search_total_time_in_seconds;
// Number of parameter blocks in the problem.
int num_parameter_blocks;
@ -871,6 +892,9 @@ class CERES_EXPORT Solver {
// Number of residuals in the reduced problem.
int num_residuals_reduced;
// Is the reduced problem bounds constrained.
bool is_constrained;
// Number of threads specified by the user for Jacobian and
// residual evaluation.
int num_threads_given;
@ -902,7 +926,7 @@ class CERES_EXPORT Solver {
// Size of the elimination groups given by the user as hints to
// the linear solver.
std::vector<int> linear_solver_ordering_given;
// Size of the parameter groups used by the solver when ordering
// the columns of the Jacobian. This maybe different from
@ -910,7 +934,7 @@ class CERES_EXPORT Solver {
// linear_solver_ordering_given blank and asked for an automatic
// ordering, or if the problem contains some constant or inactive
// parameter blocks.
std::vector<int> linear_solver_ordering_used;
// True if the user asked for inner iterations to be used as part
// of the optimization.
@ -924,7 +948,7 @@ class CERES_EXPORT Solver {
// Size of the parameter groups given by the user for performing
// inner iterations.
std::vector<int> inner_iteration_ordering_given;
// Size of the parameter groups given used by the solver for
// performing inner iterations. This maybe different from
@ -932,7 +956,7 @@ class CERES_EXPORT Solver {
// inner_iteration_ordering_given blank and asked for an automatic
// ordering, or if the problem contains some constant or inactive
// parameter blocks.
std::vector<int> inner_iteration_ordering_used;
// Type of the preconditioner requested by the user.
PreconditionerType preconditioner_type_given;

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -157,7 +157,12 @@ enum SparseLinearAlgebraLibraryType {
// Eigen's sparse linear algebra routines. In particular Ceres uses
// the Simplicial LDLT routines.
EIGEN_SPARSE
EIGEN_SPARSE,
// No sparse linear solver should be used. This does not necessarily
// imply that Ceres was built without any sparse library, although that
// is the likely use case, merely that one should not be used.
NO_SPARSE
};
enum DenseLinearAlgebraLibraryType {
@ -392,9 +397,19 @@ enum DimensionType {
DYNAMIC = -1
};
enum NumericDiffMethod {
// The differentiation method used to compute numerical derivatives in
// NumericDiffCostFunction and DynamicNumericDiffCostFunction.
enum NumericDiffMethodType {
// Compute central finite difference: f'(x) ~ (f(x+h) - f(x-h)) / 2h.
CENTRAL,
FORWARD
// Compute forward finite difference: f'(x) ~ (f(x+h) - f(x)) / h.
FORWARD,
// Adaptive numerical differentiation using Ridders' method. Provides more
// accurate and robust derivatives at the expense of additional cost
// function evaluations.
RIDDERS
};
enum LineSearchInterpolationType {
@ -411,67 +426,73 @@ enum CovarianceAlgorithmType {
CERES_EXPORT const char* LinearSolverTypeToString(
LinearSolverType type);
CERES_EXPORT bool StringToLinearSolverType(string value,
CERES_EXPORT bool StringToLinearSolverType(std::string value,
LinearSolverType* type);
CERES_EXPORT const char* PreconditionerTypeToString(PreconditionerType type);
CERES_EXPORT bool StringToPreconditionerType(string value,
CERES_EXPORT bool StringToPreconditionerType(std::string value,
PreconditionerType* type);
CERES_EXPORT const char* VisibilityClusteringTypeToString(
VisibilityClusteringType type);
CERES_EXPORT bool StringToVisibilityClusteringType(string value,
CERES_EXPORT bool StringToVisibilityClusteringType(std::string value,
VisibilityClusteringType* type);
CERES_EXPORT const char* SparseLinearAlgebraLibraryTypeToString(
SparseLinearAlgebraLibraryType type);
CERES_EXPORT bool StringToSparseLinearAlgebraLibraryType(
string value,
std::string value,
SparseLinearAlgebraLibraryType* type);
CERES_EXPORT const char* DenseLinearAlgebraLibraryTypeToString(
DenseLinearAlgebraLibraryType type);
CERES_EXPORT bool StringToDenseLinearAlgebraLibraryType(
string value,
std::string value,
DenseLinearAlgebraLibraryType* type);
CERES_EXPORT const char* TrustRegionStrategyTypeToString(
TrustRegionStrategyType type);
CERES_EXPORT bool StringToTrustRegionStrategyType(string value,
CERES_EXPORT bool StringToTrustRegionStrategyType(std::string value,
TrustRegionStrategyType* type);
CERES_EXPORT const char* DoglegTypeToString(DoglegType type);
CERES_EXPORT bool StringToDoglegType(string value, DoglegType* type);
CERES_EXPORT bool StringToDoglegType(std::string value, DoglegType* type);
CERES_EXPORT const char* MinimizerTypeToString(MinimizerType type);
CERES_EXPORT bool StringToMinimizerType(string value, MinimizerType* type);
CERES_EXPORT bool StringToMinimizerType(std::string value, MinimizerType* type);
CERES_EXPORT const char* LineSearchDirectionTypeToString(
LineSearchDirectionType type);
CERES_EXPORT bool StringToLineSearchDirectionType(string value,
CERES_EXPORT bool StringToLineSearchDirectionType(std::string value,
LineSearchDirectionType* type);
CERES_EXPORT const char* LineSearchTypeToString(LineSearchType type);
CERES_EXPORT bool StringToLineSearchType(string value, LineSearchType* type);
CERES_EXPORT bool StringToLineSearchType(std::string value, LineSearchType* type);
CERES_EXPORT const char* NonlinearConjugateGradientTypeToString(
NonlinearConjugateGradientType type);
CERES_EXPORT bool StringToNonlinearConjugateGradientType(
string value,
std::string value,
NonlinearConjugateGradientType* type);
CERES_EXPORT const char* LineSearchInterpolationTypeToString(
LineSearchInterpolationType type);
CERES_EXPORT bool StringToLineSearchInterpolationType(
string value,
std::string value,
LineSearchInterpolationType* type);
CERES_EXPORT const char* CovarianceAlgorithmTypeToString(
CovarianceAlgorithmType type);
CERES_EXPORT bool StringToCovarianceAlgorithmType(
string value,
std::string value,
CovarianceAlgorithmType* type);
CERES_EXPORT const char* NumericDiffMethodTypeToString(
NumericDiffMethodType type);
CERES_EXPORT bool StringToNumericDiffMethodType(
std::string value,
NumericDiffMethodType* type);
CERES_EXPORT const char* TerminationTypeToString(TerminationType type);
CERES_EXPORT bool IsSchurType(LinearSolverType type);
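As a usage note for the NumericDiffMethodType enum and its string helpers introduced above, the sketch below selects Ridders' method when building a numeric-diff cost function. The residual functor is invented for illustration; the enum values and conversion functions are the ones declared in this change.

#include <iostream>
#include "ceres/ceres.h"

// Hypothetical residual without analytic derivatives: r(x) = x^2 - 2.
struct SqrtTwoResidual {
  bool operator()(const double* const x, double* residual) const {
    residual[0] = x[0] * x[0] - 2.0;
    return true;
  }
};

int main() {
  double x = 1.0;
  ceres::Problem problem;
  // RIDDERS trades extra functor evaluations for more accurate derivatives.
  problem.AddResidualBlock(
      new ceres::NumericDiffCostFunction<SqrtTwoResidual, ceres::RIDDERS, 1, 1>(
          new SqrtTwoResidual),
      NULL, &x);

  ceres::Solver::Options options;
  ceres::Solver::Summary summary;
  ceres::Solve(options, &problem, &summary);
  std::cout << "x = " << x << "\n";

  // The string helpers declared above round-trip the enum.
  ceres::NumericDiffMethodType method;
  if (ceres::StringToNumericDiffMethodType("RIDDERS", &method)) {
    std::cout << ceres::NumericDiffMethodTypeToString(method) << "\n";
  }
  return 0;
}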

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2014 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -32,9 +32,8 @@
#define CERES_PUBLIC_VERSION_H_
#define CERES_VERSION_MAJOR 1
#define CERES_VERSION_MINOR 10
#define CERES_VERSION_MINOR 11
#define CERES_VERSION_REVISION 0
#define CERES_VERSION_ABI 1
// Classic CPP stringification; the extra level of indirection allows the
// preprocessor to expand the macro before being converted to a string.
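The comment above refers to the standard two-level stringification idiom. A minimal sketch of how such macros combine into a version string; the helper macro names are hypothetical, chosen only to show the idiom, and the value macros are repeated locally so the snippet is self-contained.

#include <iostream>

#define CERES_VERSION_MAJOR 1
#define CERES_VERSION_MINOR 11
#define CERES_VERSION_REVISION 0

// Two levels of macros: the helper sees the already-expanded argument, so
// CERES_TO_STRING(CERES_VERSION_MAJOR) becomes "1", not "CERES_VERSION_MAJOR".
#define CERES_TO_STRING_HELPER(x) #x
#define CERES_TO_STRING(x) CERES_TO_STRING_HELPER(x)

#define CERES_VERSION_STRING               \
  CERES_TO_STRING(CERES_VERSION_MAJOR) "." \
  CERES_TO_STRING(CERES_VERSION_MINOR) "." \
  CERES_TO_STRING(CERES_VERSION_REVISION)

int main() {
  std::cout << CERES_VERSION_STRING << "\n";  // prints 1.11.0
  return 0;
}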

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -41,6 +41,8 @@
namespace ceres {
namespace internal {
using std::string;
// It is a near impossibility that user code generates this exact
// value in normal operation, thus we will use it to fill arrays
// before passing them to user code. If on return an element of the
@ -71,7 +73,7 @@ int FindInvalidValue(const int size, const double* x) {
}
return size;
};
}
void InvalidateArray(const int size, double* x) {
if (x != NULL) {

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -43,6 +43,7 @@
#ifndef CERES_INTERNAL_ARRAY_UTILS_H_
#define CERES_INTERNAL_ARRAY_UTILS_H_
#include <string>
#include "ceres/internal/port.h"
namespace ceres {
@ -63,7 +64,7 @@ int FindInvalidValue(const int size, const double* x);
// Utility routine to print an array of doubles to a string. If the
// array pointer is NULL, it is treated as an array of zeros.
void AppendArrayToString(const int size, const double* x, string* result);
void AppendArrayToString(const int size, const double* x, std::string* result);
extern const double kImpossibleValue;
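The declarations above implement Ceres' "poison the array, then check it" pattern for catching user cost functions that forget to write some outputs. A minimal sketch of that pattern, assuming the shown signatures and the internal include path "ceres/array_utils.h"; this is internal API, illustrated only to show how the pieces fit together.

#include <iostream>
#include <string>
#include "ceres/array_utils.h"  // internal header; include path assumed

int main() {
  using ceres::internal::AppendArrayToString;
  using ceres::internal::FindInvalidValue;
  using ceres::internal::InvalidateArray;

  double residuals[3];
  // Pre-fill with the sentinel kImpossibleValue before calling user code.
  InvalidateArray(3, residuals);

  // Simulate a user cost function that writes only two of three outputs.
  residuals[0] = 1.0;
  residuals[1] = 2.0;

  // Returns the index of the first untouched (or non-finite) entry, or the
  // array size if every entry was written with a finite value.
  const int bad_index = FindInvalidValue(3, residuals);
  if (bad_index != 3) {
    std::string dump;
    AppendArrayToString(3, residuals, &dump);
    std::cout << "entry " << bad_index << " was never written: " << dump << "\n";
  }
  return 0;
}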

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -32,6 +32,7 @@
#include "ceres/internal/port.h"
#include "glog/logging.h"
#ifndef CERES_NO_LAPACK
extern "C" void dsyrk_(char* uplo,
char* trans,
int* n,
@ -42,6 +43,7 @@ extern "C" void dsyrk_(char* uplo,
double* beta,
double* c,
int* ldc);
#endif
namespace ceres {
namespace internal {

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -43,7 +43,7 @@ namespace internal {
BlockJacobiPreconditioner::BlockJacobiPreconditioner(
const BlockSparseMatrix& A) {
const CompressedRowBlockStructure* bs = A.block_structure();
vector<int> blocks(bs->cols.size());
std::vector<int> blocks(bs->cols.size());
for (int i = 0; i < blocks.size(); ++i) {
blocks[i] = bs->cols[i].size;
}
@ -60,7 +60,7 @@ bool BlockJacobiPreconditioner::UpdateImpl(const BlockSparseMatrix& A,
m_->SetZero();
for (int i = 0; i < bs->rows.size(); ++i) {
const int row_block_size = bs->rows[i].block.size;
const vector<Cell>& cells = bs->rows[i].cells;
const std::vector<Cell>& cells = bs->rows[i].cells;
for (int j = 0; j < cells.size(); ++j) {
const int block_id = cells[j].block_id;
const int col_block_size = bs->cols[block_id].size;

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2014 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -41,6 +41,9 @@
namespace ceres {
namespace internal {
using std::vector;
namespace {
// Given the residual block ordering, build a lookup table to determine which
@ -163,8 +166,7 @@ SparseMatrix* BlockJacobianWriter::CreateJacobian() const {
}
// Construct the cells in each row.
const vector<ResidualBlock*>& residual_blocks =
program_->residual_blocks();
const vector<ResidualBlock*>& residual_blocks = program_->residual_blocks();
int row_block_position = 0;
bs->rows.resize(residual_blocks.size());
for (int i = 0; i < residual_blocks.size(); ++i) {

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -115,10 +115,10 @@ class BlockJacobianWriter {
//
// which indicates that dr/dx is located at values_[0], and dr/dz is at
// values_[12]. See BlockEvaluatePreparer::Prepare()'s comments about 'j'.
vector<int*> jacobian_layout_;
std::vector<int*> jacobian_layout_;
// The pointers in jacobian_layout_ point directly into this vector.
vector<int> jacobian_layout_storage_;
std::vector<int> jacobian_layout_storage_;
};
} // namespace internal

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -39,7 +39,7 @@ namespace ceres {
namespace internal {
BlockRandomAccessDenseMatrix::BlockRandomAccessDenseMatrix(
const vector<int>& blocks) {
const std::vector<int>& blocks) {
const int num_blocks = blocks.size();
block_layout_.resize(num_blocks, 0);
num_rows_ = 0;

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -56,7 +56,7 @@ class BlockRandomAccessDenseMatrix : public BlockRandomAccessMatrix {
public:
// blocks is a vector of block sizes. The resulting matrix has
// blocks.size() * blocks.size() cells.
explicit BlockRandomAccessDenseMatrix(const vector<int>& blocks);
explicit BlockRandomAccessDenseMatrix(const std::vector<int>& blocks);
// The destructor is not thread safe. It assumes that no one is
// modifying any cells when the matrix is being destroyed.
@ -85,7 +85,7 @@ class BlockRandomAccessDenseMatrix : public BlockRandomAccessMatrix {
private:
int num_rows_;
vector<int> block_layout_;
std::vector<int> block_layout_;
scoped_array<double> values_;
scoped_array<CellInfo> cell_infos_;

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -45,6 +45,8 @@
namespace ceres {
namespace internal {
using std::vector;
// TODO(sameeragarwal): Drop the dependence on TripletSparseMatrix.
BlockRandomAccessDiagonalMatrix::BlockRandomAccessDiagonalMatrix(

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -52,7 +52,7 @@ namespace internal {
class BlockRandomAccessDiagonalMatrix : public BlockRandomAccessMatrix {
public:
// blocks is an array of block sizes.
explicit BlockRandomAccessDiagonalMatrix(const vector<int>& blocks);
explicit BlockRandomAccessDiagonalMatrix(const std::vector<int>& blocks);
// The destructor is not thread safe. It assumes that no one is
// modifying any cells when the matrix is being destroyed.
@ -85,8 +85,8 @@ class BlockRandomAccessDiagonalMatrix : public BlockRandomAccessMatrix {
private:
// row/column block sizes.
const vector<int> blocks_;
vector<CellInfo*> layout_;
const std::vector<int> blocks_;
std::vector<CellInfo*> layout_;
// The underlying matrix object which actually stores the cells.
scoped_ptr<TripletSparseMatrix> tsm_;

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -44,6 +44,11 @@
namespace ceres {
namespace internal {
using std::make_pair;
using std::pair;
using std::set;
using std::vector;
BlockRandomAccessSparseMatrix::BlockRandomAccessSparseMatrix(
const vector<int>& blocks,
const set<pair<int, int> >& block_pairs)

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -57,8 +57,9 @@ class BlockRandomAccessSparseMatrix : public BlockRandomAccessMatrix {
// blocks is an array of block sizes. block_pairs is a set of
// <row_block_id, col_block_id> pairs to identify the non-zero cells
// of this matrix.
BlockRandomAccessSparseMatrix(const vector<int>& blocks,
const set<pair<int, int> >& block_pairs);
BlockRandomAccessSparseMatrix(
const std::vector<int>& blocks,
const std::set<std::pair<int, int> >& block_pairs);
// The destructor is not thread safe. It assumes that no one is
// modifying any cells when the matrix is being destroyed.
@ -103,8 +104,8 @@ class BlockRandomAccessSparseMatrix : public BlockRandomAccessMatrix {
const int64 kMaxRowBlocks;
// row/column block sizes.
const vector<int> blocks_;
vector<int> block_positions_;
const std::vector<int> blocks_;
std::vector<int> block_positions_;
// A mapping from <row_block_id, col_block_id> to the position in
// the values array of tsm_ where the block is stored.
@ -114,7 +115,7 @@ class BlockRandomAccessSparseMatrix : public BlockRandomAccessMatrix {
// In-order traversal of the contents of the matrix. This allows us to
// implement a matrix-vector product which is 20% faster than using the
// iterator in the Layout object.
vector<pair<pair<int, int>, double*> > cell_values_;
std::vector<std::pair<std::pair<int, int>, double*> > cell_values_;
// The underlying matrix object which actually stores the cells.
scoped_ptr<TripletSparseMatrix> tsm_;
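A minimal sketch of the constructor contract described above: block sizes plus the set of (row_block, col_block) pairs that should be allocated. This is an internal class, so the include path and the GetCell signature (inherited from BlockRandomAccessMatrix) are assumptions based on this source tree.

#include <set>
#include <utility>
#include <vector>
#include "ceres/block_random_access_sparse_matrix.h"  // internal header; path assumed

int main() {
  using ceres::internal::BlockRandomAccessSparseMatrix;
  using ceres::internal::CellInfo;

  // Three blocks of sizes 2, 3 and 2, i.e. a 7x7 matrix overall.
  std::vector<int> blocks;
  blocks.push_back(2);
  blocks.push_back(3);
  blocks.push_back(2);

  // Only the diagonal cells and the (0, 1) cell are allocated.
  std::set<std::pair<int, int> > block_pairs;
  block_pairs.insert(std::make_pair(0, 0));
  block_pairs.insert(std::make_pair(1, 1));
  block_pairs.insert(std::make_pair(2, 2));
  block_pairs.insert(std::make_pair(0, 1));

  BlockRandomAccessSparseMatrix m(blocks, block_pairs);

  // GetCell returns NULL for pairs that were not declared in block_pairs.
  int row, col, row_stride, col_stride;
  CellInfo* cell = m.GetCell(0, 1, &row, &col, &row_stride, &col_stride);
  return cell != NULL ? 0 : 1;
}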

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -42,6 +42,8 @@
namespace ceres {
namespace internal {
using std::vector;
BlockSparseMatrix::~BlockSparseMatrix() {}
BlockSparseMatrix::BlockSparseMatrix(
@ -82,7 +84,7 @@ BlockSparseMatrix::BlockSparseMatrix(
}
void BlockSparseMatrix::SetZero() {
fill(values_.get(), values_.get() + num_nonzeros_, 0.0);
std::fill(values_.get(), values_.get() + num_nonzeros_, 0.0);
}
void BlockSparseMatrix::RightMultiply(const double* x, double* y) const {

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -71,20 +71,20 @@ bool CellLessThan(const Cell& lhs, const Cell& rhs);
struct CompressedList {
Block block;
vector<Cell> cells;
std::vector<Cell> cells;
};
typedef CompressedList CompressedRow;
typedef CompressedList CompressedColumn;
struct CompressedRowBlockStructure {
vector<Block> cols;
vector<CompressedRow> rows;
std::vector<Block> cols;
std::vector<CompressedRow> rows;
};
struct CompressedColumnBlockStructure {
vector<Block> rows;
vector<CompressedColumn> cols;
std::vector<Block> rows;
std::vector<CompressedColumn> cols;
};
} // namespace internal

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2014 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -37,6 +37,8 @@
namespace ceres {
namespace internal {
using std::string;
StateUpdatingCallback::StateUpdatingCallback(Program* program,
double* parameters)
: program_(program), parameters_(parameters) {}
@ -78,27 +80,27 @@ CallbackReturnType LoggingCallback::operator()(
summary.cumulative_time_in_seconds);
} else if (minimizer_type == TRUST_REGION) {
if (summary.iteration == 0) {
output = "iter cost cost_change |gradient| |step| tr_ratio tr_radius ls_iter iter_time total_time\n";
output = "iter cost cost_change |gradient| |step| tr_ratio tr_radius ls_iter iter_time total_time\n"; // NOLINT
}
const char* kReportRowFormat =
"% 4d % 8e % 3.2e % 3.2e % 3.2e % 3.2e % 3.2e % 4d % 3.2e % 3.2e";
"% 4d % 8e % 3.2e % 3.2e % 3.2e % 3.2e % 3.2e % 4d % 3.2e % 3.2e"; // NOLINT
output += StringPrintf(kReportRowFormat,
summary.iteration,
summary.cost,
summary.cost_change,
summary.gradient_max_norm,
summary.step_norm,
summary.relative_decrease,
summary.trust_region_radius,
summary.linear_solver_iterations,
summary.iteration_time_in_seconds,
summary.cumulative_time_in_seconds);
summary.iteration,
summary.cost,
summary.cost_change,
summary.gradient_max_norm,
summary.step_norm,
summary.relative_decrease,
summary.trust_region_radius,
summary.linear_solver_iterations,
summary.iteration_time_in_seconds,
summary.cumulative_time_in_seconds);
} else {
LOG(FATAL) << "Unknown minimizer type.";
}
if (log_to_stdout_) {
cout << output << endl;
std::cout << output << std::endl;
} else {
VLOG(1) << output;
}

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2014 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -45,6 +45,8 @@
namespace ceres {
namespace internal {
using std::vector;
typedef HashMap<int, int> IntMap;
typedef HashSet<int> IntSet;

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -102,7 +102,7 @@ struct CanonicalViewsClusteringOptions;
void ComputeCanonicalViewsClustering(
const CanonicalViewsClusteringOptions& options,
const WeightedGraph<int>& graph,
vector<int>* centers,
std::vector<int>* centers,
HashMap<int, int>* membership);
struct CanonicalViewsClusteringOptions {

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -69,7 +69,6 @@
#include <utility>
#include "ceres/integral_types.h"
#include "ceres/internal/port.h"
// Some systems don't have access to unordered_map/unordered_set. In
// that case, substitute the hash map/set with normal map/set. The

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -38,12 +38,15 @@
namespace ceres {
namespace internal {
void CompressedColumnScalarMatrixToBlockMatrix(const int* scalar_rows,
const int* scalar_cols,
const vector<int>& row_blocks,
const vector<int>& col_blocks,
vector<int>* block_rows,
vector<int>* block_cols) {
using std::vector;
void CompressedColumnScalarMatrixToBlockMatrix(
const int* scalar_rows,
const int* scalar_cols,
const vector<int>& row_blocks,
const vector<int>& col_blocks,
vector<int>* block_rows,
vector<int>* block_cols) {
CHECK_NOTNULL(block_rows)->clear();
CHECK_NOTNULL(block_cols)->clear();
const int num_row_blocks = row_blocks.size();
@ -66,9 +69,10 @@ void CompressedColumnScalarMatrixToBlockMatrix(const int* scalar_rows,
for (int col_block = 0; col_block < num_col_blocks; ++col_block) {
int column_size = 0;
for (int idx = scalar_cols[c]; idx < scalar_cols[c + 1]; ++idx) {
vector<int>::const_iterator it = lower_bound(row_block_starts.begin(),
row_block_starts.end(),
scalar_rows[idx]);
vector<int>::const_iterator it =
std::lower_bound(row_block_starts.begin(),
row_block_starts.end(),
scalar_rows[idx]);
// Since we are using lower_bound, it will return the row id
// where the row block starts. For everything but the first row
// of the block, where these values will be the same, we can

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -47,19 +47,21 @@ namespace internal {
// and column block j, then it is expected that A contains at least
// one non-zero entry corresponding to the top left entry of c_ij,
// as that entry is used to detect the presence of a non-zero c_ij.
void CompressedColumnScalarMatrixToBlockMatrix(const int* scalar_rows,
const int* scalar_cols,
const vector<int>& row_blocks,
const vector<int>& col_blocks,
vector<int>* block_rows,
vector<int>* block_cols);
void CompressedColumnScalarMatrixToBlockMatrix(
const int* scalar_rows,
const int* scalar_cols,
const std::vector<int>& row_blocks,
const std::vector<int>& col_blocks,
std::vector<int>* block_rows,
std::vector<int>* block_cols);
// Given a set of blocks and a permutation of these blocks, compute
// the corresponding "scalar" ordering, where the scalar ordering of
// size sum(blocks).
void BlockOrderingToScalarOrdering(const vector<int>& blocks,
const vector<int>& block_ordering,
vector<int>* scalar_ordering);
void BlockOrderingToScalarOrdering(
const std::vector<int>& blocks,
const std::vector<int>& block_ordering,
std::vector<int>* scalar_ordering);
// Solve the linear system
//
@ -120,7 +122,7 @@ void SolveRTRWithSparseRHS(IntegerType num_cols,
const double* values,
const int rhs_nonzero_index,
double* solution) {
fill(solution, solution + num_cols, 0.0);
std::fill(solution, solution + num_cols, 0.0);
solution[rhs_nonzero_index] = 1.0 / values[cols[rhs_nonzero_index + 1] - 1];
for (IntegerType c = rhs_nonzero_index + 1; c < num_cols; ++c) {
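As a concrete illustration of BlockOrderingToScalarOrdering declared above: with blocks {2, 3} and block ordering {1, 0}, block 1 (scalar indices 2, 3, 4) comes first, followed by block 0 (scalar indices 0, 1). A minimal sketch, assuming the internal include path; this is internal API shown only as a worked example.

#include <cstdio>
#include <vector>
#include "ceres/compressed_col_sparse_matrix_utils.h"  // internal header; path assumed

int main() {
  std::vector<int> blocks;          // block sizes
  blocks.push_back(2);
  blocks.push_back(3);

  std::vector<int> block_ordering;  // put block 1 before block 0
  block_ordering.push_back(1);
  block_ordering.push_back(0);

  std::vector<int> scalar_ordering;
  ceres::internal::BlockOrderingToScalarOrdering(
      blocks, block_ordering, &scalar_ordering);

  // Expected output: 2 3 4 0 1
  for (size_t i = 0; i < scalar_ordering.size(); ++i) {
    printf("%d ", scalar_ordering[i]);
  }
  printf("\n");
  return 0;
}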

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -30,6 +30,9 @@
#include "ceres/compressed_row_jacobian_writer.h"
#include <utility>
#include <vector>
#include "ceres/casts.h"
#include "ceres/compressed_row_sparse_matrix.h"
#include "ceres/parameter_block.h"
@ -40,6 +43,10 @@
namespace ceres {
namespace internal {
using std::make_pair;
using std::pair;
using std::vector;
void CompressedRowJacobianWriter::PopulateJacobianRowAndColumnBlockVectors(
const Program* program, CompressedRowSparseMatrix* jacobian) {
const vector<ParameterBlock*>& parameter_blocks =
@ -214,9 +221,9 @@ void CompressedRowJacobianWriter::Write(int residual_id,
double* column_block_begin =
jacobian_values + jacobian_rows[residual_offset + r] + col_pos;
copy(block_row_begin,
block_row_begin + parameter_block_size,
column_block_begin);
std::copy(block_row_begin,
block_row_begin + parameter_block_size,
column_block_begin);
}
col_pos += parameter_block_size;
}

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -33,6 +33,9 @@
#ifndef CERES_INTERNAL_COMPRESSED_ROW_JACOBIAN_WRITER_H_
#define CERES_INTERNAL_COMPRESSED_ROW_JACOBIAN_WRITER_H_
#include <utility>
#include <vector>
#include "ceres/evaluator.h"
#include "ceres/scratch_evaluate_preparer.h"
@ -80,7 +83,7 @@ class CompressedRowJacobianWriter {
static void GetOrderedParameterBlocks(
const Program* program,
int residual_id,
vector<pair<int, int> >* evaluated_jacobian_blocks);
std::vector<std::pair<int, int> >* evaluated_jacobian_blocks);
// JacobianWriter interface.

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -40,6 +40,9 @@
namespace ceres {
namespace internal {
using std::vector;
namespace {
// Helper functor used by the constructor for reordering the contents
@ -155,7 +158,7 @@ CompressedRowSparseMatrix::~CompressedRowSparseMatrix() {
}
void CompressedRowSparseMatrix::SetZero() {
fill(values_.begin(), values_.end(), 0);
std::fill(values_.begin(), values_.end(), 0);
}
void CompressedRowSparseMatrix::RightMultiply(const double* x,
@ -184,7 +187,7 @@ void CompressedRowSparseMatrix::LeftMultiply(const double* x, double* y) const {
void CompressedRowSparseMatrix::SquaredColumnNorm(double* x) const {
CHECK_NOTNULL(x);
fill(x, x + num_cols_, 0.0);
std::fill(x, x + num_cols_, 0.0);
for (int idx = 0; idx < rows_[num_rows_]; ++idx) {
x[cols_[idx]] += values_[idx] * values_[idx];
}
@ -238,26 +241,38 @@ void CompressedRowSparseMatrix::AppendRows(const CompressedRowSparseMatrix& m) {
<< "The matrix being appended has: " << m.row_blocks().size()
<< " row blocks.";
if (m.num_rows() == 0) {
return;
}
if (cols_.size() < num_nonzeros() + m.num_nonzeros()) {
cols_.resize(num_nonzeros() + m.num_nonzeros());
values_.resize(num_nonzeros() + m.num_nonzeros());
}
// Copy the contents of m into this matrix.
copy(m.cols(), m.cols() + m.num_nonzeros(), &cols_[num_nonzeros()]);
copy(m.values(), m.values() + m.num_nonzeros(), &values_[num_nonzeros()]);
DCHECK_LT(num_nonzeros(), cols_.size());
if (m.num_nonzeros() > 0) {
std::copy(m.cols(), m.cols() + m.num_nonzeros(), &cols_[num_nonzeros()]);
std::copy(m.values(),
m.values() + m.num_nonzeros(),
&values_[num_nonzeros()]);
}
rows_.resize(num_rows_ + m.num_rows() + 1);
// new_rows = [rows_, m.row() + rows_[num_rows_]]
fill(rows_.begin() + num_rows_,
rows_.begin() + num_rows_ + m.num_rows() + 1,
rows_[num_rows_]);
std::fill(rows_.begin() + num_rows_,
rows_.begin() + num_rows_ + m.num_rows() + 1,
rows_[num_rows_]);
for (int r = 0; r < m.num_rows() + 1; ++r) {
rows_[num_rows_ + r] += m.rows()[r];
}
num_rows_ += m.num_rows();
row_blocks_.insert(row_blocks_.end(), m.row_blocks().begin(), m.row_blocks().end());
row_blocks_.insert(row_blocks_.end(),
m.row_blocks().begin(),
m.row_blocks().end());
}
void CompressedRowSparseMatrix::ToTextFile(FILE* file) const {
@ -329,7 +344,7 @@ CompressedRowSparseMatrix* CompressedRowSparseMatrix::CreateBlockDiagonalMatrix(
int* rows = matrix->mutable_rows();
int* cols = matrix->mutable_cols();
double* values = matrix->mutable_values();
fill(values, values + num_nonzeros, 0.0);
std::fill(values, values + num_nonzeros, 0.0);
int idx_cursor = 0;
int col_cursor = 0;
@ -481,8 +496,9 @@ CompressedRowSparseMatrix::CreateOuterProductMatrixAndProgram(
const CompressedRowSparseMatrix& m,
vector<int>* program) {
CHECK_NOTNULL(program)->clear();
CHECK_GT(m.num_nonzeros(), 0) << "Congratulations, "
<< "you found a bug in Ceres. Please report it.";
CHECK_GT(m.num_nonzeros(), 0)
<< "Congratulations, "
<< "you found a bug in Ceres. Please report it.";
vector<ProductTerm> product;
const vector<int>& row_blocks = m.row_blocks();
@ -495,7 +511,9 @@ CompressedRowSparseMatrix::CreateOuterProductMatrixAndProgram(
// Compute the lower triangular part of the product.
for (int idx1 = m.rows()[r]; idx1 < m.rows()[r + 1]; ++idx1) {
for (int idx2 = m.rows()[r]; idx2 <= idx1; ++idx2) {
product.push_back(ProductTerm(m.cols()[idx1], m.cols()[idx2], product.size()));
product.push_back(ProductTerm(m.cols()[idx1],
m.cols()[idx2],
product.size()));
}
}
row_block_begin = row_block_end;

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -109,11 +109,11 @@ class CompressedRowSparseMatrix : public SparseMatrix {
const int* rows() const { return &rows_[0]; }
int* mutable_rows() { return &rows_[0]; }
const vector<int>& row_blocks() const { return row_blocks_; }
vector<int>* mutable_row_blocks() { return &row_blocks_; }
const std::vector<int>& row_blocks() const { return row_blocks_; }
std::vector<int>* mutable_row_blocks() { return &row_blocks_; }
const vector<int>& col_blocks() const { return col_blocks_; }
vector<int>* mutable_col_blocks() { return &col_blocks_; }
const std::vector<int>& col_blocks() const { return col_blocks_; }
std::vector<int>* mutable_col_blocks() { return &col_blocks_; }
// Destructive array resizing method.
void SetMaxNumNonZeros(int num_nonzeros);
@ -129,7 +129,7 @@ class CompressedRowSparseMatrix : public SparseMatrix {
static CompressedRowSparseMatrix* CreateBlockDiagonalMatrix(
const double* diagonal,
const vector<int>& blocks);
const std::vector<int>& blocks);
// Compute the sparsity structure of the product m.transpose() * m
// and create a CompressedRowSparseMatrix corresponding to it.
@ -147,30 +147,30 @@ class CompressedRowSparseMatrix : public SparseMatrix {
// this information for each row in the row block.
static CompressedRowSparseMatrix* CreateOuterProductMatrixAndProgram(
const CompressedRowSparseMatrix& m,
vector<int>* program);
std::vector<int>* program);
// Compute the values array for the expression m.transpose() * m,
// where the matrix used to store the result and a program have been
// created using the CreateOuterProductMatrixAndProgram function
// above.
static void ComputeOuterProduct(const CompressedRowSparseMatrix& m,
const vector<int>& program,
const std::vector<int>& program,
CompressedRowSparseMatrix* result);
private:
int num_rows_;
int num_cols_;
vector<int> rows_;
vector<int> cols_;
vector<double> values_;
std::vector<int> rows_;
std::vector<int> cols_;
std::vector<double> values_;
// If the matrix has an underlying block structure, then it can also
// carry with it row and column block sizes. This is auxiliary and
// optional information for use by algorithms operating on the
// matrix. The class itself does not make use of this information in
// any way.
vector<int> row_blocks_;
vector<int> col_blocks_;
std::vector<int> row_blocks_;
std::vector<int> col_blocks_;
CERES_DISALLOW_COPY_AND_ASSIGN(CompressedRowSparseMatrix);
};
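The outer-product API declared above is meant for repeated use: the sparsity structure and the multiplication "program" are computed once, and ComputeOuterProduct then refreshes only the values whenever the matrix changes. A minimal sketch of that two-phase call pattern, assuming a CompressedRowSparseMatrix whose row/col blocks and at least one non-zero are already populated; this is internal API, shown only to clarify the design.

#include <vector>
#include "ceres/compressed_row_sparse_matrix.h"  // internal header; path assumed
#include "ceres/internal/scoped_ptr.h"

// Build the structure of J^T * J once, then recompute its values repeatedly.
void RefreshNormalEquations(ceres::internal::CompressedRowSparseMatrix* jacobian,
                            int num_iterations) {
  using ceres::internal::CompressedRowSparseMatrix;

  std::vector<int> program;
  ceres::internal::scoped_ptr<CompressedRowSparseMatrix> jtj(
      CompressedRowSparseMatrix::CreateOuterProductMatrixAndProgram(
          *jacobian, &program));

  for (int i = 0; i < num_iterations; ++i) {
    // ... caller updates jacobian->mutable_values() here ...
    CompressedRowSparseMatrix::ComputeOuterProduct(*jacobian, program, jtj.get());
    // jtj now holds the lower triangular part of J^T * J for this iteration.
  }
}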

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -45,7 +45,7 @@ namespace ceres {
// the one it's wrapping.
ConditionedCostFunction::ConditionedCostFunction(
CostFunction* wrapped_cost_function,
const vector<CostFunction*>& conditioners,
const std::vector<CostFunction*>& conditioners,
Ownership ownership)
: wrapped_cost_function_(wrapped_cost_function),
conditioners_(conditioners),
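ConditionedCostFunction, whose constructor signature appears in this hunk, wraps an existing cost function and passes each residual through its own single-input, single-output conditioner cost function. A minimal usage sketch; the wrapped functor and the scaling conditioner are invented for illustration.

#include <vector>
#include "ceres/ceres.h"

// Hypothetical wrapped cost: two residuals of one scalar parameter.
struct TwoResiduals {
  template <typename T>
  bool operator()(const T* const x, T* residuals) const {
    residuals[0] = x[0] - T(1.0);
    residuals[1] = x[0] - T(4.0);
    return true;
  }
};

// Hypothetical conditioner: rescales a single residual value.
struct Scale {
  explicit Scale(double s) : s_(s) {}
  template <typename T>
  bool operator()(const T* const r, T* out) const {
    out[0] = T(s_) * r[0];
    return true;
  }
  double s_;
};

int main() {
  double x = 0.0;

  ceres::CostFunction* wrapped =
      new ceres::AutoDiffCostFunction<TwoResiduals, 2, 1>(new TwoResiduals);

  // One 1-in / 1-out conditioner per residual of the wrapped cost function.
  std::vector<ceres::CostFunction*> conditioners;
  conditioners.push_back(
      new ceres::AutoDiffCostFunction<Scale, 1, 1>(new Scale(1.0)));
  conditioners.push_back(
      new ceres::AutoDiffCostFunction<Scale, 1, 1>(new Scale(0.1)));

  ceres::Problem problem;
  problem.AddResidualBlock(
      new ceres::ConditionedCostFunction(wrapped, conditioners,
                                         ceres::TAKE_OWNERSHIP),
      NULL, &x);

  ceres::Solver::Options options;
  ceres::Solver::Summary summary;
  ceres::Solve(options, &problem, &summary);
  return 0;
}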

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -114,7 +114,6 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve(
double Q0 = -1.0 * xref.dot(bref + r);
for (summary.num_iterations = 1;; ++summary.num_iterations) {
// Apply preconditioner
if (per_solve_options.preconditioner != NULL) {
z.setZero();
@ -129,7 +128,7 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve(
summary.termination_type = LINEAR_SOLVER_FAILURE;
summary.message = StringPrintf("Numerical failure. rho = r'z = %e.", rho);
break;
};
}
if (summary.num_iterations == 1) {
p = z;
@ -138,7 +137,8 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve(
if (IsZeroOrInfinity(beta)) {
summary.termination_type = LINEAR_SOLVER_FAILURE;
summary.message = StringPrintf(
"Numerical failure. beta = rho_n / rho_{n-1} = %e.", beta);
"Numerical failure. beta = rho_n / rho_{n-1} = %e, "
"rho_n = %e, rho_{n-1} = %e", beta, rho, last_rho);
break;
}
p = z + beta * p;
@ -149,8 +149,11 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve(
A->RightMultiply(p.data(), q.data());
const double pq = p.dot(q);
if ((pq <= 0) || IsInfinite(pq)) {
summary.termination_type = LINEAR_SOLVER_FAILURE;
summary.message = StringPrintf("Numerical failure. p'q = %e.", pq);
summary.termination_type = LINEAR_SOLVER_NO_CONVERGENCE;
summary.message = StringPrintf(
"Matrix is indefinite, no more progress can be made. "
"p'q = %e. |p| = %e, |q| = %e",
pq, p.norm(), q.norm());
break;
}
@ -158,7 +161,8 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve(
if (IsInfinite(alpha)) {
summary.termination_type = LINEAR_SOLVER_FAILURE;
summary.message =
StringPrintf("Numerical failure. alpha = rho / pq = %e", alpha);
StringPrintf("Numerical failure. alpha = rho / pq = %e, "
"rho = %e, pq = %e.", alpha, rho, pq);
break;
}
@ -210,9 +214,11 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve(
summary.num_iterations >= options_.min_num_iterations) {
summary.termination_type = LINEAR_SOLVER_SUCCESS;
summary.message =
StringPrintf("Convergence: zeta = %e < %e",
StringPrintf("Iteration: %d Convergence: zeta = %e < %e. |r| = %e",
summary.num_iterations,
zeta,
per_solve_options.q_tolerance);
per_solve_options.q_tolerance,
r.norm());
break;
}
Q0 = Q1;
@ -223,7 +229,10 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve(
summary.num_iterations >= options_.min_num_iterations) {
summary.termination_type = LINEAR_SOLVER_SUCCESS;
summary.message =
StringPrintf("Convergence. |r| = %e <= %e.", norm_r, tol_r);
StringPrintf("Iteration: %d Convergence. |r| = %e <= %e.",
summary.num_iterations,
norm_r,
tol_r);
break;
}
@ -233,7 +242,7 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve(
}
return summary;
};
}
} // namespace internal
} // namespace ceres
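The reworded convergence messages in this hunk correspond to two stopping rules: a relative-decrease test on the quadratic model (the zeta < q_tolerance branch) and a residual-norm test (|r| <= tol_r, with tol_r derived from the per-solve r_tolerance). The sketch below states the Q-based rule; the exact formula for zeta is inferred from the variable names in this hunk and the usual truncated conjugate gradients criterion, and should be read as an assumption rather than a verbatim copy of the implementation.

// Q(x) = x'Ax - 2b'x is the quadratic model being minimized. At iteration i,
// with q_previous = Q(x_{i-1}) and q_current = Q(x_i), terminate when the
// accumulated relative decrease becomes small (assumed form):
bool QToleranceConverged(int iteration, double q_previous, double q_current,
                         double q_tolerance) {
  const double zeta = iteration * (q_current - q_previous) / q_current;
  return zeta < q_tolerance;
}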

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2014 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -48,11 +48,17 @@
#include "ceres/solver.h"
#include "ceres/trust_region_minimizer.h"
#include "ceres/trust_region_strategy.h"
#include "ceres/parameter_block_ordering.h"
namespace ceres {
namespace internal {
using std::map;
using std::max;
using std::min;
using std::set;
using std::string;
using std::vector;
CoordinateDescentMinimizer::~CoordinateDescentMinimizer() {
}
@ -244,7 +250,7 @@ bool CoordinateDescentMinimizer::IsOrderingValid(
// Verify that each group is an independent set
map<int, set<double*> >::const_iterator it = group_to_elements.begin();
for ( ; it != group_to_elements.end(); ++it) {
for (; it != group_to_elements.end(); ++it) {
if (!program.IsParameterBlockSetIndependent(it->second)) {
*message =
StringPrintf("The user-provided "

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -60,7 +60,7 @@ class CoordinateDescentMinimizer : public Minimizer {
bool Init(const Program& program,
const ProblemImpl::ParameterMap& parameter_map,
const ParameterBlockOrdering& ordering,
string* error);
std::string* error);
// Minimizer interface.
virtual ~CoordinateDescentMinimizer();
@ -71,7 +71,7 @@ class CoordinateDescentMinimizer : public Minimizer {
// Verify that each group in the ordering forms an independent set.
static bool IsOrderingValid(const Program& program,
const ParameterBlockOrdering& ordering,
string* message);
std::string* message);
// Find a recursive decomposition of the Hessian matrix as a set
// of independent sets of decreasing size and invert it. This
@ -85,13 +85,13 @@ class CoordinateDescentMinimizer : public Minimizer {
double* parameters,
Solver::Summary* summary);
vector<ParameterBlock*> parameter_blocks_;
vector<vector<ResidualBlock*> > residual_blocks_;
std::vector<ParameterBlock*> parameter_blocks_;
std::vector<std::vector<ResidualBlock*> > residual_blocks_;
// The optimization is performed in rounds. In each round all the
// parameter blocks that form one independent set are optimized in
// parallel. This array marks the boundaries of the independent
// sets in parameter_blocks_.
vector<int> independent_set_offsets_;
std::vector<int> independent_set_offsets_;
Evaluator::Options evaluator_options_;
};
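IsOrderingValid above is the check that rejects inner-iteration orderings whose groups are not independent sets (two blocks in the same group sharing a residual). From the public API, such an ordering is supplied through Solver::Options. A minimal sketch using hypothetical bundle-adjustment style parameter blocks, where every residual touches exactly one point and one camera, so each group is an independent set by construction.

#include <vector>
#include "ceres/ceres.h"

void EnableInnerIterations(ceres::Solver::Options* options,
                           const std::vector<double*>& points,
                           const std::vector<double*>& cameras) {
  ceres::ParameterBlockOrdering* ordering = new ceres::ParameterBlockOrdering;
  // Group 0: all point blocks; group 1: all camera blocks.
  for (size_t i = 0; i < points.size(); ++i) {
    ordering->AddElementToGroup(points[i], 0);
  }
  for (size_t i = 0; i < cameras.size(); ++i) {
    ordering->AddElementToGroup(cameras[i], 1);
  }
  options->use_inner_iterations = true;
  options->inner_iteration_ordering.reset(ordering);
}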

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -38,6 +38,9 @@
namespace ceres {
using std::pair;
using std::vector;
Covariance::Covariance(const Covariance::Options& options) {
impl_.reset(new internal::CovarianceImpl(options));
}
@ -54,9 +57,20 @@ bool Covariance::Compute(
bool Covariance::GetCovarianceBlock(const double* parameter_block1,
const double* parameter_block2,
double* covariance_block) const {
return impl_->GetCovarianceBlock(parameter_block1,
parameter_block2,
covariance_block);
return impl_->GetCovarianceBlockInTangentOrAmbientSpace(parameter_block1,
parameter_block2,
true, // ambient
covariance_block);
}
bool Covariance::GetCovarianceBlockInTangentSpace(
const double* parameter_block1,
const double* parameter_block2,
double* covariance_block) const {
return impl_->GetCovarianceBlockInTangentOrAmbientSpace(parameter_block1,
parameter_block2,
false, // tangent
covariance_block);
}
} // namespace ceres
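The new GetCovarianceBlockInTangentSpace entry point mirrors GetCovarianceBlock, but returns the block in the local (tangent) space of any LocalParameterization instead of lifting it to the ambient space. A minimal sketch calling both, assuming a problem whose quaternion block q was registered with a local parameterization of local size 3, so the ambient block is 4x4 and the tangent block is 3x3.

#include <utility>
#include <vector>
#include "ceres/ceres.h"
#include "ceres/covariance.h"

// Assumes `problem` already holds residual blocks on q and that q uses a
// local parameterization with local size 3 (e.g. QuaternionParameterization).
void ComputeQuaternionCovariance(ceres::Problem* problem, double* q) {
  ceres::Covariance::Options options;
  ceres::Covariance covariance(options);

  std::vector<std::pair<const double*, const double*> > blocks;
  blocks.push_back(std::make_pair(q, q));
  if (!covariance.Compute(blocks, problem)) {
    return;  // e.g. rank deficient Jacobian; see Covariance::Options.
  }

  // Ambient space: 4x4, in the quaternion's own coordinates.
  double ambient[4 * 4];
  covariance.GetCovarianceBlock(q, q, ambient);

  // Tangent space: 3x3, in the local parameterization's coordinates.
  double tangent[3 * 3];
  covariance.GetCovarianceBlockInTangentSpace(q, q, tangent);
}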

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -38,26 +38,11 @@
#include <cstdlib>
#include <utility>
#include <vector>
#include "Eigen/SparseCore"
// Suppress unused local variable warning from Eigen Ordering.h #included by
// SparseQR in Eigen 3.2.0. This was fixed in Eigen 3.2.1, but 3.2.0 is still
// widely used (Ubuntu 14.04), and Ceres won't compile otherwise due to -Werror.
#if defined(_MSC_VER)
#pragma warning( push )
#pragma warning( disable : 4189 )
#else
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
#endif
#include "Eigen/SparseQR"
#if defined(_MSC_VER)
#pragma warning( pop )
#else
#pragma GCC diagnostic pop
#endif
#include "Eigen/SVD"
#include "ceres/compressed_col_sparse_matrix_utils.h"
#include "ceres/compressed_row_sparse_matrix.h"
#include "ceres/covariance.h"
@ -73,6 +58,12 @@
namespace ceres {
namespace internal {
using std::make_pair;
using std::map;
using std::pair;
using std::swap;
using std::vector;
typedef vector<pair<const double*, const double*> > CovarianceBlocks;
CovarianceImpl::CovarianceImpl(const Covariance::Options& options)
@ -106,9 +97,11 @@ bool CovarianceImpl::Compute(const CovarianceBlocks& covariance_blocks,
return is_valid_;
}
bool CovarianceImpl::GetCovarianceBlock(const double* original_parameter_block1,
const double* original_parameter_block2,
double* covariance_block) const {
bool CovarianceImpl::GetCovarianceBlockInTangentOrAmbientSpace(
const double* original_parameter_block1,
const double* original_parameter_block2,
bool lift_covariance_to_ambient_space,
double* covariance_block) const {
CHECK(is_computed_)
<< "Covariance::GetCovarianceBlock called before Covariance::Compute";
CHECK(is_valid_)
@ -137,7 +130,7 @@ bool CovarianceImpl::GetCovarianceBlock(const double* original_parameter_block1,
const double* parameter_block2 = original_parameter_block2;
const bool transpose = parameter_block1 > parameter_block2;
if (transpose) {
std::swap(parameter_block1, parameter_block2);
swap(parameter_block1, parameter_block2);
}
// Find where in the covariance matrix the block is located.
@ -181,14 +174,17 @@ bool CovarianceImpl::GetCovarianceBlock(const double* original_parameter_block1,
block1_size,
row_size);
// Fast path when there are no local parameterizations.
if (local_param1 == NULL && local_param2 == NULL) {
// Fast path when there are no local parameterizations or if the
// user does not want it lifted to the ambient space.
if ((local_param1 == NULL && local_param2 == NULL) ||
!lift_covariance_to_ambient_space) {
if (transpose) {
MatrixRef(covariance_block, block2_size, block1_size) =
cov.block(0, offset, block1_size, block2_size).transpose();
MatrixRef(covariance_block, block2_local_size, block1_local_size) =
cov.block(0, offset, block1_local_size,
block2_local_size).transpose();
} else {
MatrixRef(covariance_block, block1_size, block2_size) =
cov.block(0, offset, block1_size, block2_size);
MatrixRef(covariance_block, block1_local_size, block2_local_size) =
cov.block(0, offset, block1_local_size, block2_local_size);
}
return true;
}
@ -257,7 +253,8 @@ bool CovarianceImpl::ComputeCovarianceSparsity(
problem->GetParameterBlocks(&all_parameter_blocks);
const ProblemImpl::ParameterMap& parameter_map = problem->parameter_map();
constant_parameter_blocks_.clear();
vector<double*>& active_parameter_blocks = evaluate_options_.parameter_blocks;
vector<double*>& active_parameter_blocks =
evaluate_options_.parameter_blocks;
active_parameter_blocks.clear();
for (int i = 0; i < all_parameter_blocks.size(); ++i) {
double* parameter_block = all_parameter_blocks[i];
@ -270,7 +267,7 @@ bool CovarianceImpl::ComputeCovarianceSparsity(
}
}
sort(active_parameter_blocks.begin(), active_parameter_blocks.end());
std::sort(active_parameter_blocks.begin(), active_parameter_blocks.end());
// Compute the number of rows. Map each parameter block to the
// first row corresponding to it in the covariance matrix using the
@ -609,8 +606,8 @@ bool CovarianceImpl::ComputeCovarianceValuesUsingDenseSVD() {
sqrt(options_.min_reciprocal_condition_number);
const bool automatic_truncation = (options_.null_space_rank < 0);
const int max_rank = min(num_singular_values,
num_singular_values - options_.null_space_rank);
const int max_rank = std::min(num_singular_values,
num_singular_values - options_.null_space_rank);
// Compute the squared inverse of the singular values. Truncate the
// computation based on min_singular_value_ratio and
@ -687,7 +684,7 @@ bool CovarianceImpl::ComputeCovarianceValuesUsingEigenSparseQR() {
qr_solver(sparse_jacobian);
event_logger.AddEvent("QRDecomposition");
if(qr_solver.info() != Eigen::Success) {
if (qr_solver.info() != Eigen::Success) {
LOG(ERROR) << "Eigen::SparseQR decomposition failed.";
return false;
}

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -41,7 +41,6 @@
#include "ceres/suitesparse.h"
namespace ceres {
namespace internal {
class CompressedRowSparseMatrix;
@ -52,15 +51,19 @@ class CovarianceImpl {
~CovarianceImpl();
bool Compute(
const vector<pair<const double*, const double*> >& covariance_blocks,
const std::vector<std::pair<const double*,
const double*> >& covariance_blocks,
ProblemImpl* problem);
bool GetCovarianceBlock(const double* parameter_block1,
const double* parameter_block2,
double* covariance_block) const;
bool GetCovarianceBlockInTangentOrAmbientSpace(
const double* parameter_block1,
const double* parameter_block2,
bool lift_covariance_to_ambient_space,
double* covariance_block) const;
bool ComputeCovarianceSparsity(
const vector<pair<const double*, const double*> >& covariance_blocks,
const std::vector<std::pair<const double*,
const double*> >& covariance_blocks,
ProblemImpl* problem);
bool ComputeCovarianceValues();
@ -78,8 +81,8 @@ class CovarianceImpl {
Problem::EvaluateOptions evaluate_options_;
bool is_computed_;
bool is_valid_;
map<const double*, int> parameter_block_to_row_index_;
set<const double*> constant_parameter_blocks_;
std::map<const double*, int> parameter_block_to_row_index_;
std::set<const double*> constant_parameter_blocks_;
scoped_ptr<CompressedRowSparseMatrix> covariance_matrix_;
};
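CovarianceImpl is the internal backend of the public ceres::Covariance class; the new GetCovarianceBlockInTangentOrAmbientSpace entry point appears to back the local-parameterization-aware variants of GetCovarianceBlock. A minimal sketch of the public API, with a made-up one-parameter residual purely for illustration, might look like:

#include <utility>
#include <vector>
#include "ceres/ceres.h"
#include "glog/logging.h"

// Made-up single-parameter residual so the sketch has something to solve:
// r(x) = x - 3.
struct ExampleResidual {
  template <typename T>
  bool operator()(const T* const x, T* residual) const {
    residual[0] = x[0] - T(3.0);
    return true;
  }
};

int main(int argc, char** argv) {
  google::InitGoogleLogging(argv[0]);

  double x = 0.5;
  ceres::Problem problem;
  problem.AddResidualBlock(
      new ceres::AutoDiffCostFunction<ExampleResidual, 1, 1>(
          new ExampleResidual),
      NULL, &x);

  ceres::Solver::Options solver_options;
  ceres::Solver::Summary summary;
  ceres::Solve(solver_options, &problem, &summary);

  // Ask for the 1x1 covariance block of x with itself.
  ceres::Covariance::Options covariance_options;
  ceres::Covariance covariance(covariance_options);
  std::vector<std::pair<const double*, const double*> > covariance_blocks;
  covariance_blocks.push_back(std::make_pair(&x, &x));
  CHECK(covariance.Compute(covariance_blocks, &problem));

  double covariance_xx[1];
  covariance.GetCovarianceBlock(&x, &x, covariance_xx);
  return 0;
}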

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -38,13 +38,14 @@
#include <vector>
#include "ceres/compressed_col_sparse_matrix_utils.h"
#include "ceres/compressed_row_sparse_matrix.h"
#include "ceres/internal/port.h"
#include "ceres/triplet_sparse_matrix.h"
#include "glog/logging.h"
namespace ceres {
namespace internal {
using std::vector;
CXSparse::CXSparse() : scratch_(NULL), scratch_size_(0) {
}
@ -128,7 +129,7 @@ cs_dis* CXSparse::BlockAnalyzeCholesky(cs_di* A,
int* ordering = cs_amd(1, &block_matrix);
vector<int> block_ordering(num_row_blocks, -1);
copy(ordering, ordering + num_row_blocks, &block_ordering[0]);
std::copy(ordering, ordering + num_row_blocks, &block_ordering[0]);
cs_free(ordering);
vector<int> scalar_ordering;
@ -191,7 +192,7 @@ cs_di* CXSparse::CreateSparseMatrix(TripletSparseMatrix* tsm) {
void CXSparse::ApproximateMinimumDegreeOrdering(cs_di* A, int* ordering) {
int* cs_ordering = cs_amd(1, A);
copy(cs_ordering, cs_ordering + A->m, ordering);
std::copy(cs_ordering, cs_ordering + A->m, ordering);
cs_free(cs_ordering);
}

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -107,8 +107,8 @@ class CXSparse {
// The returned matrix should be deallocated with Free when not used
// anymore.
cs_dis* BlockAnalyzeCholesky(cs_di* A,
const vector<int>& row_blocks,
const vector<int>& col_blocks);
const std::vector<int>& row_blocks,
const std::vector<int>& col_blocks);
// Compute a fill-reducing approximate minimum degree ordering of
// the matrix A. ordering should be non-NULL and should point to
@ -133,8 +133,7 @@ typedef void cs_dis;
class CXSparse {
public:
void Free(void*) {};
void Free(void* arg) {}
};
#endif // CERES_NO_CXSPARSE

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
@ -70,10 +70,7 @@ class DenseJacobianWriter {
int residual_offset,
double **jacobians,
SparseMatrix* jacobian) {
DenseSparseMatrix* dense_jacobian;
if (jacobian != NULL) {
dense_jacobian = down_cast<DenseSparseMatrix*>(jacobian);
}
DenseSparseMatrix* dense_jacobian = down_cast<DenseSparseMatrix*>(jacobian);
const ResidualBlock* residual_block =
program_->residual_blocks()[residual_id];
int num_parameter_blocks = residual_block->NumParameterBlocks();

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:

View File

@ -1,6 +1,6 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:

Some files were not shown because too many files have changed in this diff.