gtsam 4.1.1
ShonanAveraging.h
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

#pragma once
#include <gtsam/base/Matrix.h>
#include <gtsam/base/Vector.h>
#include <gtsam/dllexport.h>
#include <gtsam/geometry/Rot2.h>
#include <gtsam/geometry/Rot3.h>
#include <gtsam/linear/AcceleratedPowerMethod.h>
#include <gtsam/linear/PowerMethod.h>
#include <gtsam/linear/VectorValues.h>
#include <gtsam/nonlinear/LevenbergMarquardtParams.h>
#include <gtsam/sfm/BinaryMeasurement.h>
#include <gtsam/slam/dataset.h>

#include <Eigen/Sparse>
#include <map>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>

namespace gtsam {
class NonlinearFactorGraph;
class LevenbergMarquardtOptimizer;

/// Parameters governing optimization etc.
template <size_t d>
struct GTSAM_EXPORT ShonanAveragingParameters {
  // Select Rot2 or Rot3 interface based on the template parameter d
  using Rot = typename std::conditional<d == 2, Rot2, Rot3>::type;
  using Anchor = std::pair<size_t, Rot>;

  // Parameters themselves:
  LevenbergMarquardtParams lm;  ///< LM parameters
  double optimalityThreshold;   ///< threshold used in checkOptimality
  Anchor anchor;                ///< pose to use as anchor if not Karcher
  double alpha;                 ///< weight of anchor-based prior (default 0)
  double beta;                  ///< weight of Karcher-based prior (default 1)
  double gamma;                 ///< weight of gauge-fixing factors (default 0)
  bool useHuber;                ///< if enabled, the Huber loss is used (default false)
  bool certifyOptimality;       ///< if enabled, solution optimality is certified (default true)

  ShonanAveragingParameters(const LevenbergMarquardtParams &lm =
                                LevenbergMarquardtParams::CeresDefaults(),
                            const std::string &method = "JACOBI",
                            double optimalityThreshold = -1e-4,
                            double alpha = 0.0, double beta = 1.0,
                            double gamma = 0.0);

  LevenbergMarquardtParams getLMParams() const { return lm; }

  void setOptimalityThreshold(double value) { optimalityThreshold = value; }
  double getOptimalityThreshold() const { return optimalityThreshold; }

  void setAnchor(size_t index, const Rot &value) { anchor = {index, value}; }
  std::pair<size_t, Rot> getAnchor() const { return anchor; }

  void setAnchorWeight(double value) { alpha = value; }
  double getAnchorWeight() const { return alpha; }

  void setKarcherWeight(double value) { beta = value; }
  double getKarcherWeight() const { return beta; }

  void setGaugesWeight(double value) { gamma = value; }
  double getGaugesWeight() const { return gamma; }

  void setUseHuber(bool value) { useHuber = value; }
  bool getUseHuber() const { return useHuber; }

  void setCertifyOptimality(bool value) { certifyOptimality = value; }
  bool getCertifyOptimality() const { return certifyOptimality; }

  /// Print the parameters and flags used for rotation averaging.
  void print(const std::string &s = "") const {
    std::cout << (s.empty() ? s : s + " ");
    std::cout << " ShonanAveragingParameters: " << std::endl;
    std::cout << " alpha: " << alpha << std::endl;
    std::cout << " beta: " << beta << std::endl;
    std::cout << " gamma: " << gamma << std::endl;
    std::cout << " useHuber: " << useHuber << std::endl;
  }
};

using ShonanAveragingParameters2 = ShonanAveragingParameters<2>;
using ShonanAveragingParameters3 = ShonanAveragingParameters<3>;

/// Class that implements Shonan Averaging from our ECCV'20 paper.
template <size_t d>
class GTSAM_EXPORT ShonanAveraging {
 public:
  using Sparse = Eigen::SparseMatrix<double>;

  // Define the Parameters type and use its typedef of the rotation type:
  using Parameters = ShonanAveragingParameters<d>;
  using Rot = typename Parameters::Rot;

  // We store SO(d) BetweenFactors to get noise model
  using Measurements = std::vector<BinaryMeasurement<Rot>>;

 private:
  Parameters parameters_;
  Measurements measurements_;
  size_t nrUnknowns_;
  Sparse D_;  // Sparse (diagonal) degree matrix
  Sparse Q_;  // Sparse measurement matrix, == \tilde{R} in Eriksson18cvpr
  Sparse L_;  // connection Laplacian L = D - Q, needed for optimality check

  Sparse buildQ() const;

  Sparse buildD() const;

 public:

  ShonanAveraging(const Measurements &measurements,
                  const Parameters &parameters = Parameters());

  /// Return number of unknowns.
  size_t nrUnknowns() const { return nrUnknowns_; }

  /// Return number of measurements.
  size_t nrMeasurements() const { return measurements_.size(); }

  /// k^th binary measurement
  const BinaryMeasurement<Rot> &measurement(size_t k) const {
    return measurements_[k];
  }

  /// Update factors to use robust Huber loss.
  Measurements makeNoiseModelRobust(const Measurements &measurements,
                                    double k = 1.345) const {
    Measurements robustMeasurements;
    for (auto &measurement : measurements) {
      auto model = measurement.noiseModel();
      const auto &robust =
          boost::dynamic_pointer_cast<noiseModel::Robust>(model);

      SharedNoiseModel robust_model;
      // Check if the noise model is already robust
      if (robust) {
        robust_model = model;
      } else {
        // make robust
        robust_model = noiseModel::Robust::Create(
            noiseModel::mEstimator::Huber::Create(k), model);
      }
      BinaryMeasurement<Rot> meas(measurement.key1(), measurement.key2(),
                                  measurement.measured(), robust_model);
      robustMeasurements.push_back(meas);
    }
    return robustMeasurements;
  }

  /// k^th measurement, as a Rot.
  const Rot &measured(size_t k) const { return measurements_[k].measured(); }

  /// Keys for k^th measurement, as a vector of Key values.
  const KeyVector &keys(size_t k) const { return measurements_[k].keys(); }

  Sparse D() const { return D_; }               ///< Sparse version of D
  Matrix denseD() const { return Matrix(D_); }  ///< Dense version of D
  Sparse Q() const { return Q_; }               ///< Sparse version of Q
  Matrix denseQ() const { return Matrix(Q_); }  ///< Dense version of Q
  Sparse L() const { return L_; }               ///< Sparse version of L
  Matrix denseL() const { return Matrix(L_); }  ///< Dense version of L

  Sparse computeLambda(const Matrix &S) const;

  /// Dense versions of computeLambda for wrapper/testing.
  Matrix computeLambda_(const Values &values) const {
    return Matrix(computeLambda(values));
  }

  /// Dense versions of computeLambda for wrapper/testing.
  Matrix computeLambda_(const Matrix &S) const {
    return Matrix(computeLambda(S));
  }

  Sparse computeA(const Values &values) const;

  Sparse computeA(const Matrix &S) const;

  /// Dense version of computeA for wrapper/testing.
  Matrix computeA_(const Values &values) const {
    return Matrix(computeA(values));
  }

  static Matrix StiefelElementMatrix(const Values &values);

  double computeMinEigenValue(const Values &values,
                              Vector *minEigenVector = nullptr) const;

  double computeMinEigenValueAP(const Values &values,
                                Vector *minEigenVector = nullptr) const;

  /// Project pxdN Stiefel manifold matrix S to Rot3^N.
  Values roundSolutionS(const Matrix &S) const;

  static VectorValues TangentVectorValues(size_t p, const Vector &v);

  Matrix riemannianGradient(size_t p, const Values &values) const;

  static Values LiftwithDescent(size_t p, const Values &values,
                                const Vector &minEigenVector);

  Values initializeWithDescent(
      size_t p, const Values &values, const Vector &minEigenVector,
      double minEigenValue, double gradienTolerance = 1e-2,
      double preconditionedGradNormTolerance = 1e-4) const;

  NonlinearFactorGraph buildGraphAt(size_t p) const;

  Values initializeRandomlyAt(size_t p, std::mt19937 &rng) const;

  Values initializeRandomlyAt(size_t p) const;

  double costAt(size_t p, const Values &values) const;

  Sparse computeLambda(const Values &values) const;

  std::pair<double, Vector> computeMinEigenVector(const Values &values) const;

  bool checkOptimality(const Values &values) const;

  boost::shared_ptr<LevenbergMarquardtOptimizer> createOptimizerAt(
      size_t p, const Values &initial) const;

  Values tryOptimizingAt(size_t p, const Values &initial) const;

  /// Project from SO(p) to Rot2 or Rot3; Values should be of type SO(p).
  Values projectFrom(size_t p, const Values &values) const;

  Values roundSolution(const Values &values) const;

  /// Lift Values of type T to SO(p).
  template <class T>
  static Values LiftTo(size_t p, const Values &values) {
    Values result;
    for (const auto it : values.filter<T>()) {
      result.insert(it.key, SOn::Lift(p, it.value.matrix()));
    }
    return result;
  }

  double cost(const Values &values) const;

  Values initializeRandomly(std::mt19937 &rng) const;

  Values initializeRandomly() const;

  std::pair<Values, double> run(const Values &initialEstimate, size_t pMin = d,
                                size_t pMax = 10) const;

  /// Helper function to convert measurements to robust noise model if flag is set.
  template <typename T>
  inline std::vector<BinaryMeasurement<T>> maybeRobust(
      const std::vector<BinaryMeasurement<T>> &measurements,
      bool useRobustModel = false) const {
    return useRobustModel ? makeNoiseModelRobust(measurements) : measurements;
  }
};

// Subclasses for d=2 and d=3 that explicitly instantiate, as well as provide a
// convenience interface with file access.

class GTSAM_EXPORT ShonanAveraging2 : public ShonanAveraging<2> {
 public:
  ShonanAveraging2(const Measurements &measurements,
                   const Parameters &parameters = Parameters());
  explicit ShonanAveraging2(std::string g2oFile,
                            const Parameters &parameters = Parameters());
  ShonanAveraging2(const BetweenFactorPose2s &factors,
                   const Parameters &parameters = Parameters());
};

class GTSAM_EXPORT ShonanAveraging3 : public ShonanAveraging<3> {
 public:
  ShonanAveraging3(const Measurements &measurements,
                   const Parameters &parameters = Parameters());
  explicit ShonanAveraging3(std::string g2oFile,
                            const Parameters &parameters = Parameters());

  // TODO(frank): Deprecate after we land pybind wrapper
  ShonanAveraging3(const BetweenFactorPose3s &factors,
                   const Parameters &parameters = Parameters());
};
} // namespace gtsam
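
The snippet below is a minimal usage sketch, not part of the header above. It assumes the header is installed as <gtsam/sfm/ShonanAveraging.h> (as in GTSAM 4.1.x) and uses a placeholder dataset name "pose3.g2o"; it only exercises members declared above (ShonanAveragingParameters3, the g2o constructor of ShonanAveraging3, initializeRandomly, run, and cost).

// Example usage (sketch): rotation averaging over a Pose3 g2o dataset.
// "pose3.g2o" is a placeholder path; adjust it for your data.
#include <gtsam/sfm/ShonanAveraging.h>

#include <iostream>
#include <utility>

int main() {
  using namespace gtsam;

  // Defaults: Huber loss off, optimality certification on.
  ShonanAveragingParameters3 parameters;
  parameters.setOptimalityThreshold(-1e-4);  // documented default

  // Read relative rotation measurements (and noise models) from file.
  ShonanAveraging3 shonan("pose3.g2o", parameters);
  std::cout << shonan.nrMeasurements() << " measurements, "
            << shonan.nrUnknowns() << " unknown rotations\n";

  // Random initial rotations, then run the Riemannian staircase
  // from pMin = 3 up to pMax = 10 (the declared defaults).
  Values initial = shonan.initializeRandomly();
  std::pair<Values, double> result = shonan.run(initial);

  // result.first holds the Rot3 estimates; result.second is the minimum
  // eigenvalue used for the optimality check.
  std::cout << "final cost: " << shonan.cost(result.first)
            << ", certificate min eigenvalue: " << result.second << std::endl;
  return 0;
}

Roughly, with certification enabled, run() keeps climbing to a higher relaxation rank p until the minimum-eigenvalue check (compared against optimalityThreshold) passes, then projects and rounds the SO(p) solution back to Rot3 values.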