1   /* Copyright 2002-2025 CS GROUP
2    * Licensed to CS GROUP (CS) under one or more
3    * contributor license agreements.  See the NOTICE file distributed with
4    * this work for additional information regarding copyright ownership.
5    * CS licenses this file to You under the Apache License, Version 2.0
6    * (the "License"); you may not use this file except in compliance with
7    * the License.  You may obtain a copy of the License at
8    *
9    *   http://www.apache.org/licenses/LICENSE-2.0
10   *
11   * Unless required by applicable law or agreed to in writing, software
12   * distributed under the License is distributed on an "AS IS" BASIS,
13   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14   * See the License for the specific language governing permissions and
15   * limitations under the License.
16   */
17  package org.orekit.estimation.measurements.gnss;
18  
19  import java.util.SortedSet;
20  import java.util.TreeSet;
21  
22  import org.hipparchus.util.FastMath;
23  
24  /** Decorrelation/reduction engine for LAMBDA method.
25   * <p>
26   * This class implements PJG Teunissen Least Square Ambiguity Decorrelation
27   * Adjustment (LAMBDA) method, as described in both the 1996 paper <a
28   * href="https://www.researchgate.net/publication/2790708_The_LAMBDA_method_for_integer_ambiguity_estimation_implementation_aspects">
29   * The LAMBDA method for integer ambiguity estimation: implementation aspects</a> by
30   * Paul de Jonge and Christian Tiberius and on the 2005 paper <a
31   * href="https://www.researchgate.net/publication/225518977_MLAMBDA_a_modified_LAMBDA_method_for_integer_least-squares_estimation">
32   * A modified LAMBDA method for integer least-squares estimation</a> by X.-W Chang, X. Yang
33   * and T. Zhou, Journal of Geodesy 79(9):552-565, DOI: 10.1007/s00190-005-0004-x
34   * </p>
35   * <p>
 * It slightly departs from the original LAMBDA method as it implements
 * the following improvements proposed in the de Jonge and Tiberius 1996 paper
 * that vastly speed up the search:
39   * </p>
40   * <ul>
41   *   <li>alternate search starting from the middle and expanding outwards</li>
42   *   <li>automatic shrinking of ellipsoid during the search</li>
43   * </ul>
44   * @see AmbiguitySolver
45   * @author Luc Maisonobe
46   * @since 10.0
47   */
public class LambdaMethod extends AbstractLambdaMethod {

    /** Margin factor to apply to estimated search limit parameter. */
    private static final double CHI2_MARGIN_FACTOR = 1.1;

    /** Empty constructor.
     * <p>
     * This constructor is not strictly necessary, but it prevents spurious
     * javadoc warnings with JDK 18 and later.
     * </p>
     * @since 12.0
     */
    public LambdaMethod() {
        // nothing to do
    }

    /** {@inheritDoc} */
    @Override
    protected void ltdlDecomposition() {

        // get references to the diagonal and lower triangular parts
        final double[] diag = getDiagReference();
        final double[] low  = getLowReference();

        // perform Lᵀ.D.L decomposition, in place, processing rows from
        // last to first; on exit diag holds D and low holds the strictly
        // lower part of the unit lower triangular factor L
        for (int k = diag.length - 1; k >= 0; --k) {
            final double inv = 1.0 / diag[k];
            for (int i = 0; i < k; ++i) {
                final double lki = low[lIndex(k, i)];
                // fold the contribution of row k into the not-yet-processed
                // leading sub-matrix (rows 0 to k-1)
                for (int j = 0; j < i; ++j) {
                    low[lIndex(i, j)] -= lki * low[lIndex(k, j)] * inv;
                }
                diag[i] -= lki * lki * inv;
            }
            // normalize row k of L so its (implicit) diagonal entry is 1
            for (int j = 0; j < k; ++j) {
                low[lIndex(k, j)] *= inv;
            }
        }

    }

    /** {@inheritDoc} */
    @Override
    protected void reduction() {

        // get references to the diagonal and lower triangular parts
        final double[] diag = getDiagReference();
        final double[] low  = getLowReference();
        final int n = diag.length;

        // decorrelation sweep: apply integer Gauss transformations and swap
        // neighboring ambiguities whenever the swap decreases the conditional
        // variance; after each swap the sweep restarts from the end of the
        // matrix, and kSaved remembers how far the previous sweep got so
        // Gauss transformations already applied below kSaved are not redone
        int kSaved = n - 2;
        int k0 = kSaved;
        while (k0 >= 0) {
            final int k1 = k0 + 1;
            if (k0 <= kSaved) {
                // reduce column k0 with respect to all later ambiguities
                for (int i = k0 + 1; i < n; ++i) {
                    integerGaussTransformation(i, k0);
                }
            }
            // conditional variance ambiguity k1 would get if swapped with k0
            final double lk1k0 = low[lIndex(k1, k0)];
            final double delta = diag[k0] + lk1k0 * lk1k0 * diag[k1];
            if (delta < diag[k1]) {
                // the swap improves decorrelation: permute and restart sweep
                permutation(k0, delta);
                kSaved = k0;
                k0 = n - 2;
            } else {
                k0--;
            }
        }
    }

    /** {@inheritDoc} */
    @Override
    protected void discreteSearch() {

        // get references to the diagonal part
        final double[] diag = getDiagReference();
        final int n = diag.length;

        // work arrays for the depth-first search over ambiguity levels:
        // fixed[i]   candidate integer value for decorrelated ambiguity i
        // offsets[i] difference fixed[i] - float ambiguity i
        // left[i]    squared distance of fixed[i] to its conditional estimate
        // right[i]   remaining squared search radius at level i
        final long[]   fixed   = new long[n];
        final double[] offsets = new double[n];
        final double[] left    = new double[n];
        final double[] right   = new double[n];

        final int maxSolutions = getMaxSolution();
        final double[] decorrelated = getDecorrelatedReference();

        // one sampler per ambiguity, exploring integers alternately around
        // the conditional estimate and expanding outwards
        final AlternatingSampler[] samplers = new AlternatingSampler[n];

        // set up top level sampling for last ambiguity
        double chi2 = estimateChi2(fixed, offsets);
        right[n - 1] = chi2 / diag[n - 1];
        int index = n - 1;
        while (index < n) {
            if (index == -1) {

                // all ambiguities have been fixed
                final double squaredNorm = chi2 - diag[0] * (right[0] - left[0]);
                addSolution(fixed, squaredNorm);
                final int size = getSolutionsSize();
                if (size >= maxSolutions) {

                    // shrink the ellipsoid so the rest of the search only
                    // visits candidates better than the ones already found
                    chi2 = squaredNorm;
                    right[n - 1] = chi2 / diag[n - 1];
                    for (int i = n - 1; i > 0; --i) {
                        samplers[i].setRadius(FastMath.sqrt(right[i]));
                        right[i - 1] = diag[i] / diag[i - 1] * (right[i] - left[i]);
                    }
                    samplers[0].setRadius(FastMath.sqrt(right[0]));

                    if (size > maxSolutions) {
                        // one solution too many: drop the worst one
                        removeSolution();
                    }

                }

                ++index;

            } else {

                if (samplers[index] == null) {
                    // we start exploring a new ambiguity
                    samplers[index] = new AlternatingSampler(conditionalEstimate(index, offsets), FastMath.sqrt(right[index]));
                } else {
                    // continue exploring the same ambiguity
                    samplers[index].generateNext();
                }

                if (samplers[index].inRange()) {
                    // the sampled integer lies within the current ellipsoid
                    fixed[index]       = samplers[index].getCurrent();
                    offsets[index]     = fixed[index] - decorrelated[index];
                    final double delta = fixed[index] - samplers[index].getMidPoint();
                    left[index]        = delta * delta;
                    if (index > 0) {
                        right[index - 1]   = diag[index] / diag[index - 1] * (right[index] - left[index]);
                    }

                    // go down one level
                    --index;

                } else {

                    // we have completed exploration of this ambiguity range
                    samplers[index] = null;

                    // go up one level
                    ++index;

                }

            }
        }

    }

    /** {@inheritDoc} */
    @Override
    protected void inverseDecomposition() {

        // get references to the diagonal and lower triangular parts
        final double[] diag = getDiagReference();
        final double[] low  = getLowReference();
        final int n = diag.length;

        // we rely on the following equation, where a low triangular
        // matrix L of dimension n is split into sub-matrices of dimensions
        // k, l and m with k + l + m = n
        //
        // [  A  |      |    ]  [        A⁻¹        |         |       ]   [  Iₖ  |      |     ]
        // [  B  |  Iₗ  |    ]  [       -BA⁻¹       |   Iₗ    |       ] = [      |  Iₗ  |     ]
        // [  C  |  D   |  E ]  [ E⁻¹ (DB - C) A⁻¹  | -E⁻¹D   |  E⁻¹  ]   [      |      |  Iₘ ]
        //
        // considering we have already computed A⁻¹ (i.e. inverted rows 0 to k-1
        // of L), and using l = 1 in the previous expression (i.e. the middle matrix
        // is only one row), we see that elements 0 to k-1 of row k are given by -BA⁻¹
        // and that element k is I₁ = 1. We can therefore invert L row by row and we
        // obtain an inverse matrix L⁻¹ which is a low triangular matrix with unit
        // diagonal. A⁻¹ is therefore also a low triangular matrix with unit diagonal.
        // This property is used in the loops below to speed up the computation of -BA⁻¹
        // scratch buffer holding a copy of row k before it is overwritten
        final double[] row = new double[n - 1];
        diag[0] = 1.0 / diag[0];
        for (int k = 1; k < n; ++k) {

            // compute row k of lower triangular part, by computing -BA⁻¹
            final int iK = lIndex(k, 0);
            System.arraycopy(low, iK, row, 0, k);
            for (int j = 0; j < k; ++j) {
                double sum = row[j];
                for (int l = j + 1; l < k; ++l) {
                    sum += row[l] * low[lIndex(l, j)];
                }
                low[iK + j] = -sum;
            }

            // diagonal part (D is diagonal, so it is inverted element-wise)
            diag[k] = 1.0 / diag[k];

        }

    }

    /** Compute a safe estimate of search limit parameter χ².
     * <p>
     * The estimate is based on section 4.11 in de Jonge and Tiberius 1996,
     * computing χ² such that it includes at least a few preset grid points
     * </p>
     * @param fixed placeholder for test fixed ambiguities
     * @param offsets placeholder for offsets between fixed ambiguities and float ambiguities
     * @return safe estimate of search limit parameter χ²
     */
    private double estimateChi2(final long[] fixed, final double[] offsets) {

        // get references to the diagonal part
        final double[] diag = getDiagReference();
        final int n = diag.length;
        // maximum number of solutions sought
        final int maxSolutions = getMaxSolution();
        // get references to the decorrelated ambiguities
        final double[] decorrelated = getDecorrelatedReference();


        // initialize test points, assuming ambiguities have been fully decorrelated
        final AlternatingSampler[] samplers = new AlternatingSampler[n];
        for (int i = 0; i < n; ++i) {
            samplers[i] = new AlternatingSampler(decorrelated[i], 0.0);
        }

        // sorted set collapses duplicate norms and iterates from smallest to largest
        final SortedSet<Double> squaredNorms = new TreeSet<>();

        // first test point at center
        for (int i = 0; i < n; ++i) {
            fixed[i] = samplers[i].getCurrent();
        }
        squaredNorms.add(squaredNorm(fixed, offsets));

        while (squaredNorms.size() < maxSolutions) {
            // add a series of grid points, each shifted from center along a different axis
            final int remaining = FastMath.min(n, maxSolutions - squaredNorms.size());
            for (int i = 0; i < remaining; ++i) {
                final long saved = fixed[i];
                samplers[i].generateNext();
                fixed[i] = samplers[i].getCurrent();
                squaredNorms.add(squaredNorm(fixed, offsets));
                fixed[i] = saved;
            }
        }

        // select a limit ensuring at least the needed number of grid points are in the search domain
        int count = 0;
        for (final double s : squaredNorms) {
            if (++count == maxSolutions) {
                return s * CHI2_MARGIN_FACTOR;
            }
        }

        // never reached: the loop above accumulates at least maxSolutions norms
        return Double.NaN;

    }

    /** Compute squared norm of a set of fixed ambiguities.
     * @param fixed fixed ambiguities
     * @param offsets placeholder for offsets between fixed ambiguities and float ambiguities
     * @return squared norm of a set of fixed ambiguities
     */
    private double squaredNorm(final long[] fixed, final double[] offsets) {
        // get references to the diagonal part and the decorrelated ambiguities
        final double[] diag = getDiagReference();
        final double[] decorrelated = getDecorrelatedReference();
        double n2 = 0;
        // accumulate from last ambiguity to first, so offsets[i] is already
        // filled in for all indices the conditional estimate of level i reads
        for (int i = diag.length - 1; i >= 0; --i) {
            offsets[i] = fixed[i] - decorrelated[i];
            final double delta = fixed[i] - conditionalEstimate(i, offsets);
            n2 += diag[i] * delta * delta;
        }
        return n2;
    }

    /** Compute conditional estimate of an ambiguity.
     * <p>
     * This corresponds to equation 4.4 in de Jonge and Tiberius 1996
     * </p>
     * @param i index of the ambiguity
     * @param offsets offsets between already fixed ambiguities and float ambiguities
     *        (only entries at indices larger than {@code i} are read)
     * @return conditional estimate of ambiguity â<sub>i|i+1...n</sub>
     */
    private double conditionalEstimate(final int i, final double[] offsets) {
        // get references to the diagonal and lower triangular parts
        final double[] diag = getDiagReference();
        final double[] low  = getLowReference();
        // get references to the decorrelated ambiguities
        final double[] decorrelated = getDecorrelatedReference();

        double conditional = decorrelated[i];
        for (int j = i + 1; j < diag.length; ++j) {
            conditional -= low[lIndex(j, i)] * offsets[j];
        }
        return conditional;
    }

}