/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.commons.math.ode.nonstiff;

import java.util.Arrays;

import org.apache.commons.math.linear.Array2DRowRealMatrix;
import org.apache.commons.math.linear.RealMatrixPreservingVisitor;
import org.apache.commons.math.ode.DerivativeException;
import org.apache.commons.math.ode.FirstOrderDifferentialEquations;
import org.apache.commons.math.ode.IntegratorException;
import org.apache.commons.math.ode.sampling.NordsieckStepInterpolator;
import org.apache.commons.math.ode.sampling.StepHandler;
import org.apache.commons.math.util.FastMath;


/**
 * This class implements implicit Adams-Moulton integrators for Ordinary
 * Differential Equations.
 *
 * <p>Adams-Moulton methods (in fact due to Adams alone) are implicit
 * multistep ODE solvers. This implementation is a variation of the classical
 * one: it uses adaptive stepsize to implement error control, whereas
 * classical implementations use a fixed step size. The value of the state
 * vector at step n+1 is a simple combination of the value at step n and of
 * the derivatives at steps n+1, n, n-1 ... Since y'<sub>n+1</sub> is needed
 * to compute y<sub>n+1</sub>, another method must be used to compute a first
 * estimate of y<sub>n+1</sub>, then compute y'<sub>n+1</sub>, then compute
 * a final estimate of y<sub>n+1</sub> using the following formulas. Depending
 * on the number k of previous steps one wants to use for computing the next
 * value, different formulas are available for the final estimate:</p>
 * <ul>
 * <li>k = 1: y<sub>n+1</sub> = y<sub>n</sub> + h y'<sub>n+1</sub></li>
 * <li>k = 2: y<sub>n+1</sub> = y<sub>n</sub> + h (y'<sub>n+1</sub>+y'<sub>n</sub>)/2</li>
 * <li>k = 3: y<sub>n+1</sub> = y<sub>n</sub> + h (5y'<sub>n+1</sub>+8y'<sub>n</sub>-y'<sub>n-1</sub>)/12</li>
 * <li>k = 4: y<sub>n+1</sub> = y<sub>n</sub> + h (9y'<sub>n+1</sub>+19y'<sub>n</sub>-5y'<sub>n-1</sub>+y'<sub>n-2</sub>)/24</li>
 * <li>...</li>
 * </ul>
 *
 * <p>A k-step Adams-Moulton method is of order k+1. For example, k = 1 corresponds
 * to the implicit (backward) Euler method and k = 2 to the implicit trapezoidal rule.</p>
 *
 * <h3>Implementation details</h3>
 *
 * <p>We define scaled derivatives s<sub>i</sub>(n) at step n as:
 * <pre>
 * s<sub>1</sub>(n) = h y'<sub>n</sub> for first derivative
 * s<sub>2</sub>(n) = h<sup>2</sup>/2 y''<sub>n</sub> for second derivative
 * s<sub>3</sub>(n) = h<sup>3</sup>/6 y'''<sub>n</sub> for third derivative
 * ...
 * s<sub>k</sub>(n) = h<sup>k</sup>/k! y<sup>(k)</sup><sub>n</sub> for k<sup>th</sup> derivative
 * </pre></p>
 *
 * <p>The definitions above use the classical representation with several previous first
 * derivatives. Let us define
 * <pre>
 *   q<sub>n</sub> = [ s<sub>1</sub>(n-1) s<sub>1</sub>(n-2) ... s<sub>1</sub>(n-(k-1)) ]<sup>T</sup>
 * </pre>
 * (we omit the k index in the notation for clarity). With these definitions,
 * Adams-Moulton methods can be written:
 * <ul>
 * <li>k = 1: y<sub>n+1</sub> = y<sub>n</sub> + s<sub>1</sub>(n+1)</li>
 * <li>k = 2: y<sub>n+1</sub> = y<sub>n</sub> + 1/2 s<sub>1</sub>(n+1) + [ 1/2 ] q<sub>n+1</sub></li>
 * <li>k = 3: y<sub>n+1</sub> = y<sub>n</sub> + 5/12 s<sub>1</sub>(n+1) + [ 8/12 -1/12 ] q<sub>n+1</sub></li>
 * <li>k = 4: y<sub>n+1</sub> = y<sub>n</sub> + 9/24 s<sub>1</sub>(n+1) + [ 19/24 -5/24 1/24 ] q<sub>n+1</sub></li>
 * <li>...</li>
 * </ul></p>
 *
 * <p>Instead of using the classical representation with first derivatives only (y<sub>n</sub>,
 * s<sub>1</sub>(n+1) and q<sub>n+1</sub>), our implementation uses the Nordsieck vector with
 * higher degree scaled derivatives all taken at the same step (y<sub>n</sub>, s<sub>1</sub>(n)
 * and r<sub>n</sub>) where r<sub>n</sub> is defined as:
 * <pre>
 *   r<sub>n</sub> = [ s<sub>2</sub>(n), s<sub>3</sub>(n) ... s<sub>k</sub>(n) ]<sup>T</sup>
 * </pre>
 * (here again we omit the k index in the notation for clarity)
 * </p>
 *
 * <p>Taylor series formulas show that for any index offset i, s<sub>1</sub>(n-i) can be
 * computed from s<sub>1</sub>(n), s<sub>2</sub>(n) ... s<sub>k</sub>(n), the formula being exact
 * for degree k polynomials.
 * <pre>
 * s<sub>1</sub>(n-i) = s<sub>1</sub>(n) + ∑<sub>j&gt;1</sub> j (-i)<sup>j-1</sup> s<sub>j</sub>(n)
 * </pre>
 * The previous formula can be used with several values for i to compute the transform between
 * classical representation and Nordsieck vector. The transform between r<sub>n</sub>
 * and q<sub>n</sub> resulting from the Taylor series formulas above is:
 * <pre>
 *   q<sub>n</sub> = s<sub>1</sub>(n) u + P r<sub>n</sub>
 * </pre>
 * where u is the [ 1 1 ... 1 ]<sup>T</sup> vector and P is the (k-1)×(k-1) matrix built
 * with the j (-i)<sup>j-1</sup> terms:
 * <pre>
 *        [  -2   3   -4    5  ... ]
 *        [  -4  12  -32   80  ... ]
 *   P =  [  -6  27 -108  405  ... ]
 *        [  -8  48 -256 1280  ... ]
 *        [          ...           ]
 * </pre></p>
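 *
 * <p>As a concrete illustration of this definition (this is only a sketch for the
 * reader, not the code the library actually uses internally, and the method name
 * <code>buildP</code> is hypothetical), the entries of P could be generated as:
 * <pre>
 *   // entry of P in row i (i = 1 .. k-1) and column j-1 (j = 2 .. k) is j (-i)^(j-1)
 *   static double[][] buildP(final int k) {
 *       final double[][] p = new double[k - 1][k - 1];
 *       for (int i = k - 1; i > 0; --i) {
 *           for (int j = k; j > 1; --j) {
 *               p[i - 1][j - 2] = j * Math.pow(-i, j - 1);
 *           }
 *       }
 *       return p;
 *   }
 * </pre>
 * For instance, for k = 5 this yields exactly the 4×4 entries displayed above.</p>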
 *
 * <p>Using the Nordsieck vector has several advantages:
 * <ul>
 * <li>it greatly simplifies step interpolation as the interpolator mainly applies
 * Taylor series formulas,</li>
 * <li>it simplifies step changes that occur when discrete events truncating
 * the step are triggered,</li>
 * <li>it makes it possible to extend the methods in order to support adaptive stepsize.</li>
 * </ul></p>
 *
 * <p>The predicted Nordsieck vector at step n+1 is computed from the Nordsieck vector at step
 * n as follows:
 * <ul>
 * <li>Y<sub>n+1</sub> = y<sub>n</sub> + s<sub>1</sub>(n) + u<sup>T</sup> r<sub>n</sub></li>
 * <li>S<sub>1</sub>(n+1) = h f(t<sub>n+1</sub>, Y<sub>n+1</sub>)</li>
 * <li>R<sub>n+1</sub> = (s<sub>1</sub>(n) - S<sub>1</sub>(n+1)) P<sup>-1</sup> u + P<sup>-1</sup> A P r<sub>n</sub></li>
 * </ul>
 * where A is a row-shifting matrix (the lower left part is an identity matrix):
 * <pre>
 *        [ 0 0   ...  0 0 | 0 ]
 *        [ ---------------+---]
 *        [ 1 0   ...  0 0 | 0 ]
 *    A = [ 0 1   ...  0 0 | 0 ]
 *        [       ...      | 0 ]
 *        [ 0 0   ...  1 0 | 0 ]
 *        [ 0 0   ...  0 1 | 0 ]
 * </pre>
 * From this predicted vector, the corrected vector is computed as follows:
 * <ul>
 * <li>y<sub>n+1</sub> = y<sub>n</sub> + S<sub>1</sub>(n+1) + [ -1 +1 -1 +1 ... ±1 ] r<sub>n+1</sub></li>
 * <li>s<sub>1</sub>(n+1) = h f(t<sub>n+1</sub>, y<sub>n+1</sub>)</li>
 * <li>r<sub>n+1</sub> = R<sub>n+1</sub> + (s<sub>1</sub>(n+1) - S<sub>1</sub>(n+1)) P<sup>-1</sup> u</li>
 * </ul>
 * where the upper case Y<sub>n+1</sub>, S<sub>1</sub>(n+1) and R<sub>n+1</sub> represent the
 * predicted states whereas the lower case y<sub>n+1</sub>, s<sub>1</sub>(n+1) and r<sub>n+1</sub>
 * represent the corrected states.</p>
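 *
 * <p>In the implementation below, the prediction Y<sub>n+1</sub> is not written as an
 * explicit loop: it is delegated to a NordsieckStepInterpolator. As an illustrative
 * sketch only (the loop itself is hypothetical; y, scaled and nordsieck refer to the
 * state array, the scaled first derivative and the Nordsieck matrix maintained by the
 * integrator), the first prediction formula amounts to:
 * <pre>
 *   // Y(n+1) = y(n) + s1(n) + u^T r(n): add the scaled first derivative and
 *   // every row of the Nordsieck matrix to the current state
 *   final double[] yPredicted = y.clone();
 *   for (int j = y.length - 1; j >= 0; --j) {
 *       yPredicted[j] += scaled[j];
 *       for (int i = nordsieck.getRowDimension() - 1; i >= 0; --i) {
 *           yPredicted[j] += nordsieck.getEntry(i, j);
 *       }
 *   }
 * </pre></p>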
 *
 * <p>The P<sup>-1</sup>u vector and the P<sup>-1</sup> A P matrix do not depend on the state,
 * they depend only on k and are therefore precomputed once and for all.</p>
 *
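 * <p>A typical use of the integrator is sketched below. This is only an illustrative
 * example: the problem (a harmonic oscillator), the step count, the step bounds and
 * the tolerances are arbitrary choices, not recommended settings.</p>
 * <pre>
 *   FirstOrderDifferentialEquations ode = new FirstOrderDifferentialEquations() {
 *       public int getDimension() {
 *           return 2;
 *       }
 *       public void computeDerivatives(double t, double[] y, double[] yDot) {
 *           yDot[0] =  y[1];   // harmonic oscillator: y'' = -y
 *           yDot[1] = -y[0];
 *       }
 *   };
 *
 *   // 4-step Adams-Moulton integrator with scalar error control
 *   AdamsMoultonIntegrator integrator =
 *       new AdamsMoultonIntegrator(4, 1.0e-8, 100.0, 1.0e-10, 1.0e-10);
 *   double[] y = new double[] { 1.0, 0.0 };  // overwritten with the state at the end of integration
 *   double stopTime = integrator.integrate(ode, 0.0, y, 16.0, y);
 * </pre>
 *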
 * @version $Revision: 1073158 $ $Date: 2011-02-21 22:46:52 +0100 (lun. 21 févr. 2011) $
 * @since 2.0
 */
public class AdamsMoultonIntegrator extends AdamsIntegrator {

    /** Integrator method name. */
    private static final String METHOD_NAME = "Adams-Moulton";

    /**
     * Build an Adams-Moulton integrator with the given order and error control parameters.
     * @param nSteps number of steps of the method excluding the one being computed
     * @param minStep minimal step (must be positive even for backward
     * integration), the last step can be smaller than this
     * @param maxStep maximal step (must be positive even for backward
     * integration)
     * @param scalAbsoluteTolerance allowed absolute error
     * @param scalRelativeTolerance allowed relative error
     * @exception IllegalArgumentException if order is 1 or less
     */
    public AdamsMoultonIntegrator(final int nSteps,
                                  final double minStep, final double maxStep,
                                  final double scalAbsoluteTolerance,
                                  final double scalRelativeTolerance)
        throws IllegalArgumentException {
        super(METHOD_NAME, nSteps, nSteps + 1, minStep, maxStep,
              scalAbsoluteTolerance, scalRelativeTolerance);
    }

    /**
     * Build an Adams-Moulton integrator with the given order and error control parameters.
     * @param nSteps number of steps of the method excluding the one being computed
     * @param minStep minimal step (must be positive even for backward
     * integration), the last step can be smaller than this
     * @param maxStep maximal step (must be positive even for backward
     * integration)
     * @param vecAbsoluteTolerance allowed absolute error
     * @param vecRelativeTolerance allowed relative error
     * @exception IllegalArgumentException if order is 1 or less
     */
    public AdamsMoultonIntegrator(final int nSteps,
                                  final double minStep, final double maxStep,
                                  final double[] vecAbsoluteTolerance,
                                  final double[] vecRelativeTolerance)
        throws IllegalArgumentException {
        super(METHOD_NAME, nSteps, nSteps + 1, minStep, maxStep,
              vecAbsoluteTolerance, vecRelativeTolerance);
    }


    /** {@inheritDoc} */
    @Override
    public double integrate(final FirstOrderDifferentialEquations equations,
                            final double t0, final double[] y0,
                            final double t, final double[] y)
        throws DerivativeException, IntegratorException {

        final int n = y0.length;
        sanityChecks(equations, t0, y0, t, y);
        setEquations(equations);
        resetEvaluations();
        final boolean forward = t > t0;

        // initialize working arrays
        if (y != y0) {
            System.arraycopy(y0, 0, y, 0, n);
        }
        final double[] yDot = new double[y0.length];
        final double[] yTmp = new double[y0.length];
        final double[] predictedScaled = new double[y0.length];
        Array2DRowRealMatrix nordsieckTmp = null;

        // set up an interpolator sharing the integrator arrays
        final NordsieckStepInterpolator interpolator = new NordsieckStepInterpolator();
        interpolator.reinitialize(y, forward);

        // set up integration control objects
        for (StepHandler handler : stepHandlers) {
            handler.reset();
        }
        setStateInitialized(false);

        // compute the initial Nordsieck vector using the configured starter integrator
        start(t0, y, t);
        interpolator.reinitialize(stepStart, stepSize, scaled, nordsieck);
        interpolator.storeTime(stepStart);

        double hNew = stepSize;
        interpolator.rescale(hNew);

        isLastStep = false;
        do {

            double error = 10;
            while (error >= 1.0) {

                stepSize = hNew;

                // predict a first estimate of the state at step end (P in the PECE sequence)
                final double stepEnd = stepStart + stepSize;
                interpolator.setInterpolatedTime(stepEnd);
                System.arraycopy(interpolator.getInterpolatedState(), 0, yTmp, 0, y0.length);

                // evaluate a first estimate of the derivative (first E in the PECE sequence)
                computeDerivatives(stepEnd, yTmp, yDot);

                // update Nordsieck vector
                for (int j = 0; j < y0.length; ++j) {
                    predictedScaled[j] = stepSize * yDot[j];
                }
                nordsieckTmp = updateHighOrderDerivativesPhase1(nordsieck);
                updateHighOrderDerivativesPhase2(scaled, predictedScaled, nordsieckTmp);

                // apply correction (C in the PECE sequence)
                error = nordsieckTmp.walkInOptimizedOrder(new Corrector(y, predictedScaled, yTmp));

                if (error >= 1.0) {
                    // reject the step and attempt to reduce error by stepsize control
                    final double factor = computeStepGrowShrinkFactor(error);
                    hNew = filterStep(stepSize * factor, forward, false);
                    interpolator.rescale(hNew);
                }
            }

            // evaluate a final estimate of the derivative (second E in the PECE sequence)
            final double stepEnd = stepStart + stepSize;
            computeDerivatives(stepEnd, yTmp, yDot);

            // update Nordsieck vector
            final double[] correctedScaled = new double[y0.length];
            for (int j = 0; j < y0.length; ++j) {
                correctedScaled[j] = stepSize * yDot[j];
            }
            updateHighOrderDerivativesPhase2(predictedScaled, correctedScaled, nordsieckTmp);

            // discrete events handling
            System.arraycopy(yTmp, 0, y, 0, n);
            interpolator.reinitialize(stepEnd, stepSize, correctedScaled, nordsieckTmp);
            interpolator.storeTime(stepStart);
            interpolator.shift();
            interpolator.storeTime(stepEnd);
            stepStart = acceptStep(interpolator, y, yDot, t);
            scaled = correctedScaled;
            nordsieck = nordsieckTmp;

            if (!isLastStep) {

                // prepare next step
                interpolator.storeTime(stepStart);

                if (resetOccurred) {
                    // some event handler has triggered changes that invalidate
                    // the derivatives; we need to restart from scratch
                    start(stepStart, y, t);
                    interpolator.reinitialize(stepStart, stepSize, scaled, nordsieck);

                }

                // stepsize control for next step
                final double factor = computeStepGrowShrinkFactor(error);
                final double scaledH = stepSize * factor;
                final double nextT = stepStart + scaledH;
                final boolean nextIsLast = forward ? (nextT >= t) : (nextT <= t);
                hNew = filterStep(scaledH, forward, nextIsLast);

                final double filteredNextT = stepStart + hNew;
                final boolean filteredNextIsLast = forward ? (filteredNextT >= t) : (filteredNextT <= t);
                if (filteredNextIsLast) {
                    hNew = t - stepStart;
                }

                interpolator.rescale(hNew);
            }

        } while (!isLastStep);

        final double stopTime = stepStart;
        stepStart = Double.NaN;
        stepSize = Double.NaN;
        return stopTime;

    }

    /** Corrector for current state in Adams-Moulton method.
     * <p>
     * This visitor implements the Taylor series formula:
     * <pre>
     * Y<sub>n+1</sub> = y<sub>n</sub> + s<sub>1</sub>(n+1) + [ -1 +1 -1 +1 ... ±1 ] r<sub>n+1</sub>
     * </pre>
     * </p>
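     * <p>The <code>integrate</code> method above applies this corrector by visiting
     * the predicted Nordsieck matrix:</p>
     * <pre>
     *   error = nordsieckTmp.walkInOptimizedOrder(new Corrector(y, predictedScaled, yTmp));
     * </pre>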
     */
    private class Corrector implements RealMatrixPreservingVisitor {

        /** Previous state. */
        private final double[] previous;

        /** Current scaled first derivative. */
        private final double[] scaled;

        /** Current state before correction. */
        private final double[] before;

        /** Current state after correction. */
        private final double[] after;

        /** Simple constructor.
         * @param previous previous state
         * @param scaled current scaled first derivative
         * @param state state to correct (will be overwritten after visit)
         */
        public Corrector(final double[] previous, final double[] scaled, final double[] state) {
            this.previous = previous;
            this.scaled   = scaled;
            this.after    = state;
            this.before   = state.clone();
        }

        /** {@inheritDoc} */
        public void start(int rows, int columns,
                          int startRow, int endRow, int startColumn, int endColumn) {
            Arrays.fill(after, 0.0);
        }

        /** {@inheritDoc} */
        public void visit(int row, int column, double value) {
            if ((row & 0x1) == 0) {
                after[column] -= value;
            } else {
                after[column] += value;
            }
        }

        /**
         * End visiting the Nordsieck vector.
         * <p>The correction is used to control stepsize. So its amplitude is
         * considered to be an error, which must be normalized according to
         * error control settings. If the normalized value is greater than 1,
         * the correction was too large and the step must be rejected.</p>
         * @return the normalized correction; if greater than 1, the step
         * must be rejected
         */
        public double end() {

            double error = 0;
            for (int i = 0; i < after.length; ++i) {
                after[i] += previous[i] + scaled[i];
                if (i < mainSetDimension) {
                    final double yScale = FastMath.max(FastMath.abs(previous[i]), FastMath.abs(after[i]));
                    final double tol = (vecAbsoluteTolerance == null) ?
                                       (scalAbsoluteTolerance + scalRelativeTolerance * yScale) :
                                       (vecAbsoluteTolerance[i] + vecRelativeTolerance[i] * yScale);
                    final double ratio = (after[i] - before[i]) / tol;
                    error += ratio * ratio;
                }
            }

            return FastMath.sqrt(error / mainSetDimension);

        }
    }

}