Posted to commits@commons.apache.org by er...@apache.org on 2012/12/12 15:11:04 UTC

svn commit: r1420684 [8/15] - in /commons/proper/math/trunk/src: main/java/org/apache/commons/math3/exception/ main/java/org/apache/commons/math3/exception/util/ main/java/org/apache/commons/math3/fitting/ main/java/org/apache/commons/math3/optim/ main...

Added: commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/LevenbergMarquardtOptimizer.java
URL: http://svn.apache.org/viewvc/commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/LevenbergMarquardtOptimizer.java?rev=1420684&view=auto
==============================================================================
--- commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/LevenbergMarquardtOptimizer.java (added)
+++ commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/LevenbergMarquardtOptimizer.java Wed Dec 12 14:10:38 2012
@@ -0,0 +1,939 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.vector.jacobian;
+
+import java.util.Arrays;
+import org.apache.commons.math3.exception.ConvergenceException;
+import org.apache.commons.math3.exception.util.LocalizedFormats;
+import org.apache.commons.math3.optim.PointVectorValuePair;
+import org.apache.commons.math3.optim.ConvergenceChecker;
+import org.apache.commons.math3.linear.RealMatrix;
+import org.apache.commons.math3.util.Precision;
+import org.apache.commons.math3.util.FastMath;
+
+
+/**
+ * This class solves a least-squares problem using the Levenberg-Marquardt algorithm.
+ *
+ * <p>This implementation <em>should</em> work even for over-determined systems
+ * (i.e. systems having more points than equations). Over-determined systems
+ * are solved by ignoring the points which have the smallest impact according
+ * to their Jacobian column norm. Only the rank of the matrix and some loop bounds
+ * are changed to implement this.</p>
+ *
+ * <p>The resolution engine is a simple translation of the MINPACK <a
+ * href="http://www.netlib.org/minpack/lmder.f">lmder</a> routine with minor
+ * changes. The changes include the handling of over-determined systems, the use of
+ * an inherited convergence checker and the Q.R. decomposition which has been
+ * rewritten following the algorithm described in the
+ * P. Lascaux and R. Theodor book <i>Analyse num&eacute;rique matricielle
+ * appliqu&eacute;e &agrave; l'art de l'ing&eacute;nieur</i>, Masson 1986.</p>
+ * <p>The authors of the original Fortran version are:
+ * <ul>
+ * <li>Argonne National Laboratory. MINPACK project. March 1980</li>
+ * <li>Burton S. Garbow</li>
+ * <li>Kenneth E. Hillstrom</li>
+ * <li>Jorge J. More</li>
+ * </ul>
+ * The redistribution policy for MINPACK is available <a
+ * href="http://www.netlib.org/minpack/disclaimer">here</a>, for convenience, it
+ * is reproduced below.</p>
+ *
+ * <table border="0" width="80%" cellpadding="10" align="center" bgcolor="#E0E0E0">
+ * <tr><td>
+ *    Minpack Copyright Notice (1999) University of Chicago.
+ *    All rights reserved
+ * </td></tr>
+ * <tr><td>
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * <ol>
+ *  <li>Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.</li>
+ * <li>Redistributions in binary form must reproduce the above
+ *     copyright notice, this list of conditions and the following
+ *     disclaimer in the documentation and/or other materials provided
+ *     with the distribution.</li>
+ * <li>The end-user documentation included with the redistribution, if any,
+ *     must include the following acknowledgment:
+ *     <code>This product includes software developed by the University of
+ *           Chicago, as Operator of Argonne National Laboratory.</code>
+ *     Alternately, this acknowledgment may appear in the software itself,
+ *     if and wherever such third-party acknowledgments normally appear.</li>
+ * <li><strong>WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
+ *     WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
+ *     UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
+ *     THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
+ *     IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
+ *     OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
+ *     OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
+ *     USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
+ *     THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
+ *     DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
+ *     UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
+ *     BE CORRECTED.</strong></li>
+ * <li><strong>LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
+ *     HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
+ *     ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
+ *     INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
+ *     ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
+ *     PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
+ *     SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
+ *     (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
+ *     EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
+ *     POSSIBILITY OF SUCH LOSS OR DAMAGES.</strong></li>
+ * </ol></td></tr>
+ * </table>
+ *
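+ * <p>The following is a minimal usage sketch; it is purely illustrative: the
+ * data points, target values, weights and the linear model are hypothetical,
+ * and the helper classes used ({@code ModelFunction}, {@code ModelFunctionJacobian},
+ * {@code Target}, {@code Weight}, {@code InitialGuess}, {@code MaxEval},
+ * {@code MultivariateVectorFunction}, {@code MultivariateMatrixFunction}) are
+ * those of the {@code optim} and {@code analysis} packages.</p>
+ * <pre>{@code
+ * // Fit y = a.x + b to four hypothetical observations.
+ * final double[] x = { 0, 1, 2, 3 };
+ * ModelFunction model = new ModelFunction(new MultivariateVectorFunction() {
+ *     public double[] value(double[] p) {
+ *         final double[] y = new double[x.length];
+ *         for (int i = 0; i < x.length; i++) {
+ *             y[i] = p[0] * x[i] + p[1]; // model value at x[i]
+ *         }
+ *         return y;
+ *     }
+ * });
+ * ModelFunctionJacobian jacobian = new ModelFunctionJacobian(new MultivariateMatrixFunction() {
+ *     public double[][] value(double[] p) {
+ *         final double[][] j = new double[x.length][2];
+ *         for (int i = 0; i < x.length; i++) {
+ *             j[i][0] = x[i]; // partial derivative with respect to a
+ *             j[i][1] = 1;    // partial derivative with respect to b
+ *         }
+ *         return j;
+ *     }
+ * });
+ * LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
+ * PointVectorValuePair optimum =
+ *     optimizer.optimize(new MaxEval(1000),
+ *                        model,
+ *                        jacobian,
+ *                        new Target(new double[] { 1.1, 2.9, 5.2, 7.0 }),
+ *                        new Weight(new double[] { 1, 1, 1, 1 }),
+ *                        new InitialGuess(new double[] { 0, 0 }));
+ * }</pre>
+ *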
+ * @version $Id: LevenbergMarquardtOptimizer.java 1416643 2012-12-03 19:37:14Z tn $
+ * @since 2.0
+ */
+public class LevenbergMarquardtOptimizer
+    extends AbstractLeastSquaresOptimizer {
+    /** Number of solved points. */
+    private int solvedCols;
+    /** Diagonal elements of the R matrix in the Q.R. decomposition. */
+    private double[] diagR;
+    /** Norms of the columns of the jacobian matrix. */
+    private double[] jacNorm;
+    /** Coefficients of the Householder transform vectors. */
+    private double[] beta;
+    /** Columns permutation array. */
+    private int[] permutation;
+    /** Rank of the jacobian matrix. */
+    private int rank;
+    /** Levenberg-Marquardt parameter. */
+    private double lmPar;
+    /** Parameters evolution direction associated with lmPar. */
+    private double[] lmDir;
+    /** Positive input variable used in determining the initial step bound. */
+    private final double initialStepBoundFactor;
+    /** Desired relative error in the sum of squares. */
+    private final double costRelativeTolerance;
+    /**  Desired relative error in the approximate solution parameters. */
+    private final double parRelativeTolerance;
+    /** Desired max cosine on the orthogonality between the function vector
+     * and the columns of the jacobian. */
+    private final double orthoTolerance;
+    /** Threshold for QR ranking. */
+    private final double qrRankingThreshold;
+    /** Weighted residuals. */
+    private double[] weightedResidual;
+    /** Weighted Jacobian. */
+    private double[][] weightedJacobian;
+
+    /**
+     * Build an optimizer for least squares problems with default values
+     * for all the tuning parameters (see the {@link
+     * #LevenbergMarquardtOptimizer(double,double,double,double,double)
+     * other constructor}).
+     * The default values for the algorithm settings are:
+     * <ul>
+     *  <li>Initial step bound factor: 100</li>
+     *  <li>Cost relative tolerance: 1e-10</li>
+     *  <li>Parameters relative tolerance: 1e-10</li>
+     *  <li>Orthogonality tolerance: 1e-10</li>
+     *  <li>QR ranking threshold: {@link Precision#SAFE_MIN}</li>
+     * </ul>
+     */
+    public LevenbergMarquardtOptimizer() {
+        this(100, 1e-10, 1e-10, 1e-10, Precision.SAFE_MIN);
+    }
+
+    /**
+     * Constructor that allows the specification of a custom convergence
+     * checker.
+     * Note that all the usual convergence checks will be <em>disabled</em>.
+     * The default values for the algorithm settings are:
+     * <ul>
+     *  <li>Initial step bound factor: 100</li>
+     *  <li>Cost relative tolerance: 1e-10</li>
+     *  <li>Parameters relative tolerance: 1e-10</li>
+     *  <li>Orthogonality tolerance: 1e-10</li>
+     *  <li>QR ranking threshold: {@link Precision#SAFE_MIN}</li>
+     * </ul>
+     *
+     * @param checker Convergence checker.
+     */
+    public LevenbergMarquardtOptimizer(ConvergenceChecker<PointVectorValuePair> checker) {
+        this(100, checker, 1e-10, 1e-10, 1e-10, Precision.SAFE_MIN);
+    }
+
+    /**
+     * Constructor that allows the specification of a custom convergence
+     * checker, in addition to the standard ones.
+     *
+     * @param initialStepBoundFactor Positive input variable used in
+     * determining the initial step bound. This bound is set to the
+     * product of initialStepBoundFactor and the Euclidean norm of
+     * {@code diag * x} if non-zero, or else to {@code initialStepBoundFactor}
+     * itself. In most cases this factor should lie in the interval
+     * {@code (0.1, 100.0)}. {@code 100} is a generally recommended value.
+     * @param checker Convergence checker.
+     * @param costRelativeTolerance Desired relative error in the sum of
+     * squares.
+     * @param parRelativeTolerance Desired relative error in the approximate
+     * solution parameters.
+     * @param orthoTolerance Desired max cosine on the orthogonality between
+     * the function vector and the columns of the Jacobian.
+     * @param threshold Desired threshold for QR ranking. If the squared norm
+     * of a column vector is smaller than or equal to this threshold during QR
+     * decomposition, it is considered to be a zero vector and hence the rank
+     * of the matrix is reduced.
+     */
+    public LevenbergMarquardtOptimizer(double initialStepBoundFactor,
+                                       ConvergenceChecker<PointVectorValuePair> checker,
+                                       double costRelativeTolerance,
+                                       double parRelativeTolerance,
+                                       double orthoTolerance,
+                                       double threshold) {
+        super(checker);
+        this.initialStepBoundFactor = initialStepBoundFactor;
+        this.costRelativeTolerance = costRelativeTolerance;
+        this.parRelativeTolerance = parRelativeTolerance;
+        this.orthoTolerance = orthoTolerance;
+        this.qrRankingThreshold = threshold;
+    }
+
+    /**
+     * Build an optimizer for least squares problems with default values
+     * for some of the tuning parameters (see the {@link
+     * #LevenbergMarquardtOptimizer(double,double,double,double,double)
+     * other constructor}).
+     * The default values for the algorithm settings are:
+     * <ul>
+     *  <li>Initial step bound factor: 100</li>
+     *  <li>QR ranking threshold: {@link Precision#SAFE_MIN}</li>
+     * </ul>
+     *
+     * @param costRelativeTolerance Desired relative error in the sum of
+     * squares.
+     * @param parRelativeTolerance Desired relative error in the approximate
+     * solution parameters.
+     * @param orthoTolerance Desired max cosine on the orthogonality between
+     * the function vector and the columns of the Jacobian.
+     */
+    public LevenbergMarquardtOptimizer(double costRelativeTolerance,
+                                       double parRelativeTolerance,
+                                       double orthoTolerance) {
+        this(100,
+             costRelativeTolerance, parRelativeTolerance, orthoTolerance,
+             Precision.SAFE_MIN);
+    }
+
+    /**
+     * The arguments control the behaviour of the default convergence checking
+     * procedure.
+     * Additional criteria can be defined through the setting of a {@link
+     * ConvergenceChecker}.
+     *
+     * @param initialStepBoundFactor Positive input variable used in
+     * determining the initial step bound. This bound is set to the
+     * product of initialStepBoundFactor and the Euclidean norm of
+     * {@code diag * x} if non-zero, or else to {@code initialStepBoundFactor}
+     * itself. In most cases this factor should lie in the interval
+     * {@code (0.1, 100.0)}. {@code 100} is a generally recommended value.
+     * @param costRelativeTolerance Desired relative error in the sum of
+     * squares.
+     * @param parRelativeTolerance Desired relative error in the approximate
+     * solution parameters.
+     * @param orthoTolerance Desired max cosine on the orthogonality between
+     * the function vector and the columns of the Jacobian.
+     * @param threshold Desired threshold for QR ranking. If the squared norm
+     * of a column vector is smaller than or equal to this threshold during QR
+     * decomposition, it is considered to be a zero vector and hence the rank
+     * of the matrix is reduced.
+     */
+    public LevenbergMarquardtOptimizer(double initialStepBoundFactor,
+                                       double costRelativeTolerance,
+                                       double parRelativeTolerance,
+                                       double orthoTolerance,
+                                       double threshold) {
+        super(null); // No custom convergence criterion.
+        this.initialStepBoundFactor = initialStepBoundFactor;
+        this.costRelativeTolerance = costRelativeTolerance;
+        this.parRelativeTolerance = parRelativeTolerance;
+        this.orthoTolerance = orthoTolerance;
+        this.qrRankingThreshold = threshold;
+    }
+
+    /** {@inheritDoc} */
+    @Override
+    protected PointVectorValuePair doOptimize() {
+        final int nR = getTarget().length; // Number of observed data.
+        final double[] currentPoint = getStartPoint();
+        final int nC = currentPoint.length; // Number of parameters.
+
+        // arrays shared with the other private methods
+        solvedCols  = FastMath.min(nR, nC);
+        diagR       = new double[nC];
+        jacNorm     = new double[nC];
+        beta        = new double[nC];
+        permutation = new int[nC];
+        lmDir       = new double[nC];
+
+        // local point
+        double   delta   = 0;
+        double   xNorm   = 0;
+        double[] diag    = new double[nC];
+        double[] oldX    = new double[nC];
+        double[] oldRes  = new double[nR];
+        double[] oldObj  = new double[nR];
+        double[] qtf     = new double[nR];
+        double[] work1   = new double[nC];
+        double[] work2   = new double[nC];
+        double[] work3   = new double[nC];
+
+        final RealMatrix weightMatrixSqrt = getWeightSquareRoot();
+
+        // Evaluate the function at the starting point and calculate its norm.
+        double[] currentObjective = computeObjectiveValue(currentPoint);
+        double[] currentResiduals = computeResiduals(currentObjective);
+        PointVectorValuePair current = new PointVectorValuePair(currentPoint, currentObjective);
+        double currentCost = computeCost(currentResiduals);
+
+        // Outer loop.
+        lmPar = 0;
+        boolean firstIteration = true;
+        int iter = 0;
+        final ConvergenceChecker<PointVectorValuePair> checker = getConvergenceChecker();
+        while (true) {
+            ++iter;
+            final PointVectorValuePair previous = current;
+
+            // QR decomposition of the jacobian matrix
+            qrDecomposition(computeWeightedJacobian(currentPoint));
+
+            weightedResidual = weightMatrixSqrt.operate(currentResiduals);
+            for (int i = 0; i < nR; i++) {
+                qtf[i] = weightedResidual[i];
+            }
+
+            // compute Qt.res
+            qTy(qtf);
+
+            // now we don't need Q anymore,
+            // so let jacobian contain the R matrix with its diagonal elements
+            for (int k = 0; k < solvedCols; ++k) {
+                int pk = permutation[k];
+                weightedJacobian[k][pk] = diagR[pk];
+            }
+
+            if (firstIteration) {
+                // scale the point according to the norms of the columns
+                // of the initial jacobian
+                xNorm = 0;
+                for (int k = 0; k < nC; ++k) {
+                    double dk = jacNorm[k];
+                    if (dk == 0) {
+                        dk = 1.0;
+                    }
+                    double xk = dk * currentPoint[k];
+                    xNorm  += xk * xk;
+                    diag[k] = dk;
+                }
+                xNorm = FastMath.sqrt(xNorm);
+
+                // initialize the step bound delta
+                delta = (xNorm == 0) ? initialStepBoundFactor : (initialStepBoundFactor * xNorm);
+            }
+
+            // check orthogonality between function vector and jacobian columns
+            double maxCosine = 0;
+            if (currentCost != 0) {
+                for (int j = 0; j < solvedCols; ++j) {
+                    int    pj = permutation[j];
+                    double s  = jacNorm[pj];
+                    if (s != 0) {
+                        double sum = 0;
+                        for (int i = 0; i <= j; ++i) {
+                            sum += weightedJacobian[i][pj] * qtf[i];
+                        }
+                        maxCosine = FastMath.max(maxCosine, FastMath.abs(sum) / (s * currentCost));
+                    }
+                }
+            }
+            if (maxCosine <= orthoTolerance) {
+                // Convergence has been reached.
+                setCost(currentCost);
+                return current;
+            }
+
+            // rescale if necessary
+            for (int j = 0; j < nC; ++j) {
+                diag[j] = FastMath.max(diag[j], jacNorm[j]);
+            }
+
+            // Inner loop.
+            for (double ratio = 0; ratio < 1.0e-4;) {
+
+                // save the state
+                for (int j = 0; j < solvedCols; ++j) {
+                    int pj = permutation[j];
+                    oldX[pj] = currentPoint[pj];
+                }
+                final double previousCost = currentCost;
+                double[] tmpVec = weightedResidual;
+                weightedResidual = oldRes;
+                oldRes    = tmpVec;
+                tmpVec    = currentObjective;
+                currentObjective = oldObj;
+                oldObj    = tmpVec;
+
+                // determine the Levenberg-Marquardt parameter
+                determineLMParameter(qtf, delta, diag, work1, work2, work3);
+
+                // compute the new point and the norm of the evolution direction
+                double lmNorm = 0;
+                for (int j = 0; j < solvedCols; ++j) {
+                    int pj = permutation[j];
+                    lmDir[pj] = -lmDir[pj];
+                    currentPoint[pj] = oldX[pj] + lmDir[pj];
+                    double s = diag[pj] * lmDir[pj];
+                    lmNorm  += s * s;
+                }
+                lmNorm = FastMath.sqrt(lmNorm);
+                // on the first iteration, adjust the initial step bound.
+                if (firstIteration) {
+                    delta = FastMath.min(delta, lmNorm);
+                }
+
+                // Evaluate the function at x + p and calculate its norm.
+                currentObjective = computeObjectiveValue(currentPoint);
+                currentResiduals = computeResiduals(currentObjective);
+                current = new PointVectorValuePair(currentPoint, currentObjective);
+                currentCost = computeCost(currentResiduals);
+
+                // compute the scaled actual reduction
+                double actRed = -1.0;
+                if (0.1 * currentCost < previousCost) {
+                    double r = currentCost / previousCost;
+                    actRed = 1.0 - r * r;
+                }
+
+                // compute the scaled predicted reduction
+                // and the scaled directional derivative
+                for (int j = 0; j < solvedCols; ++j) {
+                    int pj = permutation[j];
+                    double dirJ = lmDir[pj];
+                    work1[j] = 0;
+                    for (int i = 0; i <= j; ++i) {
+                        work1[i] += weightedJacobian[i][pj] * dirJ;
+                    }
+                }
+                double coeff1 = 0;
+                for (int j = 0; j < solvedCols; ++j) {
+                    coeff1 += work1[j] * work1[j];
+                }
+                double pc2 = previousCost * previousCost;
+                coeff1 = coeff1 / pc2;
+                double coeff2 = lmPar * lmNorm * lmNorm / pc2;
+                double preRed = coeff1 + 2 * coeff2;
+                double dirDer = -(coeff1 + coeff2);
+
+                // ratio of the actual to the predicted reduction
+                ratio = (preRed == 0) ? 0 : (actRed / preRed);
+
+                // update the step bound
+                if (ratio <= 0.25) {
+                    double tmp =
+                        (actRed < 0) ? (0.5 * dirDer / (dirDer + 0.5 * actRed)) : 0.5;
+                    if ((0.1 * currentCost >= previousCost) || (tmp < 0.1)) {
+                        tmp = 0.1;
+                    }
+                    delta = tmp * FastMath.min(delta, 10.0 * lmNorm);
+                    lmPar /= tmp;
+                } else if ((lmPar == 0) || (ratio >= 0.75)) {
+                    delta = 2 * lmNorm;
+                    lmPar *= 0.5;
+                }
+
+                // test for successful iteration.
+                if (ratio >= 1.0e-4) {
+                    // successful iteration, update the norm
+                    firstIteration = false;
+                    xNorm = 0;
+                    for (int k = 0; k < nC; ++k) {
+                        double xK = diag[k] * currentPoint[k];
+                        xNorm += xK * xK;
+                    }
+                    xNorm = FastMath.sqrt(xNorm);
+
+                    // tests for convergence.
+                    if (checker != null) {
+                        // we use the vectorial convergence checker
+                        if (checker.converged(iter, previous, current)) {
+                            setCost(currentCost);
+                            return current;
+                        }
+                    }
+                } else {
+                    // failed iteration, reset the previous values
+                    currentCost = previousCost;
+                    for (int j = 0; j < solvedCols; ++j) {
+                        int pj = permutation[j];
+                        currentPoint[pj] = oldX[pj];
+                    }
+                    tmpVec    = weightedResidual;
+                    weightedResidual = oldRes;
+                    oldRes    = tmpVec;
+                    tmpVec    = currentObjective;
+                    currentObjective = oldObj;
+                    oldObj    = tmpVec;
+                    // Reset "current" to previous values.
+                    current = new PointVectorValuePair(currentPoint, currentObjective);
+                }
+
+                // Default convergence criteria.
+                if ((FastMath.abs(actRed) <= costRelativeTolerance &&
+                     preRed <= costRelativeTolerance &&
+                     ratio <= 2.0) ||
+                    delta <= parRelativeTolerance * xNorm) {
+                    setCost(currentCost);
+                    return current;
+                }
+
+                // tests for termination and stringent tolerances
+                // (2.2204e-16 is the machine epsilon for IEEE754)
+                if ((FastMath.abs(actRed) <= 2.2204e-16) && (preRed <= 2.2204e-16) && (ratio <= 2.0)) {
+                    throw new ConvergenceException(LocalizedFormats.TOO_SMALL_COST_RELATIVE_TOLERANCE,
+                                                   costRelativeTolerance);
+                } else if (delta <= 2.2204e-16 * xNorm) {
+                    throw new ConvergenceException(LocalizedFormats.TOO_SMALL_PARAMETERS_RELATIVE_TOLERANCE,
+                                                   parRelativeTolerance);
+                } else if (maxCosine <= 2.2204e-16)  {
+                    throw new ConvergenceException(LocalizedFormats.TOO_SMALL_ORTHOGONALITY_TOLERANCE,
+                                                   orthoTolerance);
+                }
+            }
+        }
+    }
+
+    /**
+     * Determine the Levenberg-Marquardt parameter.
+     * <p>This implementation is a translation in Java of the MINPACK
+     * <a href="http://www.netlib.org/minpack/lmpar.f">lmpar</a>
+     * routine.</p>
+     * <p>This method sets the lmPar and lmDir attributes.</p>
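+     * <p>Roughly speaking, the method looks for the value of lmPar such that
+     * the step lmDir which solves, in the least squares sense, the system
+     * a.x = b, sqrt(lmPar).d.x = 0 (see the determineLMDirection method) has
+     * a scaled norm ||diag.lmDir|| matching the step bound delta to within
+     * about 10%, following the MINPACK lmpar strategy.</p>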
+     * <p>The authors of the original Fortran function are:</p>
+     * <ul>
+     *   <li>Argonne National Laboratory. MINPACK project. March 1980</li>
+     *   <li>Burton  S. Garbow</li>
+     *   <li>Kenneth E. Hillstrom</li>
+     *   <li>Jorge   J. More</li>
+     * </ul>
+     * <p>Luc Maisonobe did the Java translation.</p>
+     *
+     * @param qy array containing qTy
+     * @param delta upper bound on the euclidean norm of diagR * lmDir
+     * @param diag diagonal matrix
+     * @param work1 work array
+     * @param work2 work array
+     * @param work3 work array
+     */
+    private void determineLMParameter(double[] qy, double delta, double[] diag,
+                                      double[] work1, double[] work2, double[] work3) {
+        final int nC = weightedJacobian[0].length;
+
+        // compute and store in x the gauss-newton direction, if the
+        // jacobian is rank-deficient, obtain a least squares solution
+        for (int j = 0; j < rank; ++j) {
+            lmDir[permutation[j]] = qy[j];
+        }
+        for (int j = rank; j < nC; ++j) {
+            lmDir[permutation[j]] = 0;
+        }
+        for (int k = rank - 1; k >= 0; --k) {
+            int pk = permutation[k];
+            double ypk = lmDir[pk] / diagR[pk];
+            for (int i = 0; i < k; ++i) {
+                lmDir[permutation[i]] -= ypk * weightedJacobian[i][pk];
+            }
+            lmDir[pk] = ypk;
+        }
+
+        // evaluate the function at the origin, and test
+        // for acceptance of the Gauss-Newton direction
+        double dxNorm = 0;
+        for (int j = 0; j < solvedCols; ++j) {
+            int pj = permutation[j];
+            double s = diag[pj] * lmDir[pj];
+            work1[pj] = s;
+            dxNorm += s * s;
+        }
+        dxNorm = FastMath.sqrt(dxNorm);
+        double fp = dxNorm - delta;
+        if (fp <= 0.1 * delta) {
+            lmPar = 0;
+            return;
+        }
+
+        // if the jacobian is not rank deficient, the Newton step provides
+        // a lower bound, parl, for the zero of the function,
+        // otherwise set this bound to zero
+        double sum2;
+        double parl = 0;
+        if (rank == solvedCols) {
+            for (int j = 0; j < solvedCols; ++j) {
+                int pj = permutation[j];
+                work1[pj] *= diag[pj] / dxNorm;
+            }
+            sum2 = 0;
+            for (int j = 0; j < solvedCols; ++j) {
+                int pj = permutation[j];
+                double sum = 0;
+                for (int i = 0; i < j; ++i) {
+                    sum += weightedJacobian[i][pj] * work1[permutation[i]];
+                }
+                double s = (work1[pj] - sum) / diagR[pj];
+                work1[pj] = s;
+                sum2 += s * s;
+            }
+            parl = fp / (delta * sum2);
+        }
+
+        // calculate an upper bound, paru, for the zero of the function
+        sum2 = 0;
+        for (int j = 0; j < solvedCols; ++j) {
+            int pj = permutation[j];
+            double sum = 0;
+            for (int i = 0; i <= j; ++i) {
+                sum += weightedJacobian[i][pj] * qy[i];
+            }
+            sum /= diag[pj];
+            sum2 += sum * sum;
+        }
+        double gNorm = FastMath.sqrt(sum2);
+        double paru = gNorm / delta;
+        if (paru == 0) {
+            // 2.2251e-308 is the smallest positive normalized real for IEEE754
+            paru = 2.2251e-308 / FastMath.min(delta, 0.1);
+        }
+
+        // if the input par lies outside of the interval (parl,paru),
+        // set par to the closer endpoint
+        lmPar = FastMath.min(paru, FastMath.max(lmPar, parl));
+        if (lmPar == 0) {
+            lmPar = gNorm / dxNorm;
+        }
+
+        for (int countdown = 10; countdown >= 0; --countdown) {
+
+            // evaluate the function at the current value of lmPar
+            if (lmPar == 0) {
+                lmPar = FastMath.max(2.2251e-308, 0.001 * paru);
+            }
+            double sPar = FastMath.sqrt(lmPar);
+            for (int j = 0; j < solvedCols; ++j) {
+                int pj = permutation[j];
+                work1[pj] = sPar * diag[pj];
+            }
+            determineLMDirection(qy, work1, work2, work3);
+
+            dxNorm = 0;
+            for (int j = 0; j < solvedCols; ++j) {
+                int pj = permutation[j];
+                double s = diag[pj] * lmDir[pj];
+                work3[pj] = s;
+                dxNorm += s * s;
+            }
+            dxNorm = FastMath.sqrt(dxNorm);
+            double previousFP = fp;
+            fp = dxNorm - delta;
+
+            // if the function is small enough, accept the current value
+            // of lmPar, also test for the exceptional cases where parl is zero
+            if ((FastMath.abs(fp) <= 0.1 * delta) ||
+                    ((parl == 0) && (fp <= previousFP) && (previousFP < 0))) {
+                return;
+            }
+
+            // compute the Newton correction
+            for (int j = 0; j < solvedCols; ++j) {
+                int pj = permutation[j];
+                work1[pj] = work3[pj] * diag[pj] / dxNorm;
+            }
+            for (int j = 0; j < solvedCols; ++j) {
+                int pj = permutation[j];
+                work1[pj] /= work2[j];
+                double tmp = work1[pj];
+                for (int i = j + 1; i < solvedCols; ++i) {
+                    work1[permutation[i]] -= weightedJacobian[i][pj] * tmp;
+                }
+            }
+            sum2 = 0;
+            for (int j = 0; j < solvedCols; ++j) {
+                double s = work1[permutation[j]];
+                sum2 += s * s;
+            }
+            double correction = fp / (delta * sum2);
+
+            // depending on the sign of the function, update parl or paru.
+            if (fp > 0) {
+                parl = FastMath.max(parl, lmPar);
+            } else if (fp < 0) {
+                paru = FastMath.min(paru, lmPar);
+            }
+
+            // compute an improved estimate for lmPar
+            lmPar = FastMath.max(parl, lmPar + correction);
+
+        }
+    }
+
+    /**
+     * Solve a*x = b and d*x = 0 in the least squares sense.
+     * <p>This implementation is a translation in Java of the MINPACK
+     * <a href="http://www.netlib.org/minpack/qrsolv.f">qrsolv</a>
+     * routine.</p>
+     * <p>This method sets the lmDir and lmDiag attributes.</p>
+     * <p>The authors of the original Fortran function are:</p>
+     * <ul>
+     *   <li>Argonne National Laboratory. MINPACK project. March 1980</li>
+     *   <li>Burton  S. Garbow</li>
+     *   <li>Kenneth E. Hillstrom</li>
+     *   <li>Jorge   J. More</li>
+     * </ul>
+     * <p>Luc Maisonobe did the Java translation.</p>
+     *
+     * @param qy array containing qTy
+     * @param diag diagonal matrix
+     * @param lmDiag diagonal elements associated with lmDir
+     * @param work work array
+     */
+    private void determineLMDirection(double[] qy, double[] diag,
+                                      double[] lmDiag, double[] work) {
+
+        // copy R and Qty to preserve input and initialize s
+        //  in particular, save the diagonal elements of R in lmDir
+        for (int j = 0; j < solvedCols; ++j) {
+            int pj = permutation[j];
+            for (int i = j + 1; i < solvedCols; ++i) {
+                weightedJacobian[i][pj] = weightedJacobian[j][permutation[i]];
+            }
+            lmDir[j] = diagR[pj];
+            work[j]  = qy[j];
+        }
+
+        // eliminate the diagonal matrix d using a Givens rotation
+        for (int j = 0; j < solvedCols; ++j) {
+
+            // prepare the row of d to be eliminated, locating the
+            // diagonal element using p from the Q.R. factorization
+            int pj = permutation[j];
+            double dpj = diag[pj];
+            if (dpj != 0) {
+                Arrays.fill(lmDiag, j + 1, lmDiag.length, 0);
+            }
+            lmDiag[j] = dpj;
+
+            //  the transformations to eliminate the row of d
+            // modify only a single element of Qty
+            // beyond the first n, which is initially zero.
+            double qtbpj = 0;
+            for (int k = j; k < solvedCols; ++k) {
+                int pk = permutation[k];
+
+                // determine a Givens rotation which eliminates the
+                // appropriate element in the current row of d
+                if (lmDiag[k] != 0) {
+
+                    final double sin;
+                    final double cos;
+                    double rkk = weightedJacobian[k][pk];
+                    if (FastMath.abs(rkk) < FastMath.abs(lmDiag[k])) {
+                        final double cotan = rkk / lmDiag[k];
+                        sin   = 1.0 / FastMath.sqrt(1.0 + cotan * cotan);
+                        cos   = sin * cotan;
+                    } else {
+                        final double tan = lmDiag[k] / rkk;
+                        cos = 1.0 / FastMath.sqrt(1.0 + tan * tan);
+                        sin = cos * tan;
+                    }
+
+                    // compute the modified diagonal element of R and
+                    // the modified element of (Qty,0)
+                    weightedJacobian[k][pk] = cos * rkk + sin * lmDiag[k];
+                    final double temp = cos * work[k] + sin * qtbpj;
+                    qtbpj = -sin * work[k] + cos * qtbpj;
+                    work[k] = temp;
+
+                    // accumulate the transformation in the row of s
+                    for (int i = k + 1; i < solvedCols; ++i) {
+                        double rik = weightedJacobian[i][pk];
+                        final double temp2 = cos * rik + sin * lmDiag[i];
+                        lmDiag[i] = -sin * rik + cos * lmDiag[i];
+                        weightedJacobian[i][pk] = temp2;
+                    }
+                }
+            }
+
+            // store the diagonal element of s and restore
+            // the corresponding diagonal element of R
+            lmDiag[j] = weightedJacobian[j][permutation[j]];
+            weightedJacobian[j][permutation[j]] = lmDir[j];
+        }
+
+        // solve the triangular system for z, if the system is
+        // singular, then obtain a least squares solution
+        int nSing = solvedCols;
+        for (int j = 0; j < solvedCols; ++j) {
+            if ((lmDiag[j] == 0) && (nSing == solvedCols)) {
+                nSing = j;
+            }
+            if (nSing < solvedCols) {
+                work[j] = 0;
+            }
+        }
+        if (nSing > 0) {
+            for (int j = nSing - 1; j >= 0; --j) {
+                int pj = permutation[j];
+                double sum = 0;
+                for (int i = j + 1; i < nSing; ++i) {
+                    sum += weightedJacobian[i][pj] * work[i];
+                }
+                work[j] = (work[j] - sum) / lmDiag[j];
+            }
+        }
+
+        // permute the components of z back to components of lmDir
+        for (int j = 0; j < lmDir.length; ++j) {
+            lmDir[permutation[j]] = work[j];
+        }
+    }
+
+    /**
+     * Decompose a matrix A as A.P = Q.R using Householder transforms.
+     * <p>As suggested in the P. Lascaux and R. Theodor book
+     * <i>Analyse num&eacute;rique matricielle appliqu&eacute;e &agrave;
+     * l'art de l'ing&eacute;nieur</i> (Masson, 1986), instead of representing
+     * the Householder transforms with u<sub>k</sub> unit vectors such that:
+     * <pre>
+     * H<sub>k</sub> = I - 2u<sub>k</sub>.u<sub>k</sub><sup>t</sup>
+     * </pre>
+     * we use v<sub>k</sub> non-unit vectors such that:
+     * <pre>
+     * H<sub>k</sub> = I - beta<sub>k</sub>v<sub>k</sub>.v<sub>k</sub><sup>t</sup>
+     * </pre>
+     * where v<sub>k</sub> = a<sub>k</sub> - alpha<sub>k</sub> e<sub>k</sub>.
+     * The beta<sub>k</sub> coefficients are provided upon exit as recomputing
+     * them from the v<sub>k</sub> vectors would be costly.</p>
+     * <p>This decomposition handles rank deficient cases since the transformations
+     * are performed in order of non-increasing column norms thanks to column
+     * pivoting. The diagonal elements of the R matrix are therefore also in
+     * order of non-increasing absolute values.</p>
+     *
+     * @param jacobian Weighted Jacobian matrix at the current point.
+     * @exception ConvergenceException if the decomposition cannot be performed
+     */
+    private void qrDecomposition(RealMatrix jacobian) throws ConvergenceException {
+        // Code in this class assumes that the weighted Jacobian is -(W^(1/2) J),
+        // hence the multiplication by -1.
+        weightedJacobian = jacobian.scalarMultiply(-1).getData();
+
+        final int nR = weightedJacobian.length;
+        final int nC = weightedJacobian[0].length;
+
+        // initializations
+        for (int k = 0; k < nC; ++k) {
+            permutation[k] = k;
+            double norm2 = 0;
+            for (int i = 0; i < nR; ++i) {
+                double akk = weightedJacobian[i][k];
+                norm2 += akk * akk;
+            }
+            jacNorm[k] = FastMath.sqrt(norm2);
+        }
+
+        // transform the matrix column after column
+        for (int k = 0; k < nC; ++k) {
+
+            // select the column with the greatest norm on active components
+            int nextColumn = -1;
+            double ak2 = Double.NEGATIVE_INFINITY;
+            for (int i = k; i < nC; ++i) {
+                double norm2 = 0;
+                for (int j = k; j < nR; ++j) {
+                    double aki = weightedJacobian[j][permutation[i]];
+                    norm2 += aki * aki;
+                }
+                if (Double.isInfinite(norm2) || Double.isNaN(norm2)) {
+                    throw new ConvergenceException(LocalizedFormats.UNABLE_TO_PERFORM_QR_DECOMPOSITION_ON_JACOBIAN,
+                                                   nR, nC);
+                }
+                if (norm2 > ak2) {
+                    nextColumn = i;
+                    ak2        = norm2;
+                }
+            }
+            if (ak2 <= qrRankingThreshold) {
+                rank = k;
+                return;
+            }
+            int pk                  = permutation[nextColumn];
+            permutation[nextColumn] = permutation[k];
+            permutation[k]          = pk;
+
+            // choose alpha such that Hk.u = alpha ek
+            double akk   = weightedJacobian[k][pk];
+            double alpha = (akk > 0) ? -FastMath.sqrt(ak2) : FastMath.sqrt(ak2);
+            double betak = 1.0 / (ak2 - akk * alpha);
+            beta[pk]     = betak;
+
+            // transform the current column
+            diagR[pk]        = alpha;
+            weightedJacobian[k][pk] -= alpha;
+
+            // transform the remaining columns
+            for (int dk = nC - 1 - k; dk > 0; --dk) {
+                double gamma = 0;
+                for (int j = k; j < nR; ++j) {
+                    gamma += weightedJacobian[j][pk] * weightedJacobian[j][permutation[k + dk]];
+                }
+                gamma *= betak;
+                for (int j = k; j < nR; ++j) {
+                    weightedJacobian[j][permutation[k + dk]] -= gamma * weightedJacobian[j][pk];
+                }
+            }
+        }
+        rank = solvedCols;
+    }
+
+    /**
+     * Compute the product Qt.y for the current Q.R. decomposition.
+     *
+     * @param y vector to multiply (will be overwritten with the result)
+     */
+    private void qTy(double[] y) {
+        final int nR = weightedJacobian.length;
+        final int nC = weightedJacobian[0].length;
+
+        for (int k = 0; k < nC; ++k) {
+            int pk = permutation[k];
+            double gamma = 0;
+            for (int i = k; i < nR; ++i) {
+                gamma += weightedJacobian[i][pk] * y[i];
+            }
+            gamma *= beta[pk];
+            for (int i = k; i < nR; ++i) {
+                y[i] -= gamma * weightedJacobian[i][pk];
+            }
+        }
+    }
+}

Propchange: commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/LevenbergMarquardtOptimizer.java
------------------------------------------------------------------------------
    svn:eol-style = native

Added: commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/package-info.java
URL: http://svn.apache.org/viewvc/commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/package-info.java?rev=1420684&view=auto
==============================================================================
--- commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/package-info.java (added)
+++ commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/package-info.java Wed Dec 12 14:10:38 2012
@@ -0,0 +1,21 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.vector.jacobian;
+
+/**
+ * This package provides optimization algorithms that require derivatives.
+ */

Propchange: commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/jacobian/package-info.java
------------------------------------------------------------------------------
    svn:eol-style = native

Added: commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/package-info.java
URL: http://svn.apache.org/viewvc/commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/package-info.java?rev=1420684&view=auto
==============================================================================
--- commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/package-info.java (added)
+++ commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/package-info.java Wed Dec 12 14:10:38 2012
@@ -0,0 +1,21 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.nonlinear.vector;
+
+/**
+ * Algorithms for optimizing a vector function.
+ */

Propchange: commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/nonlinear/vector/package-info.java
------------------------------------------------------------------------------
    svn:eol-style = native

Added: commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/package-info.java
URL: http://svn.apache.org/viewvc/commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/package-info.java?rev=1420684&view=auto
==============================================================================
--- commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/package-info.java (added)
+++ commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/package-info.java Wed Dec 12 14:10:38 2012
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim;
+
+/**
+ * <p>
+ *  Generally, optimizers are algorithms that will either
+ *  {@link GoalType#MINIMIZE minimize} or {@link GoalType#MAXIMIZE maximize}
+ *  a scalar function, called the {@link ObjectiveFunction <em>objective
+ *  function</em>}.
+ *  <br/>
+ *  For some scalar objective functions the gradient can be computed (analytically
+ *  or numerically). Algorithms that use this knowledge are defined in the
+ *  {@link org.apache.commons.math3.optim.nonlinear.scalar.gradient} package.
+ *  The algorithms that do not need this additional information are located in
+ *  the {@link org.apache.commons.math3.optim.nonlinear.scalar.noderiv} package.
+ * </p>
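+ *
+ * <p>
+ *  As a purely illustrative sketch (the objective function, tolerances and
+ *  starting point below are hypothetical), a derivative-free minimization of
+ *  a scalar function could look like:
+ * </p>
+ * <pre>{@code
+ * MultivariateFunction f = new MultivariateFunction() {
+ *     public double value(double[] x) {
+ *         return (x[0] - 1) * (x[0] - 1) + x[1] * x[1];
+ *     }
+ * };
+ * PowellOptimizer optimizer = new PowellOptimizer(1e-10, 1e-30);
+ * PointValuePair optimum =
+ *     optimizer.optimize(new MaxEval(1000),
+ *                        new ObjectiveFunction(f),
+ *                        GoalType.MINIMIZE,
+ *                        new InitialGuess(new double[] { 0, 0 }));
+ * }</pre>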
+ *
+ * <p>
+ *  Some problems are solved more efficiently by algorithms that, instead of an
+ *  objective function, need access to a
+ *  {@link org.apache.commons.math3.optim.nonlinear.vector.ModelFunction
+ *  <em>model function</em>}: such a model predicts a set of values which the
+ *  algorithm tries to match with a set of given
+ *  {@link org.apache.commons.math3.optim.nonlinear.vector.Target target values}.
+ *  Those algorithms are located in the
+ *  {@link org.apache.commons.math3.optim.nonlinear.vector} package.
+ *  <br/>
+ *  Algorithms that also require the
+ *  {@link org.apache.commons.math3.optim.nonlinear.vector.ModelFunctionJacobian
+ *  Jacobian matrix of the model} are located in the
+ *  {@link org.apache.commons.math3.optim.nonlinear.vector.jacobian} package.
+ *  <br/>
+ *  The {@link org.apache.commons.math3.optim.nonlinear.vector.jacobian.AbstractLeastSquaresOptimizer
+ *  non-linear least-squares optimizers} are a specialization of the latter,
+ *  that minimize the distance (called <em>cost</em> or <em>&chi;<sup>2</sup></em>)
+ *  between model and observations.
+ *  <br/>
+ *  For cases where the Jacobian cannot be provided, a utility class will
+ *  {@link org.apache.commons.math3.optim.nonlinear.scalar.LeastSquaresConverter
+ *  convert} a (vector) model into a (scalar) objective function.
+ * </p>
+ *
+ * <p>
+ *  This package provides common functionality for the optimization algorithms.
+ *  Abstract classes ({@link BaseOptimizer} and {@link BaseMultivariateOptimizer})
+ *  define boiler-plate code for storing {@link MaxEval evaluations} and
+ *  {@link MaxIter iterations} counters and a user-defined
+ *  {@link ConvergenceChecker convergence checker}.
+ * </p>
+ *
+ * <p>
+ *  For each of the optimizer types, there is a special implementation that
+ *  wraps an optimizer instance and provides a "multi-start" feature: it calls
+ *  the underlying optimizer several times with different starting points and
+ *  returns the best optimum found, or all optima if so desired.
+ *  This could be useful to avoid being trapped in a local extremum.
+ * </p>
+ */

Propchange: commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/package-info.java
------------------------------------------------------------------------------
    svn:eol-style = native

Added: commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/univariate/BracketFinder.java
URL: http://svn.apache.org/viewvc/commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/univariate/BracketFinder.java?rev=1420684&view=auto
==============================================================================
--- commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/univariate/BracketFinder.java (added)
+++ commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/univariate/BracketFinder.java Wed Dec 12 14:10:38 2012
@@ -0,0 +1,287 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.univariate;
+
+import org.apache.commons.math3.util.Incrementor;
+import org.apache.commons.math3.exception.NotStrictlyPositiveException;
+import org.apache.commons.math3.exception.TooManyEvaluationsException;
+import org.apache.commons.math3.exception.MaxCountExceededException;
+import org.apache.commons.math3.analysis.UnivariateFunction;
+import org.apache.commons.math3.optim.GoalType;
+
+/**
+ * Provide an interval that brackets a local optimum of a function.
+ * This code is based on a Python implementation (from <em>SciPy</em>,
+ * module {@code optimize.py} v0.5).
+ *
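+ * <p>A short usage sketch (the function and the two initial points are
+ * hypothetical):</p>
+ * <pre>{@code
+ * UnivariateFunction f = new UnivariateFunction() {
+ *     public double value(double x) {
+ *         return (x - 2) * (x - 2);
+ *     }
+ * };
+ * BracketFinder bracket = new BracketFinder();
+ * bracket.search(f, GoalType.MINIMIZE, 0, 1);
+ * // getLo(), getMid() and getHi() now delimit an interval that brackets
+ * // the minimum near x = 2.
+ * }</pre>
+ *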
+ * @version $Id: BracketFinder.java 1413186 2012-11-24 13:47:59Z erans $
+ * @since 2.2
+ */
+public class BracketFinder {
+    /** Tolerance to avoid division by zero. */
+    private static final double EPS_MIN = 1e-21;
+    /**
+     * Golden section.
+     */
+    private static final double GOLD = 1.618034;
+    /**
+     * Factor for expanding the interval.
+     */
+    private final double growLimit;
+    /**
+     * Counter for function evaluations.
+     */
+    private final Incrementor evaluations = new Incrementor();
+    /**
+     * Lower bound of the bracket.
+     */
+    private double lo;
+    /**
+     * Higher bound of the bracket.
+     */
+    private double hi;
+    /**
+     * Point inside the bracket.
+     */
+    private double mid;
+    /**
+     * Function value at {@link #lo}.
+     */
+    private double fLo;
+    /**
+     * Function value at {@link #hi}.
+     */
+    private double fHi;
+    /**
+     * Function value at {@link #mid}.
+     */
+    private double fMid;
+
+    /**
+     * Constructor with default values {@code 100, 50} (see the
+     * {@link #BracketFinder(double,int) other constructor}).
+     */
+    public BracketFinder() {
+        this(100, 50);
+    }
+
+    /**
+     * Create a bracketing interval finder.
+     *
+     * @param growLimit Expanding factor.
+     * @param maxEvaluations Maximum number of evaluations allowed for finding
+     * a bracketing interval.
+     */
+    public BracketFinder(double growLimit,
+                         int maxEvaluations) {
+        if (growLimit <= 0) {
+            throw new NotStrictlyPositiveException(growLimit);
+        }
+        if (maxEvaluations <= 0) {
+            throw new NotStrictlyPositiveException(maxEvaluations);
+        }
+
+        this.growLimit = growLimit;
+        evaluations.setMaximalCount(maxEvaluations);
+    }
+
+    /**
+     * Search new points that bracket a local optimum of the function.
+     *
+     * @param func Function whose optimum should be bracketed.
+     * @param goal {@link GoalType Goal type}.
+     * @param xA Initial point.
+     * @param xB Initial point.
+     * @throws TooManyEvaluationsException if the maximum number of evaluations
+     * is exceeded.
+     */
+    public void search(UnivariateFunction func, GoalType goal, double xA, double xB) {
+        evaluations.resetCount();
+        final boolean isMinim = goal == GoalType.MINIMIZE;
+
+        double fA = eval(func, xA);
+        double fB = eval(func, xB);
+        if (isMinim ?
+            fA < fB :
+            fA > fB) {
+
+            double tmp = xA;
+            xA = xB;
+            xB = tmp;
+
+            tmp = fA;
+            fA = fB;
+            fB = tmp;
+        }
+
+        double xC = xB + GOLD * (xB - xA);
+        double fC = eval(func, xC);
+
+        while (isMinim ? fC < fB : fC > fB) {
+            double tmp1 = (xB - xA) * (fB - fC);
+            double tmp2 = (xB - xC) * (fB - fA);
+
+            double val = tmp2 - tmp1;
+            double denom = Math.abs(val) < EPS_MIN ? 2 * EPS_MIN : 2 * val;
+
+            double w = xB - ((xB - xC) * tmp2 - (xB - xA) * tmp1) / denom;
+            double wLim = xB + growLimit * (xC - xB);
+
+            double fW;
+            if ((w - xC) * (xB - w) > 0) {
+                fW = eval(func, w);
+                if (isMinim ?
+                    fW < fC :
+                    fW > fC) {
+                    xA = xB;
+                    xB = w;
+                    fA = fB;
+                    fB = fW;
+                    break;
+                } else if (isMinim ?
+                           fW > fB :
+                           fW < fB) {
+                    xC = w;
+                    fC = fW;
+                    break;
+                }
+                w = xC + GOLD * (xC - xB);
+                fW = eval(func, w);
+            } else if ((w - wLim) * (wLim - xC) >= 0) {
+                w = wLim;
+                fW = eval(func, w);
+            } else if ((w - wLim) * (xC - w) > 0) {
+                fW = eval(func, w);
+                if (isMinim ?
+                    fW < fC :
+                    fW > fC) {
+                    xB = xC;
+                    xC = w;
+                    w = xC + GOLD * (xC - xB);
+                    fB = fC;
+                    fC = fW;
+                    fW = eval(func, w);
+                }
+            } else {
+                w = xC + GOLD * (xC - xB);
+                fW = eval(func, w);
+            }
+
+            xA = xB;
+            fA = fB;
+            xB = xC;
+            fB = fC;
+            xC = w;
+            fC = fW;
+        }
+
+        lo = xA;
+        fLo = fA;
+        mid = xB;
+        fMid = fB;
+        hi = xC;
+        fHi = fC;
+
+        if (lo > hi) {
+            double tmp = lo;
+            lo = hi;
+            hi = tmp;
+
+            tmp = fLo;
+            fLo = fHi;
+            fHi = tmp;
+        }
+    }
+
+    /**
+     * @return the maximum number of evaluations allowed.
+     */
+    public int getMaxEvaluations() {
+        return evaluations.getMaximalCount();
+    }
+
+    /**
+     * @return the number of evaluations performed so far.
+     */
+    public int getEvaluations() {
+        return evaluations.getCount();
+    }
+
+    /**
+     * @return the lower bound of the bracket.
+     * @see #getFLo()
+     */
+    public double getLo() {
+        return lo;
+    }
+
+    /**
+     * Get function value at {@link #getLo()}.
+     * @return function value at {@link #getLo()}
+     */
+    public double getFLo() {
+        return fLo;
+    }
+
+    /**
+     * @return the upper bound of the bracket.
+     * @see #getFHi()
+     */
+    public double getHi() {
+        return hi;
+    }
+
+    /**
+     * Get function value at {@link #getHi()}.
+     * @return function value at {@link #getHi()}
+     */
+    public double getFHi() {
+        return fHi;
+    }
+
+    /**
+     * @return a point in the middle of the bracket.
+     * @see #getFMid()
+     */
+    public double getMid() {
+        return mid;
+    }
+
+    /**
+     * Get function value at {@link #getMid()}.
+     * @return function value at {@link #getMid()}
+     */
+    public double getFMid() {
+        return fMid;
+    }
+
+    /**
+     * @param f Function.
+     * @param x Argument.
+     * @return {@code f(x)}
+     * @throws TooManyEvaluationsException if the maximal number of evaluations is
+     * exceeded.
+     */
+    private double eval(UnivariateFunction f, double x) {
+        try {
+            evaluations.incrementCount();
+        } catch (MaxCountExceededException e) {
+            throw new TooManyEvaluationsException(e.getMax());
+        }
+        return f.value(x);
+    }
+}

Propchange: commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/univariate/BracketFinder.java
------------------------------------------------------------------------------
    svn:eol-style = native
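
A minimal usage sketch of the BracketFinder class above (illustrative only, not taken from the patch; it relies on the existing org.apache.commons.math3.analysis.UnivariateFunction interface and on the GoalType enum imported by the class, and the example class name is made up):

    import org.apache.commons.math3.analysis.UnivariateFunction;
    import org.apache.commons.math3.optim.GoalType;
    import org.apache.commons.math3.optim.univariate.BracketFinder;

    public class BracketFinderExample {
        public static void main(String[] args) {
            // f(x) = (x - 2)^2 has its minimum at x = 2.
            UnivariateFunction f = new UnivariateFunction() {
                public double value(double x) {
                    return (x - 2) * (x - 2);
                }
            };

            // Default settings: growLimit = 100, at most 50 evaluations.
            BracketFinder bracket = new BracketFinder();
            bracket.search(f, GoalType.MINIMIZE, 0, 1);

            // (lo, mid, hi) brackets the minimum: f(mid) <= min(f(lo), f(hi)).
            System.out.println("lo=" + bracket.getLo()
                               + " mid=" + bracket.getMid()
                               + " hi=" + bracket.getHi()
                               + " (" + bracket.getEvaluations() + " evaluations)");
        }
    }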

Added: commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/univariate/BrentOptimizer.java
URL: http://svn.apache.org/viewvc/commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/univariate/BrentOptimizer.java?rev=1420684&view=auto
==============================================================================
--- commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/univariate/BrentOptimizer.java (added)
+++ commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/univariate/BrentOptimizer.java Wed Dec 12 14:10:38 2012
@@ -0,0 +1,317 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.univariate;
+
+import org.apache.commons.math3.util.Precision;
+import org.apache.commons.math3.util.FastMath;
+import org.apache.commons.math3.exception.NumberIsTooSmallException;
+import org.apache.commons.math3.exception.NotStrictlyPositiveException;
+import org.apache.commons.math3.optim.ConvergenceChecker;
+import org.apache.commons.math3.optim.GoalType;
+
+/**
+ * For a function defined on some interval {@code (lo, hi)}, this class
+ * finds an approximation {@code x} to the point at which the function
+ * attains its minimum.
+ * It implements Richard Brent's algorithm (from his book "Algorithms for
+ * Minimization without Derivatives", p. 79) for finding minima of real
+ * univariate functions.
+ * <br/>
+ * This code is an adaptation, partly based on the Python code from SciPy
+ * (module "optimize.py" v0.5); the original algorithm is also modified
+ * <ul>
+ *  <li>to use an initial guess provided by the user,</li>
+ *  <li>to ensure that the best point encountered is the one returned.</li>
+ * </ul>
+ *
+ * @version $Id: BrentOptimizer.java 1416643 2012-12-03 19:37:14Z tn $
+ * @since 2.0
+ */
+public class BrentOptimizer extends UnivariateOptimizer {
+    /**
+     * Golden section.
+     */
+    private static final double GOLDEN_SECTION = 0.5 * (3 - FastMath.sqrt(5));
+    /**
+     * Minimum relative tolerance.
+     */
+    private static final double MIN_RELATIVE_TOLERANCE = 2 * FastMath.ulp(1d);
+    /**
+     * Relative threshold.
+     */
+    private final double relativeThreshold;
+    /**
+     * Absolute threshold.
+     */
+    private final double absoluteThreshold;
+
+    /**
+     * The arguments are used to implement the original stopping criterion
+     * of Brent's algorithm.
+     * {@code abs} and {@code rel} define a tolerance
+     * {@code tol = rel |x| + abs}. {@code rel} should be no smaller than
+     * <em>2 macheps</em> and preferably not much less than <em>sqrt(macheps)</em>,
+     * where <em>macheps</em> is the relative machine precision. {@code abs} must
+     * be positive.
+     *
+     * @param rel Relative threshold.
+     * @param abs Absolute threshold.
+     * @param checker Additional, user-defined, convergence checking
+     * procedure.
+     * @throws NotStrictlyPositiveException if {@code abs <= 0}.
+     * @throws NumberIsTooSmallException if {@code rel < 2 * Math.ulp(1d)}.
+     */
+    public BrentOptimizer(double rel,
+                          double abs,
+                          ConvergenceChecker<UnivariatePointValuePair> checker) {
+        super(checker);
+
+        if (rel < MIN_RELATIVE_TOLERANCE) {
+            throw new NumberIsTooSmallException(rel, MIN_RELATIVE_TOLERANCE, true);
+        }
+        if (abs <= 0) {
+            throw new NotStrictlyPositiveException(abs);
+        }
+
+        relativeThreshold = rel;
+        absoluteThreshold = abs;
+    }
+
+    /**
+     * The arguments are used for implementing the original stopping criterion
+     * of Brent's algorithm.
+     * {@code abs} and {@code rel} define a tolerance
+     * {@code tol = rel |x| + abs}. {@code rel} should be no smaller than
+     * <em>2 macheps</em> and preferably not much less than <em>sqrt(macheps)</em>,
+     * where <em>macheps</em> is the relative machine precision. {@code abs} must
+     * be positive.
+     *
+     * @param rel Relative threshold.
+     * @param abs Absolute threshold.
+     * @throws NotStrictlyPositiveException if {@code abs <= 0}.
+     * @throws NumberIsTooSmallException if {@code rel < 2 * Math.ulp(1d)}.
+     */
+    public BrentOptimizer(double rel,
+                          double abs) {
+        this(rel, abs, null);
+    }
+
+    /** {@inheritDoc} */
+    @Override
+    protected UnivariatePointValuePair doOptimize() {
+        final boolean isMinim = getGoalType() == GoalType.MINIMIZE;
+        final double lo = getMin();
+        final double mid = getStartValue();
+        final double hi = getMax();
+
+        // Optional additional convergence criteria.
+        final ConvergenceChecker<UnivariatePointValuePair> checker
+            = getConvergenceChecker();
+
+        double a;
+        double b;
+        if (lo < hi) {
+            a = lo;
+            b = hi;
+        } else {
+            a = hi;
+            b = lo;
+        }
+
+        double x = mid;
+        double v = x;
+        double w = x;
+        double d = 0;
+        double e = 0;
+        double fx = computeObjectiveValue(x);
+        if (!isMinim) {
+            fx = -fx;
+        }
+        double fv = fx;
+        double fw = fx;
+
+        UnivariatePointValuePair previous = null;
+        UnivariatePointValuePair current
+            = new UnivariatePointValuePair(x, isMinim ? fx : -fx);
+        // Best point encountered so far (which is the initial guess).
+        UnivariatePointValuePair best = current;
+
+        int iter = 0;
+        while (true) {
+            final double m = 0.5 * (a + b);
+            final double tol1 = relativeThreshold * FastMath.abs(x) + absoluteThreshold;
+            final double tol2 = 2 * tol1;
+
+            // Default stopping criterion.
+            final boolean stop = FastMath.abs(x - m) <= tol2 - 0.5 * (b - a);
+            if (!stop) {
+                double p = 0;
+                double q = 0;
+                double r = 0;
+                double u = 0;
+
+                if (FastMath.abs(e) > tol1) { // Fit parabola.
+                    r = (x - w) * (fx - fv);
+                    q = (x - v) * (fx - fw);
+                    p = (x - v) * q - (x - w) * r;
+                    q = 2 * (q - r);
+
+                    if (q > 0) {
+                        p = -p;
+                    } else {
+                        q = -q;
+                    }
+
+                    r = e;
+                    e = d;
+
+                    if (p > q * (a - x) &&
+                        p < q * (b - x) &&
+                        FastMath.abs(p) < FastMath.abs(0.5 * q * r)) {
+                        // Parabolic interpolation step.
+                        d = p / q;
+                        u = x + d;
+
+                        // f must not be evaluated too close to a or b.
+                        if (u - a < tol2 || b - u < tol2) {
+                            if (x <= m) {
+                                d = tol1;
+                            } else {
+                                d = -tol1;
+                            }
+                        }
+                    } else {
+                        // Golden section step.
+                        if (x < m) {
+                            e = b - x;
+                        } else {
+                            e = a - x;
+                        }
+                        d = GOLDEN_SECTION * e;
+                    }
+                } else {
+                    // Golden section step.
+                    if (x < m) {
+                        e = b - x;
+                    } else {
+                        e = a - x;
+                    }
+                    d = GOLDEN_SECTION * e;
+                }
+
+                // Update by at least "tol1".
+                if (FastMath.abs(d) < tol1) {
+                    if (d >= 0) {
+                        u = x + tol1;
+                    } else {
+                        u = x - tol1;
+                    }
+                } else {
+                    u = x + d;
+                }
+
+                double fu = computeObjectiveValue(u);
+                if (!isMinim) {
+                    fu = -fu;
+                }
+
+                // User-defined convergence checker.
+                previous = current;
+                current = new UnivariatePointValuePair(u, isMinim ? fu : -fu);
+                best = best(best,
+                            best(previous,
+                                 current,
+                                 isMinim),
+                            isMinim);
+
+                if (checker != null) {
+                    if (checker.converged(iter, previous, current)) {
+                        return best;
+                    }
+                }
+
+                // Update a, b, v, w and x.
+                if (fu <= fx) {
+                    if (u < x) {
+                        b = x;
+                    } else {
+                        a = x;
+                    }
+                    v = w;
+                    fv = fw;
+                    w = x;
+                    fw = fx;
+                    x = u;
+                    fx = fu;
+                } else {
+                    if (u < x) {
+                        a = u;
+                    } else {
+                        b = u;
+                    }
+                    if (fu <= fw ||
+                        Precision.equals(w, x)) {
+                        v = w;
+                        fv = fw;
+                        w = u;
+                        fw = fu;
+                    } else if (fu <= fv ||
+                               Precision.equals(v, x) ||
+                               Precision.equals(v, w)) {
+                        v = u;
+                        fv = fu;
+                    }
+                }
+            } else { // Default termination (Brent's criterion).
+                return best(best,
+                            best(previous,
+                                 current,
+                                 isMinim),
+                            isMinim);
+            }
+            ++iter;
+        }
+    }
+
+    /**
+     * Selects the best of two points.
+     *
+     * @param a Point and value.
+     * @param b Point and value.
+     * @param isMinim {@code true} if the selected point must be the one with
+     * the lowest value.
+     * @return the best point, or {@code null} if {@code a} and {@code b} are
+     * both {@code null}. When {@code a} and {@code b} have the same function
+     * value, {@code a} is returned.
+     */
+    private UnivariatePointValuePair best(UnivariatePointValuePair a,
+                                          UnivariatePointValuePair b,
+                                          boolean isMinim) {
+        if (a == null) {
+            return b;
+        }
+        if (b == null) {
+            return a;
+        }
+
+        if (isMinim) {
+            return a.getValue() <= b.getValue() ? a : b;
+        } else {
+            return a.getValue() >= b.getValue() ? a : b;
+        }
+    }
+}

Propchange: commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/univariate/BrentOptimizer.java
------------------------------------------------------------------------------
    svn:eol-style = native
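
A usage sketch for the BrentOptimizer above (illustrative; it assumes the base UnivariateOptimizer accepts MaxEval, GoalType, the SearchInterval class added below and a UnivariateObjectiveFunction wrapper as OptimizationData — names not appearing in this commit are assumptions):

    import org.apache.commons.math3.analysis.UnivariateFunction;
    import org.apache.commons.math3.optim.GoalType;
    import org.apache.commons.math3.optim.MaxEval;
    import org.apache.commons.math3.optim.univariate.BrentOptimizer;
    import org.apache.commons.math3.optim.univariate.SearchInterval;
    import org.apache.commons.math3.optim.univariate.UnivariateObjectiveFunction;
    import org.apache.commons.math3.optim.univariate.UnivariatePointValuePair;

    public class BrentOptimizerExample {
        public static void main(String[] args) {
            // Minimize sin(x) on (4, 5); the minimum is at 3*pi/2 (about 4.7124).
            UnivariateFunction f = new UnivariateFunction() {
                public double value(double x) {
                    return Math.sin(x);
                }
            };

            // Stopping tolerance is tol = rel * |x| + abs (see the constructor javadoc).
            BrentOptimizer optimizer = new BrentOptimizer(1e-10, 1e-14);

            UnivariatePointValuePair result =
                optimizer.optimize(new MaxEval(200),
                                   new UnivariateObjectiveFunction(f),
                                   GoalType.MINIMIZE,
                                   new SearchInterval(4, 5)); // start value defaults to 4.5

            System.out.println("x* = " + result.getPoint()
                               + ", f(x*) = " + result.getValue());
        }
    }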

Added: commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/univariate/MultiStartUnivariateOptimizer.java
URL: http://svn.apache.org/viewvc/commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/univariate/MultiStartUnivariateOptimizer.java?rev=1420684&view=auto
==============================================================================
--- commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/univariate/MultiStartUnivariateOptimizer.java (added)
+++ commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/univariate/MultiStartUnivariateOptimizer.java Wed Dec 12 14:10:38 2012
@@ -0,0 +1,235 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.commons.math3.optim.univariate;
+
+import java.util.Arrays;
+import java.util.Comparator;
+import org.apache.commons.math3.exception.MathIllegalStateException;
+import org.apache.commons.math3.exception.NotStrictlyPositiveException;
+import org.apache.commons.math3.exception.NullArgumentException;
+import org.apache.commons.math3.exception.util.LocalizedFormats;
+import org.apache.commons.math3.random.RandomGenerator;
+import org.apache.commons.math3.optim.MaxEval;
+import org.apache.commons.math3.optim.GoalType;
+import org.apache.commons.math3.optim.OptimizationData;
+
+/**
+ * Special implementation of the {@link UnivariateOptimizer} class,
+ * adding multi-start features to an existing optimizer.
+ * <br/>
+ * This class wraps an optimizer in order to use it several times in
+ * turn with different starting points (trying to avoid being trapped
+ * in a local extremum when looking for a global one).
+ *
+ * @version $Id$
+ * @since 3.0
+ */
+public class MultiStartUnivariateOptimizer
+    extends UnivariateOptimizer {
+    /** Underlying classical optimizer. */
+    private final UnivariateOptimizer optimizer;
+    /** Number of evaluations already performed for all starts. */
+    private int totalEvaluations;
+    /** Number of starts to go. */
+    private int starts;
+    /** Random generator for multi-start. */
+    private RandomGenerator generator;
+    /** Found optima. */
+    private UnivariatePointValuePair[] optima;
+    /** Optimization data. */
+    private OptimizationData[] optimData;
+    /**
+     * Location in {@link #optimData} where the updated maximum
+     * number of evaluations will be stored.
+     */
+    private int maxEvalIndex = -1;
+    /**
+     * Location in {@link #optimData} where the updated start value
+     * will be stored.
+     */
+    private int searchIntervalIndex = -1;
+
+    /**
+     * Create a multi-start optimizer from a single-start optimizer.
+     *
+     * @param optimizer Single-start optimizer to wrap.
+     * @param starts Number of starts to perform. If {@code starts == 1},
+     * the {@code optimize} methods will return the same solution as
+     * {@code optimizer} would.
+     * @param generator Random generator to use for restarts.
+     * @throws NullArgumentException if {@code optimizer} or {@code generator}
+     * is {@code null}.
+     * @throws NotStrictlyPositiveException if {@code starts < 1}.
+     */
+    public MultiStartUnivariateOptimizer(final UnivariateOptimizer optimizer,
+                                         final int starts,
+                                         final RandomGenerator generator) {
+        super(optimizer.getConvergenceChecker());
+
+        if (optimizer == null ||
+            generator == null) {
+            throw new NullArgumentException();
+        }
+        if (starts < 1) {
+            throw new NotStrictlyPositiveException(starts);
+        }
+
+        this.optimizer = optimizer;
+        this.starts = starts;
+        this.generator = generator;
+    }
+
+    /** {@inheritDoc} */
+    @Override
+    public int getEvaluations() {
+        return totalEvaluations;
+    }
+
+    /**
+     * Gets all the optima found during the last call to {@code optimize}.
+     * The optimizer stores all the optima found during a set of
+     * restarts. The {@code optimize} method returns the best point only.
+     * This method returns all the points found at the end of each start,
+     * including the best one already returned by the {@code optimize} method.
+     * <br/>
+     * The returned array has one element for each start as specified
+     * in the constructor. It is ordered with the results from the
+     * runs that did converge first, sorted from best to worst
+     * objective value (i.e. in ascending order if minimizing and in
+     * descending order if maximizing), followed by {@code null} elements
+     * corresponding to the runs that did not converge. This means all
+     * elements will be {@code null} if the {@code optimize} method threw
+     * an exception.
+     * This also means that if the first element is not {@code null}, it is
+     * the best point found across all starts.
+     *
+     * @return an array containing the optima.
+     * @throws MathIllegalStateException if {@link #optimize(OptimizationData[])
+     * optimize} has not been called.
+     */
+    public UnivariatePointValuePair[] getOptima() {
+        if (optima == null) {
+            throw new MathIllegalStateException(LocalizedFormats.NO_OPTIMUM_COMPUTED_YET);
+        }
+        return optima.clone();
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @throws MathIllegalStateException if {@code optData} does not contain an
+     * instance of {@link MaxEval} or {@link SearchInterval}.
+     */
+    @Override
+    public UnivariatePointValuePair optimize(OptimizationData... optData) {
+        // Store arguments in order to pass them to the internal optimizer.
+        optimData = optData;
+        // Set up base class and perform computations.
+        return super.optimize(optData);
+    }
+
+    /** {@inheritDoc} */
+    @Override
+    protected UnivariatePointValuePair doOptimize() {
+        // Remove all instances of "MaxEval" and "SearchInterval" from the
+        // array that will be passed to the internal optimizer.
+        // The former is to enforce smaller numbers of allowed evaluations
+        // (according to how many have been used up already), and the latter
+        // to impose a different start value for each start.
+        for (int i = 0; i < optimData.length; i++) {
+            if (optimData[i] instanceof MaxEval) {
+                optimData[i] = null;
+                maxEvalIndex = i;
+                continue;
+            }
+            if (optimData[i] instanceof SearchInterval) {
+                optimData[i] = null;
+                searchIntervalIndex = i;
+                continue;
+            }
+        }
+        if (maxEvalIndex == -1) {
+            throw new MathIllegalStateException();
+        }
+        if (searchIntervalIndex == -1) {
+            throw new MathIllegalStateException();
+        }
+
+        RuntimeException lastException = null;
+        optima = new UnivariatePointValuePair[starts];
+        totalEvaluations = 0;
+
+        final int maxEval = getMaxEvaluations();
+        final double min = getMin();
+        final double max = getMax();
+        final double startValue = getStartValue();
+
+        // Multi-start loop.
+        for (int i = 0; i < starts; i++) {
+            // CHECKSTYLE: stop IllegalCatch
+            try {
+                // Decrease number of allowed evaluations.
+                optimData[maxEvalIndex] = new MaxEval(maxEval - totalEvaluations);
+                // New start value.
+                final double s = (i == 0) ?
+                    startValue :
+                    min + generator.nextDouble() * (max - min);
+                optimData[searchIntervalIndex] = new SearchInterval(min, max, s);
+                // Optimize.
+                optima[i] = optimizer.optimize(optimData);
+            } catch (RuntimeException mue) {
+                lastException = mue;
+                optima[i] = null;
+            }
+            // CHECKSTYLE: resume IllegalCatch
+
+            totalEvaluations += optimizer.getEvaluations();
+        }
+
+        sortPairs(getGoalType());
+
+        if (optima[0] == null) {
+            throw lastException; // Cannot be null if starts >= 1.
+        }
+
+        // Return the point with the best objective function value.
+        return optima[0];
+    }
+
+    /**
+     * Sort the optima from best to worst, followed by {@code null} elements.
+     *
+     * @param goal Goal type.
+     */
+    private void sortPairs(final GoalType goal) {
+        Arrays.sort(optima, new Comparator<UnivariatePointValuePair>() {
+                public int compare(final UnivariatePointValuePair o1,
+                                   final UnivariatePointValuePair o2) {
+                    if (o1 == null) {
+                        return (o2 == null) ? 0 : 1;
+                    } else if (o2 == null) {
+                        return -1;
+                    }
+                    final double v1 = o1.getValue();
+                    final double v2 = o2.getValue();
+                    return (goal == GoalType.MINIMIZE) ?
+                        Double.compare(v1, v2) : Double.compare(v2, v1);
+                }
+            });
+    }
+}

Propchange: commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/univariate/MultiStartUnivariateOptimizer.java
------------------------------------------------------------------------------
    svn:eol-style = native
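
An illustrative sketch of the multi-start wrapper above applied to a BrentOptimizer; JDKRandomGenerator is one concrete RandomGenerator implementation (any other would do), and the UnivariateObjectiveFunction wrapper is assumed as in the previous sketch:

    import org.apache.commons.math3.analysis.UnivariateFunction;
    import org.apache.commons.math3.optim.GoalType;
    import org.apache.commons.math3.optim.MaxEval;
    import org.apache.commons.math3.optim.univariate.BrentOptimizer;
    import org.apache.commons.math3.optim.univariate.MultiStartUnivariateOptimizer;
    import org.apache.commons.math3.optim.univariate.SearchInterval;
    import org.apache.commons.math3.optim.univariate.UnivariateObjectiveFunction;
    import org.apache.commons.math3.optim.univariate.UnivariatePointValuePair;
    import org.apache.commons.math3.random.JDKRandomGenerator;

    public class MultiStartExample {
        public static void main(String[] args) {
            // A function with several local minima on (-5, 5).
            UnivariateFunction f = new UnivariateFunction() {
                public double value(double x) {
                    return Math.sin(x) + Math.sin(10.0 / 3.0 * x);
                }
            };

            MultiStartUnivariateOptimizer optimizer =
                new MultiStartUnivariateOptimizer(new BrentOptimizer(1e-10, 1e-14),
                                                  10, // starts
                                                  new JDKRandomGenerator());

            UnivariatePointValuePair best =
                optimizer.optimize(new MaxEval(1000),
                                   new UnivariateObjectiveFunction(f),
                                   GoalType.MINIMIZE,
                                   new SearchInterval(-5, 5));

            // getOptima()[0] is the same best point; trailing nulls mark failed starts.
            UnivariatePointValuePair[] optima = optimizer.getOptima();
            System.out.println("best x = " + best.getPoint()
                               + ", f = " + best.getValue()
                               + " (" + optima.length + " starts, "
                               + optimizer.getEvaluations() + " evaluations)");
        }
    }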

Added: commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/univariate/SearchInterval.java
URL: http://svn.apache.org/viewvc/commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/univariate/SearchInterval.java?rev=1420684&view=auto
==============================================================================
--- commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/univariate/SearchInterval.java (added)
+++ commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/univariate/SearchInterval.java Wed Dec 12 14:10:38 2012
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optim.univariate;
+
+import org.apache.commons.math3.optim.OptimizationData;
+import org.apache.commons.math3.exception.NumberIsTooLargeException;
+import org.apache.commons.math3.exception.OutOfRangeException;
+
+/**
+ * Search interval and (optional) start value.
+ * <br/>
+ * Immutable class.
+ *
+ * @version $Id$
+ * @since 3.1
+ */
+public class SearchInterval implements OptimizationData {
+    /** Lower bound. */
+    private final double lower;
+    /** Upper bound. */
+    private final double upper;
+    /** Start value. */
+    private final double start;
+
+    /**
+     * @param lo Lower bound.
+     * @param hi Upper bound.
+     * @param init Start value.
+     * @throws NumberIsTooLargeException if {@code lo >= hi}.
+     * @throws OutOfRangeException if {@code init < lo} or {@code init > hi}.
+     */
+    public SearchInterval(double lo,
+                          double hi,
+                          double init) {
+        if (lo >= hi) {
+            throw new NumberIsTooLargeException(lo, hi, false);
+        }
+        if (init < lo ||
+            init > hi) {
+            throw new OutOfRangeException(init, lo, hi);
+        }
+
+        lower = lo;
+        upper = hi;
+        start = init;
+    }
+
+    /**
+     * @param lo Lower bound.
+     * @param hi Upper bound.
+     * @throws NumberIsTooLargeException if {@code lo >= hi}.
+     */
+    public SearchInterval(double lo,
+                          double hi) {
+        this(lo, hi, 0.5 * (lo + hi));
+    }
+
+    /**
+     * Gets the lower bound.
+     *
+     * @return the lower bound.
+     */
+    public double getMin() {
+        return lower;
+    }
+    /**
+     * Gets the upper bound.
+     *
+     * @return the upper bound.
+     */
+    public double getMax() {
+        return upper;
+    }
+    /**
+     * Gets the start value.
+     *
+     * @return the start value.
+     */
+    public double getStartValue() {
+        return start;
+    }
+}

Propchange: commons/proper/math/trunk/src/main/java/org/apache/commons/math3/optim/univariate/SearchInterval.java
------------------------------------------------------------------------------
    svn:eol-style = native
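
A small sketch of the SearchInterval behaviour documented above (illustrative): the two-argument constructor defaults the start value to the interval midpoint, and invalid bounds are rejected with an unchecked exception.

    import org.apache.commons.math3.exception.NumberIsTooLargeException;
    import org.apache.commons.math3.optim.univariate.SearchInterval;

    public class SearchIntervalExample {
        public static void main(String[] args) {
            // Start value defaults to the midpoint: 0.5 * (-1 + 3) = 1.
            SearchInterval interval = new SearchInterval(-1, 3);
            // Prints: -1.0 1.0 3.0
            System.out.println(interval.getMin() + " "
                               + interval.getStartValue() + " "
                               + interval.getMax());

            try {
                new SearchInterval(3, -1); // lo >= hi
            } catch (NumberIsTooLargeException e) {
                System.out.println("rejected: the lower bound must be smaller than the upper bound");
            }
        }
    }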